def test_stringlist_setitem(self):
    """StringList.__setitem__ stores a value by copy: an item fetched before a
    later overwrite must keep its original value."""
    # BUGFIX: the original bound the EclSum instance to the name 'sum',
    # shadowing the builtin; renamed to 'summary'.
    summary = EclSum(self.case)
    wells = summary.wells()
    wells[0] = "Bjarne"
    well0 = wells[0]
    self.assertEqual(well0, "Bjarne")
    self.assertEqual(wells[0], "Bjarne")

    # Overwriting the list slot must not mutate the previously fetched item.
    wells[0] = "XXX"
    self.assertEqual(well0, "Bjarne")
    self.assertEqual(wells[0], "XXX")
class FlowData(FromSource):
    """
    Flow data source class

    Args:
        input_case: Full path to eclipse case to load data from
        layers: List with definition of isolated layers, if present.

    """

    def __init__(
        self,
        input_case: Union[Path, str],
        layers: Tuple = (),
    ):
        super().__init__()

        self._input_case: Path = Path(input_case)
        self._eclsum = EclSum(str(self._input_case))
        self._grid = EclGrid(str(self._input_case.with_suffix(".EGRID")))
        self._restart = EclFile(str(self._input_case.with_suffix(".UNRST")))
        # BUGFIX: the original first built an EclFile for the .INIT file and
        # immediately overwrote it with this EclInitFile; the dead assignment
        # has been removed.
        self._init = EclInitFile(
            self._grid, str(self._input_case.with_suffix(".INIT"))
        )
        self._wells = compdat.df(EclFiles(str(self._input_case)))
        self._layers = layers

    # pylint: disable=too-many-branches
    def _well_connections(self, perforation_handling_strategy: str) -> pd.DataFrame:
        """
        Function to extract well connection coordinates from a Flow simulation
        including their opening and closure time. The output of this function
        will be filtered based on the configured perforation strategy.

        Args:
            perforation_handling_strategy: Strategy to be used when creating
                perforations. Valid options are bottom_point, top_point,
                multiple, time_avg_open_location and multiple_based_on_workovers.

        Returns:
            columns: WELL_NAME, X, Y, Z, DATE, OPEN, LAYER_ID

        Raises:
            ValueError: If the configured layers do not match the simulation grid.
            NotImplementedError: If the perforation handling strategy is unknown.

        """
        # BUGFIX: the original used 'is not' (identity) to compare two ints,
        # which only works by accident for interned small ints; use '!='.
        if len(self._layers) > 0 and self._grid.nz != self._layers[-1][-1]:
            raise ValueError(
                f"Number of layers from config ({self._layers[-1][-1]}) is not equal to "
                f"number of layers from flow simulation ({self._grid.nz})."
            )

        new_items = []
        for _, row in self._wells.iterrows():
            X, Y, Z = self._grid.get_xyz(
                ijk=(row["I"] - 1, row["J"] - 1, row["K1"] - 1)
            )
            # BUGFIX: 'layer_id' was left unbound when layers were configured
            # but K1 fell outside every configured layer; default to 0.
            layer_id = 0
            if len(self._layers) > 0:
                for count, (k_start, k_end) in enumerate(self._layers):
                    if row["K1"] in range(k_start, k_end + 1):
                        layer_id = count
                        break

            new_row = {
                "WELL_NAME": row["WELL"],
                "IJK": (
                    row["I"] - 1,
                    row["J"] - 1,
                    row["K1"] - 1,
                ),
                "X": X,
                "Y": Y,
                "Z": Z,
                "DATE": row["DATE"],
                "OPEN": bool(row["OP/SH"] == "OPEN"),
                "LAYER_ID": layer_id,
            }
            new_items.append(new_row)

        df = pd.DataFrame(
            new_items,
            columns=["WELL_NAME", "IJK", "X", "Y", "Z", "DATE", "OPEN", "LAYER_ID"],
        )
        df["DATE"] = pd.to_datetime(df["DATE"], format="%Y-%m-%d").dt.date

        try:
            perforation_strategy_method = getattr(
                perforation_strategy, perforation_handling_strategy
            )
        except AttributeError as attribute_error:
            raise NotImplementedError(
                f"The perforation handling strategy {perforation_handling_strategy} is unknown."
            ) from attribute_error

        return perforation_strategy_method(df).sort_values(["DATE"])

    def _well_logs(self) -> pd.DataFrame:
        """
        Function to extract well log information from a Flow simulation.

        Returns:
            columns: WELL_NAME, X, Y, Z, PERM (mD), PORO (-)

        """
        coords: List = []

        # The PERMX/PORO keyword arrays are invariant over the loops below;
        # fetch them once instead of per connection.
        perm_kw = self._init.iget_named_kw("PERMX", 0)
        poro_kw = self._init.iget_named_kw("PORO", 0)

        for well_name in self._wells["WELL"].unique():
            unique_connections = self._wells[
                self._wells["WELL"] == well_name
            ].drop_duplicates(subset=["I", "J", "K1", "K2"])
            for _, connection in unique_connections.iterrows():
                ijk = (
                    connection["I"] - 1,
                    connection["J"] - 1,
                    connection["K1"] - 1,
                )
                xyz = self._grid.get_xyz(ijk=ijk)
                # Resolve the active cell index once instead of twice.
                active_index = self._grid.cell(
                    i=ijk[0], j=ijk[1], k=ijk[2]
                ).active_index
                coords.append(
                    [well_name, *xyz, perm_kw[active_index], poro_kw[active_index]]
                )

        return pd.DataFrame(
            coords, columns=["WELL_NAME", "X", "Y", "Z", "PERM", "PORO"]
        )

    def _production_data(self) -> pd.DataFrame:
        """
        Function to read production data for all producers and injectors from an
        Flow simulation. The simulation is required to write out the
        following vectors to the summary file: WOPR, WGPR, WWPR, WBHP, WTHP, WGIR, WWIR

        Returns:
            A DataFrame with a DateTimeIndex and the following columns:
                - date          equal to index
                - WELL_NAME     Well name as used in Flow
                - WOPR          Well Oil Production Rate
                - WGPR          Well Gas Production Rate
                - WWPR          Well Water Production Rate
                - WOPT          Well Cumulative Oil Production
                - WGPT          Well Cumulative Gas Production
                - WWPT          Well Cumulative Water Production
                - WBHP          Well Bottom Hole Pressure
                - WTHP          Well Tubing Head Pressure
                - WGIR          Well Gas Injection Rate
                - WWIR          Well Water Injection Rate
                - WSPR          Well Salt Production Rate
                - WSIR          Well Salt Injection Rate
                - WSPT          Well Cumulative Salt Production
                - WSIT          Well Cumulative Salt Injection
                - WTICHEA       Well Injection Temperature
                - WTPCHEA       Well Production Temperature
                - WSTAT         Well status (OPEN, SHUT, STOP)
                - TYPE          Well Type: "OP", "GP", "WI", "GI"
                - PHASE         Main producing/injecting phase fluid: "OIL", "GAS", "WATER"

        Todo:
            * Remove depreciation warning suppression when solved in LibEcl.
            * Improve robustness pf setting of Phase and Type.

        """
        keys = [
            "WOPR",
            "WGPR",
            "WWPR",
            "WOPT",
            "WGPT",
            "WWPT",
            "WBHP",
            "WTHP",
            "WGIR",
            "WWIR",
            "WGIT",
            "WWIT",
            "WSPR",
            "WSIR",
            "WSPT",
            "WSIT",
            "WTPCHEA",
            "WTICHEA",
            "WSTAT",
        ]

        df_production_data = pd.DataFrame()

        with warnings.catch_warnings():
            # BUGFIX: the simplefilter call was previously made *before*
            # entering catch_warnings, so the changed filter leaked out of
            # this method. Suppress a depreciation warning inside LibEcl.
            warnings.simplefilter("ignore", category=DeprecationWarning)
            for well_name in self._eclsum.wells():
                df = pd.DataFrame()

                df["date"] = self._eclsum.report_dates
                df["date"] = pd.to_datetime(df["date"])
                df.set_index("date", inplace=True)

                for prod_key in keys:
                    try:
                        df[f"{prod_key}"] = self._eclsum.get_values(
                            f"{prod_key}:{well_name}", report_only=True
                        )
                    except KeyError:
                        df[f"{prod_key}"] = np.nan

                # Set columns that have only exact zero values to np.nan
                df.loc[:, (df == 0).all(axis=0)] = np.nan

                df["WELL_NAME"] = well_name

                df["PHASE"] = None
                df.loc[df["WOPR"] > 0, "PHASE"] = "OIL"
                df.loc[(df["WOPR"] == 0) & (df["WWPR"] > 0), "PHASE"] = "WATER"
                df.loc[df["WWIR"] > 0, "PHASE"] = "WATER"
                df.loc[df["WGIR"] > 0, "PHASE"] = "GAS"
                df["TYPE"] = None
                df.loc[df["WOPR"] > 0, "TYPE"] = "OP"
                df.loc[(df["WOPR"] == 0) & (df["WWPR"] > 0), "TYPE"] = "WP"
                df.loc[df["WWIR"] > 0, "TYPE"] = "WI"
                df.loc[df["WGIR"] > 0, "TYPE"] = "GI"

                # make sure the correct well type is set also when the well is shut in
                df[["PHASE", "TYPE"]] = df[["PHASE", "TYPE"]].fillna(
                    method="backfill"
                )
                df[["PHASE", "TYPE"]] = df[["PHASE", "TYPE"]].fillna(method="ffill")

                df_production_data = df_production_data.append(df)

        if df_production_data["WSTAT"].isna().all():
            warnings.warn(
                "No WSTAT:* summary vectors in input case - setting default well status to OPEN."
            )
            wstat_default = "OPEN"
        else:
            wstat_default = "STOP"

        df_production_data["WSTAT"] = df_production_data["WSTAT"].map(
            {
                0: wstat_default,
                1: "OPEN",  # Producer OPEN
                2: "OPEN",  # Injector OPEN
                3: "SHUT",
                4: "STOP",
                5: "SHUT",  # PSHUT
                6: "STOP",  # PSTOP
                np.nan: wstat_default,
            }
        )

        # ensure that a type is assigned also if a well is never activated
        df_production_data[["PHASE", "TYPE"]] = df_production_data[
            ["PHASE", "TYPE"]
        ].fillna(method="backfill")
        df_production_data[["PHASE", "TYPE"]] = df_production_data[
            ["PHASE", "TYPE"]
        ].fillna(method="ffill")

        df_production_data["date"] = df_production_data.index
        df_production_data["date"] = pd.to_datetime(
            df_production_data["date"]
        ).dt.date

        return df_production_data

    def _faults(self) -> pd.DataFrame:
        """
        Function to read fault plane data using ecl2df.

        Returns:
            A dataframe with columns NAME, X, Y, Z with data for fault planes

        Raises:
            ValueError: If a FACE entry of the FAULTS keyword cannot be interpreted.

        """
        eclfile = EclFiles(self._input_case)
        df_fault_keyword = faults.df(eclfile)

        points = []
        for _, row in df_fault_keyword.iterrows():
            i = row["I"] - 1
            j = row["J"] - 1
            k = row["K"] - 1

            points.append((row["NAME"], i, j, k))

            # Tuple membership instead of chained '==' comparisons; the sets of
            # accepted FACE values are unchanged.
            if row["FACE"] in ("X", "X+", "I", "I+"):
                points.append((row["NAME"], i + 1, j, k))
            elif row["FACE"] in ("Y", "Y+", "J", "J+"):
                points.append((row["NAME"], i, j + 1, k))
            elif row["FACE"] in ("Z", "Z+", "K", "K+"):
                points.append((row["NAME"], i, j, k + 1))
            elif row["FACE"] in ("X-", "I-"):
                points.append((row["NAME"], i - 1, j, k))
            elif row["FACE"] in ("Y-", "J-"):
                points.append((row["NAME"], i, j - 1, k))
            elif row["FACE"] in ("Z-", "K-"):
                points.append((row["NAME"], i, j, k - 1))
            else:
                raise ValueError(
                    f"Could not interpret '{row['FACE']}' while reading the FAULTS keyword."
                )

        df_faults = pd.DataFrame.from_records(
            points, columns=["NAME", "I", "J", "K"]
        )

        if not df_faults.empty:
            df_faults[["X", "Y", "Z"]] = pd.DataFrame(
                df_faults.apply(
                    lambda row: list(
                        self._grid.get_xyz(ijk=(row["I"], row["J"], row["K"]))
                    ),
                    axis=1,
                ).values.tolist()
            )

        return df_faults.drop(["I", "J", "K"], axis=1)

    def grid_cell_bounding_boxes(self, layer_id: int) -> np.ndarray:
        """
        Function to get the bounding box (x, y and z min + max) for all grid cells

        Args:
            layer_id: The FlowNet layer id to be used to create the bounding box.

        Returns:
            A (active grid cells x 6) numpy array with columns
            [ xmin, xmax, ymin, ymax, zmin, zmax ] filtered on layer_id if not None.

        """
        if self._layers:
            # Convert 1-based layer limits from the config to 0-based k-indices.
            (k_min, k_max) = tuple(
                map(operator.sub, self._layers[layer_id], (1, 1))
            )
        else:
            (k_min, k_max) = (0, self._grid.nz)

        cells = [
            cell for cell in self._grid.cells(active=True) if k_min <= cell.k <= k_max
        ]
        xyz = np.empty((8 * len(cells), 3))

        # Collect all 8 corner points of every cell, then reduce per cell.
        for n_cell, cell in enumerate(cells):
            for n_corner, corner in enumerate(cell.corners):
                xyz[n_cell * 8 + n_corner, :] = corner

        xmin = xyz[:, 0].reshape(-1, 8).min(axis=1)
        xmax = xyz[:, 0].reshape(-1, 8).max(axis=1)
        ymin = xyz[:, 1].reshape(-1, 8).min(axis=1)
        ymax = xyz[:, 1].reshape(-1, 8).max(axis=1)
        zmin = xyz[:, 2].reshape(-1, 8).min(axis=1)
        zmax = xyz[:, 2].reshape(-1, 8).max(axis=1)

        return np.vstack([xmin, xmax, ymin, ymax, zmin, zmax]).T

    def _get_start_date(self):
        """Return the simulation start date from the summary file."""
        return self._eclsum.start_date

    def init(self, name: str) -> np.ndarray:
        """array with 'name' regions"""
        return self._init[name][0]

    def get_unique_regions(self, name: str) -> np.ndarray:
        """array with unique 'name' regions"""
        return np.unique(self._init[name][0])

    def get_well_connections(
        self, perforation_handling_strategy: str
    ) -> pd.DataFrame:
        """
        Function to get dataframe with all well connection coordinates,
        filtered based on the perforation_handling_strategy.

        Args:
            perforation_handling_strategy: Strategy to be used when creating
                perforations. Valid options are bottom_point, top_point,
                multiple, time_avg_open_location and multiple_based_on_workovers.

        Returns:
            Dataframe with all well connection coordinates,
            filtered based on the perforation_handling_strategy.
            Columns: WELL_NAME, X, Y, Z, DATE, OPEN, LAYER_ID

        """
        return self._well_connections(
            perforation_handling_strategy=perforation_handling_strategy
        )

    def bulk_volume_per_flownet_cell_based_on_voronoi_of_input_model(
        self, network: NetworkModel
    ) -> np.ndarray:
        """Generate bulk volume distribution per grid cell in the FlowNet model
        based on the geometrical distribution of the volume in the original
        (full field) simulation model. I.e., the original model's volume will
        be distributed over the FlowNet's tubes by assigning original model grid
        cell volumes to the nearest FlowNet tube cell midpoint. Finally, the
        volume distributed to all cells in a tube will be summed and evenly
        redistributed over the tube.

        Args:
            network: FlowNet network instance.

        Returns:
            An array with volumes per flownetcell.

        """
        # pylint: disable=too-many-locals
        flownet_cell_midpoints = np.array(network.cell_midpoints).T
        model_cell_mid_points = np.array(
            [cell.coordinate for cell in self._grid.cells(active=True)]
        )
        # Bulk volume of each active model cell, scaled by net-to-gross.
        model_cell_volume = [
            (cell.volume * self._init.iget_named_kw("NTG", 0)[cell.active_index])
            for cell in self._grid.cells(active=True)
        ]

        # Number of tubes and tube cells
        properties_per_cell = pd.DataFrame(
            pd.DataFrame(data=network.grid.index, index=network.grid.model).index
        )
        number_of_tubes = properties_per_cell.groupby(["model"]).ngroups
        cell_volumes = np.zeros(len(properties_per_cell["model"].values))

        # Identify the index of the last (inactive) cell of each tube which will have 0 volume
        inactive_cells = np.zeros(number_of_tubes)
        for i in range(number_of_tubes):
            inactive_cells[i] = (
                properties_per_cell.reset_index().groupby(["model"]).groups[i][-1]
            )

        # depths should be a list of depths provided by the user. it may also be empty
        depths = network.volume_layering.copy()

        # Add 0 depth level and arrange from deep to shallow)
        depths.append(0)
        depths = list(set(depths))
        depths.sort(reverse=True)

        # Perform mapping of volumes between two depth levels
        for index, depth in enumerate(depths):
            if index == 0:
                # Add a very deep dummy level
                depth_range = [1.0e10, depth]
            else:
                depth_range = [depths[index - 1], depth]

            tube_cell_volumes = np.zeros(len(flownet_cell_midpoints))

            # Identify cells located between the current lower and upper depths levels
            flownet_indices = [
                idx
                for idx, val in enumerate(network.cell_midpoints[2])
                if depth_range[0] >= val > depth_range[1]
            ]
            model_indices = [
                idx
                for idx, val in enumerate(model_cell_mid_points[:, 2])
                if depth_range[0] >= val > depth_range[1]
            ]

            # Determine nearest flow tube cell for each cell in the original model
            tree = KDTree(flownet_cell_midpoints[flownet_indices, :])
            _, matched_indices = tree.query(
                model_cell_mid_points[model_indices], k=[1]
            )

            # Assign each reservoir model volume to a flow tube
            for idx, val in enumerate(matched_indices):
                tube_cell_volumes[
                    flownet_indices[val[0]]
                ] += model_cell_volume[model_indices[idx]]

            # Compute the total volumes per tube section between the current depth levels
            properties_per_cell["distributed_volume"] = tube_cell_volumes
            tube_volumes = properties_per_cell.groupby(by="model").sum().values

            # Evenly distribute tube volumes over the tube cells between the current depth levels
            for tube in range(number_of_tubes):
                indices = [
                    i
                    for i, x in enumerate(
                        network.grid.model.iloc[flownet_indices].values.tolist()
                    )
                    if x == tube and flownet_indices[i] not in inactive_cells
                ]
                # Idiom fix: iterate directly instead of enumerate-and-discard.
                for idx in indices:
                    cell_volumes[flownet_indices[idx]] += tube_volumes[tube] / len(
                        indices
                    )

        return cell_volumes

    @property
    def faults(self) -> pd.DataFrame:
        """dataframe with all fault data"""
        return self._faults()

    @property
    def production(self) -> pd.DataFrame:
        """dataframe with all production data"""
        return self._production_data()

    @property
    def well_logs(self) -> pd.DataFrame:
        """dataframe with all well log"""
        return self._well_logs()

    @property
    def grid(self) -> EclGrid:
        """the simulation grid with properties"""
        return self._grid

    @property
    def layers(self) -> Union[Tuple[Tuple[int, int]], Tuple]:
        """Get the list of top and bottom k-indices of the original model that
        represents a FlowNet layer"""
        return self._layers
class EclipseData(FromSource):
    """
    Eclipse data source class

    Args:
        eclipse_case: Full path to eclipse case to load data from
        resample: Pandas resampling string
        perforation_handling_strategy: How to deal with perforations per well.
            ('bottom_point', 'top_point', 'multiple')

    """

    def __init__(
        self,
        eclipse_case: Union[Path, str],
        resample: Optional[str] = None,
        perforation_handling_strategy: str = "bottom_point",
    ):
        super().__init__()

        self._eclipse_case: Path = Path(eclipse_case)
        self._eclsum = EclSum(str(self._eclipse_case))
        self._grid = EclGrid(str(self._eclipse_case.with_suffix(".EGRID")))
        self._restart = EclFile(str(self._eclipse_case.with_suffix(".UNRST")))
        self._wells = WellInfo(
            self._grid, rst_file=self._restart, load_segment_information=True
        )

        self._resample: Union[str, None] = resample
        self._perforation_handling_strategy: str = perforation_handling_strategy

    def _coordinates(self) -> pd.DataFrame:
        """
        Function to extract well coordinates from an Flow/Eclipse simulation.

        Args:
            filename: Entire path to the simulated simulation case. This
                case must have both and EGRID and UNRST file.
            perforation_handling_strategy: How to deal with perforations per
                well. ('bottom_point', 'top_point', 'multiple')

        Returns:
            columns: WELL_NAME, X, Y, Z

        """

        def multi_xyz_append(append_obj_list):
            # Appends one coordinate row per global connection of a well.
            for global_conn in append_obj_list[1]:
                coords.append(
                    [append_obj_list[0], *self._grid.get_xyz(ijk=global_conn.ijk())]
                )

        coords: List = []

        for well_name in self._wells.allWellNames():
            global_conns = self._wells[well_name][0].globalConnections()
            coord_append = coords.append
            if self._perforation_handling_strategy == "bottom_point":
                xyz = self._grid.get_xyz(ijk=global_conns[-1].ijk())
            elif self._perforation_handling_strategy == "top_point":
                xyz = self._grid.get_xyz(ijk=global_conns[0].ijk())
            elif self._perforation_handling_strategy == "multiple":
                xyz = [global_conns]
                coord_append = multi_xyz_append
            else:
                # NOTE(review): FlowData raises NotImplementedError for the same
                # situation; the bare Exception is kept here because callers may
                # already catch it.
                raise Exception(
                    f"perforation strategy {self._perforation_handling_strategy} unknown"
                )

            coord_append([well_name, *xyz])

        return pd.DataFrame(coords, columns=["WELL_NAME", "X", "Y", "Z"])

    def _production_data(self) -> pd.DataFrame:
        """
        Function to read production data for all producers and injectors from an
        Flow/Eclipse simulation. The simulation is required to write out the
        following vectors to the summary file: WOPR, WGPR, WWPR, WBHP, WTHP, WGIR, WWIR

        Returns:
            A DataFrame with a DateTimeIndex and the following columns:
                - date          equal to index
                - WELL_NAME     Well name as used in Eclipse
                - WOPR          Well Oil Production Rate
                - WGPR          Well Gas Production Rate
                - WWPR          Well Water Production Rate
                - WBHP          Well Bottom Hole Pressure
                - WTHP          Well Tubing Head Pressure
                - WGIR          Well Gas Injection Rate
                - WWIR          Well Water Injection Rate
                - WSTAT         Well status (OPEN, SHUT, STOP)
                - TYPE          Well Type: "OP", "GP", "WI", "GI"
                - PHASE         Main producing/injecting phase fluid: "OIL", "GAS", "WATER"

        Todo:
            * Remove depreciation warning suppression when solved in LibEcl.
            * Improve robustness pf setting of Phase and Type.

        """
        keys = ["WOPR", "WGPR", "WWPR", "WBHP", "WTHP", "WGIR", "WWIR", "WSTAT"]

        df_production_data = pd.DataFrame()

        start_date = self._get_start_date()

        with warnings.catch_warnings():
            # BUGFIX: the simplefilter call was previously made *before*
            # entering catch_warnings, so the changed filter leaked out of
            # this method. Suppress a depreciation warning inside LibEcl.
            warnings.simplefilter("ignore", category=DeprecationWarning)
            for well_name in self._eclsum.wells():
                df = pd.DataFrame()

                df["date"] = self._eclsum.dates
                df["date"] = pd.to_datetime(df["date"])
                df.set_index("date", inplace=True)

                for prod_key in keys:
                    try:
                        df[f"{prod_key}"] = self._eclsum[
                            f"{prod_key}:{well_name}"
                        ].values
                    except KeyError:
                        df[f"{prod_key}"] = np.nan

                # Find number of leading empty rows (with only nan or 0 values)
                zero = df.fillna(0).eq(0).all(1).sum()

                if zero < df.shape[0]:
                    # If there are no empty rows, prepend one for the start date
                    if zero == 0:
                        # BUGFIX/simplification: 'df1' always holds exactly one
                        # row, so its index can be replaced directly; the
                        # original did a roundabout list lookup that only worked
                        # via the truthiness of an elementwise comparison. An
                        # explicit copy avoids chained-assignment surprises.
                        df1 = df.head(1).copy()
                        df1.index = [pd.to_datetime(start_date)]
                        df = pd.concat([df1, df])
                        for col in df.columns:
                            df[col].values[0] = 0
                        zero = 1

                    # Keep only the last empty row (well activation date)
                    df = df.iloc[max(zero - 1, 0) :]

                    # Assign well targets to the correct schedule dates
                    df = df.shift(-1)

                    # Make sure the row for the final date is not empty
                    df.iloc[-1] = df.iloc[-2]

                # Set columns that have only exact zero values to np.nan
                df.loc[:, (df == 0).all(axis=0)] = np.nan

                df["WELL_NAME"] = well_name
                df_production_data = df_production_data.append(df)

        df_production_data["PHASE"] = None
        df_production_data.loc[df_production_data["WOPR"] > 0, "PHASE"] = "OIL"
        df_production_data.loc[df_production_data["WWIR"] > 0, "PHASE"] = "WATER"
        df_production_data.loc[df_production_data["WGIR"] > 0, "PHASE"] = "GAS"

        df_production_data["WSTAT"] = df_production_data["WSTAT"].map(
            {
                1: "OPEN",  # Producer OPEN
                2: "OPEN",  # Injector OPEN
                3: "SHUT",
                4: "STOP",
                5: "SHUT",  # PSHUT
                6: "STOP",  # PSTOP
                np.nan: "STOP",
            }
        )

        df_production_data["TYPE"] = None
        df_production_data.loc[df_production_data["WOPR"] > 0, "TYPE"] = "OP"
        df_production_data.loc[df_production_data["WWIR"] > 0, "TYPE"] = "WI"
        df_production_data.loc[df_production_data["WGIR"] > 0, "TYPE"] = "GI"

        # Propagate phase/type backwards so shut-in periods are classified too.
        df_production_data[["PHASE", "TYPE"]] = df_production_data[
            ["PHASE", "TYPE"]
        ].fillna(method="backfill")

        df_production_data["date"] = df_production_data.index
        df_production_data["date"] = pd.to_datetime(
            df_production_data["date"]
        ).dt.date

        return df_production_data

    def _faults(self) -> pd.DataFrame:
        """
        Function to read fault plane data using ecl2df.

        Returns:
            A dataframe with columns NAME, X, Y, Z with data for fault planes

        Raises:
            ValueError: If a FACE entry of the FAULTS keyword cannot be interpreted.

        """
        eclfile = EclFiles(self._eclipse_case)
        df_fault_keyword = faults.df(eclfile)

        points = []
        for _, row in df_fault_keyword.iterrows():
            i = row["I"] - 1
            j = row["J"] - 1
            k = row["K"] - 1

            points.append((row["NAME"], i, j, k))

            # Tuple membership instead of chained '==' comparisons; the sets of
            # accepted FACE values are unchanged.
            if row["FACE"] in ("X", "X+"):
                points.append((row["NAME"], i + 1, j, k))
            elif row["FACE"] in ("Y", "Y+"):
                points.append((row["NAME"], i, j + 1, k))
            elif row["FACE"] in ("Z", "Z+"):
                points.append((row["NAME"], i, j, k + 1))
            elif row["FACE"] == "X-":
                points.append((row["NAME"], i - 1, j, k))
            elif row["FACE"] == "Y-":
                points.append((row["NAME"], i, j - 1, k))
            elif row["FACE"] == "Z-":
                points.append((row["NAME"], i, j, k - 1))
            else:
                raise ValueError(
                    f"Could not interpret '{row['FACE']}' while reading the FAULTS keyword."
                )

        df_faults = pd.DataFrame.from_records(
            points, columns=["NAME", "I", "J", "K"]
        )

        if not df_faults.empty:
            df_faults[["X", "Y", "Z"]] = pd.DataFrame(
                df_faults.apply(
                    lambda row: list(
                        self._grid.get_xyz(ijk=(row["I"], row["J"], row["K"]))
                    ),
                    axis=1,
                ).values.tolist()
            )

        return df_faults.drop(["I", "J", "K"], axis=1)

    def _get_start_date(self):
        """Return the simulation start date from the summary file."""
        return self._eclsum.start_date

    @property
    def faults(self) -> pd.DataFrame:
        """dataframe with all fault data"""
        return self._faults()

    @property
    def production(self) -> pd.DataFrame:
        """dataframe with all production data"""
        return self._production_data()

    @property
    def coordinates(self) -> pd.DataFrame:
        """dataframe with all coordinates"""
        return self._coordinates()
class StormBackend: """ Class for STORM Backend operations (MNPO, simulations) Args: caseName (str): Model's name (EGG, OLYMPUS) to be used in all file names. Attributes: costs (dict): A dictionary with oil, water and gas production/injection costs. caseName (str): The model name, to be used in all file names. dumpName (str): A folder that will receive the temporary files to avoid cluttering. iterFname (str): A base name for all iteration files. verboseFlags (tuple): Sets the solver and time step verbosities. baseFolder (str): Folder containing the base model for simulation. wells (dict): Wells information (can be recovered from a summary file as well). numWells (int): Number of wells. phases (tuple): Oil, Water and Gas Phases (enabled by default) curData (EclSum): Points to a summary file. rates (dict): All rates info. currentControl (list): Current production control array for producers. currentInjectionControl (list): Current injection control array for injectors. currentDelta (list): Current delta for control. algorithmSpan (int): Number of control possibilities per iteration. deltaParams (dict): Delta equation parameters. producerSat (list): Producers' oil saturation. curIteration (int): Holds the current iteration. maxIterations (int): Maximum number of iterations (default: 40). dt (float): Time step length (in days). t0 (float): Simulation's initial time (days, default: 0). eps (float): Numerical correction factor for Modified NPV (MNPV). s0 (numpy array): Initial well saturations in a given iteration. optWellNPVs (list): Best Well NPVs. optWellMNPVs (list): Best Well MNPVs. optWellBHPs (list): Best Well BHPs. optWellSat (list): Best Well Oil Saturations. optInjections (list): Best Well Injection Rates. iterWellNPVs (list): Iteration Well NPVs. iterWellMNPVs (list): Iteration Well MNPVs. iterWellBHPs (list): Iteration Well BHPs. iterWellSat (list): Iteration Well Oil Saturations. iterWellOilRates (list): Iteration Well Oil Rates. 
iterWellWaterRates (list): Iteration Well Water Rates. iterDelta (list): Iteration Deltas. """ def __init__(self,caseName,**kwargs): """Class constructor. Args: caseName (str): Model's name (EGG, OLYMPUS) to be used in all file names. Key Args: costs (dict): Costs information. Should have the following keywords: 'OilProd','WaterProd','GasProd','OilInj','WaterInj','GasInj' producers (list): Producers' names injectors (list): Injectors' names prodControl (list): Producers' initial control injeControl (list): Injectors' initial control span (int): Possibility tree's span for MNPO (default: 9) """ self.costs = kwargs['costs'] # Cost initialization self.caseName = caseName # Case Name self.dumpName = 'DUMP' # Dump folder (TODO: make this flexible) self.iterFname = 'ITER{}I{}' # Iterations base name self.verboseFlags = (0,0) # Verbose Flags (TODO: change this) self.baseFolder = 'BASE' # Folder with the base .DATA file prodNames = kwargs['producers'] # Producer Wells (names) injNames = kwargs['injectors'] # Injector wells (names) self.wells = { # 'prod' : prodNames, # Wells dictionary 'inje' : injNames # } # self.numWells = len(self.wells['prod'] + self.wells['inje']) # Get number of wells self.prodConstraints = kwargs['prodConstraints'] self.injeConstraints = kwargs['injeConstraints'] #TODO: nao tem gas no EGG MODEL self.phases = (1,1,0) # Phases: oil, water, gas self.curData = None # Pointer to a summary file self.rates = { # Variable which holds rates info. 'wopr' : [], 'wwir' : [], 'wwpr' : [], 'wwir' : [], 'wgpr' : [], 'wgir' : [] } _p, _i = len(self.wells['prod']), len(self.wells['inje']) # Number of producers and number of injectors self.currentControl = kwargs['prodControl'] # self.currentInjectionControl = kwargs['injeControl'] # Initial BHPs, rates and delta self.currentDelta = np.array([0. 
for i in range(_p)]) # self.algorithmSpan = 9 if 'span' not in kwargs.keys() else kwargs['span'] # Number of possibilities (span) self.deltaParams = { # Delta parameters (TODO: make this flexible) 'k' : np.array([5 for i in range(_p)]), 'kappa' : np.array([1e-3 for i in range(_p)]), 'h' : np.array([1e-2 for i in range(_p)]) } self.producerSat = np.array([1. for i in range(_p)]) # Initial producers' oil saturation self.__deltaEquation(np.zeros(_p)) # Initial delta calculation (private method) self.curIteration = 0 # Current Iteration self.maxIterations = 80 if 'maxIterations' not in kwargs.keys() else kwargs['maxIterations'] # Maximum number of iterations self.dt = 90 if 'timeStep' not in kwargs.keys() else kwargs['timeStep'] # Time step Length (TODO: flexible (all of these)) self.eps = .04 if 'eps' not in kwargs.keys() else kwargs['eps'] # Epsilon for numerical correction in MNPV function self.t0 = 0 if 't0' not in kwargs.keys() else kwargs['t0'] # Initial time self.s0 = np.array([0. for i in range(_i)] + [1. for i in range(_p)]) # All initial well saturations def setVerbosity(self,solver,tstep): """Sets verbosities (solver, terminal output) of OPM Flow. Args: solver (int): Solver verbosity tstep (int): Time step verbosity Returns: None """ self.verboseFlags = (solver,tstep) def __deltaEquation(self,ds): """Calculates the delta parameters to be used in the MNPO, per production well. Args: ds (numpy array): Array of oil producers' saturation. Returns: None """ k, K, h = self.deltaParams['k'], self.deltaParams['kappa'], self.deltaParams['h'] # Unpacks delta parameters self.currentDelta = k * np.maximum(K, h * np.array(ds)) def __moveIters(self): """Moves the iteration files to the dump folder. Args: None. Returns: None. """ iterFiles = [files for files in glob.glob('*.DATA')] iterFiles.extend([files for files in glob.glob('*.INC') if 'SCH' in files]) # Gets all the iteration case files and their correspondent schedules. 
for files in iterFiles: subprocess.call(['mv',files,'{}'.format(self.dumpName)]) # Move the files to the dump folder. def execFlow(self,fname,output='true'): """Executes OPM flow. Args: fname (str): File to be executed. output (str='true'): Enables/Disables OPM's terminal output. Returns: None. """ _exec = [ # Generates commands for OPM Flow 'flow',fname,'--output-dir={}'.format(self.dumpName), '--solver-verbosity={}'.format(self.verboseFlags[0]), '--time-step-verbosity={}'.format(self.verboseFlags[1]), '--enable-opm-rst-file=true','--enable-terminal-output={}'.format(output), ] subprocess.call(_exec) # Call OPM Flow def clearDump(self): """Clears the dump folder. Args: None. Returns: None. """ for i in glob.glob('{}/*.*'.format(self.dumpName)): os.remove(i) def genVectors(self,well): """Generates rate vectors from the summary file currently loaded in the class. Args: well (str): Well to get the data from. Raises: StormBackendException: Summary file not found or loaded Returns: wopr (numpy array): Well Oil Production Rate wgpr (numpy array): Well Gas Production Rate wwpr (numpy array): Well Water Production Rate woir (numpy array): Well Oil Injection Rate wgir (numpy array): Well Gas Injection Rate wwir (numpy array): Well Water Injection Rate """ try: _n = len(self.curData.get_days()) # Get number of elements except: raise StormBackendException('Summary file not found or loaded.') if self.phases[0] != 0: try: wopr = self.curData.numpy_vector('WOPR:{}'.format(well)) # Gets oil production rates, or zeros if they do not exist except: wopr = np.zeros(_n) try: woir = self.curData.numpy_vector('WOIR:{}'.format(well)) # Gets oil injection rates, or zeros if they do not exist except: woir = np.zeros(_n) else: wopr,woir = np.zeros(_n),np.zeros(_n) if self.phases[1] != 0: try: wwpr = self.curData.numpy_vector('WWPR:{}'.format(well)) # Gets water production rates, or zeros if they do not exist except: wwpr = np.zeros(_n) try: wwir = 
self.curData.numpy_vector('WWIR:{}'.format(well)) # Gets oil injection rates, or zeros if they do not exist except: wwir = np.zeros(_n) else: wwpr,wwir = np.zeros(_n),np.zeros(_n) if self.phases[2] != 0: try: wgpr = self.curData.numpy_vector('WGPR:{}'.format(well)) # Gets gas injection rates, or zeros if they do not exist except: wgpr = np.zeros(_n) try: wgir = self.curData.numpy_vector('WGIR:{}'.format(well)) # Gets gas injection rates, or zeros if they do not exist except: wgir = np.zeros(_n) else: wgpr,wgir = np.zeros(_n),np.zeros(_n) return wopr,woir,wwpr,wwir,wgpr,wgir def getBHPs(self,well): """Gets a well's Bottom Hole Pressure (BHP) Args: well (str): Well to get BHP from. Raises: StormBackendException: BHP data not found. Returns: wbhp (numpy array): Well BHP """ try: wbhp = self.curData.numpy_vector('WBHP:{}'.format(well)) except: raise StormBackendException('BHP data not found.') return wbhp def getSaturation(self,well): """Calculates the oil saturation for a well. Args: well (str): Well to get the oil saturation from. Raises: StormBackendException: Summary file not found or loaded Returns: satOil (numpy array): Well's oil saturation """ _n = len(self.curData.get_days()) wopr,woir,wwpr,wwir,wgpr,wgir = self.genVectors(well) fpr = wopr+wgpr+wwpr satOil = np.zeros(_n) for i in range(_n): if fpr[i] != 0: satOil[i] = wopr[i]/fpr[i] return satOil def __initMNPOVars(self): """Initalializes the variables which will hold MNPO's various data. Args: None. Returns: None. 
""" self.optWellNPVs = [] # Best Well NPVs self.optWellMNPVs = [] # Best Well MNPVs self.optWellBHPs = [] # Best Well BHPs self.optWellSat = [] # Best Well Oil Saturations self.optInjections = [] # Best Well Injections self.iterWellMNPVs = [] # Iteration Well MNPVs self.iterWellBHPs = [] # Iteration Well BHPs self.iterWellSat = [] # Iteration Well Oil Saturations self.iterWellOilRates = [] # Iteration Well Oil Rates self.iterWellWaterRates = [] # Iteration Well Water Rates self.iterDelta = [] # Iteration Deltas self.rates = { # Rates initialization for MNPO 'wopr' : [[] for i in range(self.numWells)], 'wwir' : [[] for i in range(self.numWells)], 'wwpr' : [[] for i in range(self.numWells)], 'wwir' : [[] for i in range(self.numWells)], 'wgpr' : [[] for i in range(self.numWells)], 'wgir' : [[] for i in range(self.numWells)] } def loadSummary(self,fname): """Loads a summary file to the @curData attribute. Args: fname (str): Summary File Name Raises: StormBackendException: File not found. Returns: Nones. """ try: self.curData = EclSum(fname) # Creates an Eclipse Summary class for data handling except: raise StormBackendException('Eclipse Summary File "{}" not found.'.format(fname)) def wellIndex(self,well): """Returns a well index (a number which represents a well), akin to a h Args: well (str): Well to be indexed. Raises: StormBackendException: Well not found. Returns: int: Well "hash" (index). """ wells = self.wells['inje'] + self.wells['prod'] try: return wells.index(well) except ValueError: raise StormBackendException('Well "{}" not found in the model'.format(well)) def calculateNPV(self,**kwargs): """Calculates NPV given a summary file previously loaded in the class. The NPV can be either the standard or the Modified NPV (MNPV). Args: None. 
Key Args: mnpv (bool): Flag indicating if modified NPV (default: False) eps (double): Numerical corrector flag (default: 0.01) t0 (double): Initial time in days (default: 0) Raises: StormBackendException: Summary file not found or loaded. Returns: dict: A dictionary containing the following fields: wellNPVs - Well NPVs NPV - Sum of well NPVs oilProd - Oil Rates waterProd - Water Rates waterInj - Water Injections t0 - Initial Time s0 - Initial Saturations t - Time Vector s - Saturation Vector modified - Flag that indicates if the NPV is modified (MNPV) or NPV. """ modified = False if 'mnpv' not in kwargs.keys() else kwargs['mnpv'] # _eps = .01 if 'eps' not in kwargs.keys() else kwargs['eps'] # Variable initializations. eps = _eps / (1. - _eps) # t0 = 0 if 't0' not in kwargs.keys() else kwargs['t0'] # try: wells = list(self.curData.wells()) # Get well info t = self.curData.get_days() # Time info except: raise StormBackendException('Summary file not found or loaded.') _n = len(self.curData.get_days()) # Array size for time and rate handling _w = len(wells) # Auxiliar for well number s0 = np.ones(_w) if 's0' not in kwargs.keys() else kwargs['s0'] # Initial saturations (MNPV case) _dim = (_w,_n) # Dimensions oilRevenue, waterCost, injectionCost = np.zeros(_dim), np.zeros(_dim), np.zeros(_dim) # Array initializations sats = np.zeros(_dim) # Array initializations cO = 0.0 if 'OilProd' not in self.costs.keys() else self.costs['OilProd'] # cW = 0.0 if 'WaterProd' not in self.costs.keys() else self.costs['WaterProd'] # cG = 0.0 if 'GasProd' not in self.costs.keys() else self.costs['GasProd'] # Cost Unpacking cOi = 0.0 if 'OilInj' not in self.costs.keys() else self.costs['OilInj'] # cWi = 0.0 if 'WaterInj' not in self.costs.keys() else self.costs['WaterInj'] # cGi = 0.0 if 'GasInj' not in self.costs.keys() else self.costs['GasInj'] # for well in wells: # wopr,woir,wwpr,wwir,wgpr,wgir = self.genVectors(well) # Get rates for each well wIdx = self.wellIndex(well) # Index current 
well s = self.getSaturation(well) # sats[wIdx,:] = s # Get saturation for current well for i in range(_n): dT = t[i] - t0 if i == 0 else t[i] - t[i-1] # Calculate dt for NPV dS = abs(s[i] - s0[wIdx]) if i == 0 else abs(s[i] - s[i-1]) # Calculate ds for MNPV if s0[wIdx] == 0 and i == 0: dS = 0 _den = dS + eps if modified else 1. # Denominator for MNPV oilRevenue[wIdx,i] = dT*cO*wopr[i]/_den # waterCost[wIdx,i] = dT*cW*wwpr[i]/_den # NPV Revenue and Costs calculation injectionCost[wIdx,i] = dT*cWi*wwir[i]/_den # npvMatrix = oilRevenue - (waterCost + injectionCost) # Final Calculation (NPV) npvPeriod = [np.sum(npvMatrix[:,i]) for i in range(_n)] # _dic = { # Bundle results 'wellNPVs' : npvMatrix, 'NPV' : npvPeriod, 'oilProd' : oilRevenue, 'waterProd' : waterCost, 't0' : t0, 's0' : s0, 'modified' : modified, 'waterInj' : injectionCost, 't' : t, 's' : sats } return _dic #AQUI EH ONDE ACONTECE A BAGACEIRA def MNPOAlgorithm(self,verbosities=(True,'false')): """MNPO's Main Algorithm Phase. Executes MNPO until a negative NPV is found. Args: verbosities (tuple); controls STORMS and OPM verbosities. Default: (True, 'false'). Returns: None. """ def deltaS(): """An inner function to compute ds for delta calculation. Args: None. Returns: ds (numpy array): Saturation Variation for Delta Equation. 
""" s = self.optWellSat[-1].copy() k = 0 ds = np.zeros(len(self.wells['prod'])) # ds initialization for well in self.wells['prod']: # ds for each producer well j = self.wellIndex(well) ds[k] = abs(s[j] - self.s0[j]) # ds = |s - s0| k += 1 print(ds) return ds self.__initMNPOVars() # Init MNPO variables self.curIteration = 0 # Current iteration: 0 # Initialize injection control variables # forgetting factor forg_factor = 0.4 # initial values theta_w = np.array([1.0, 0.0]) P_w = 1000.0 * np.eye(2) K_w = np.array([0.0, 0.0]) theta_l = np.array([1.0, 0.0]) P_l = 1000.0 * np.eye(2) K_l = np.array([0.0, 0.0]) _p, _i = len(self.wells['prod']), len(self.wells['inje']) # Number of producers and number of injectors q_hat_w, q_hat_l = 0.,0. fwpr_max, flpr_max = 1600., 14000. while self.curIteration < self.maxIterations: # Run until maxIterations ### INJECTION CONTROL if self.curIteration > 1: inj, theta_w, K_w, P_w, q_hat_w, theta_l, K_l, P_l, q_hat_l = \ calc_inj(self.curData, old_data, theta_w, P_w, K_w, q_hat_w, theta_l, P_l, K_l, q_hat_l, forg_factor, fwpr_max, flpr_max, self.curIteration) self.currentInjectionControl = [inj/_i for i in range(_i)] ######################## print('Iteration {}'.format(self.curIteration+1)) if verbosities[0]: # Verbosity check print('Current Delta: {}'.format(self.currentDelta)) print('s0: {}'.format(self.s0)) print('t0: {} days'.format(self.t0)) positive = self.MNPOIteration(verbosities) # Check if a negative NPV is yielded if positive: # If NPV is positive, update info for next iteration self.__deltaEquation(deltaS()) # Delta Update self.s0 = self.optWellSat[-1].copy() # s0 Update self.t0 += self.dt # t0 Update self.currentControl = self.optWellBHPs[-1].copy() # Control Update self.curIteration += 1 # Iteration Update else: break # Negative NPV: break this loop. old_data = self.curData self.__moveIters() # ESSE EH O METODO EXECUTADO EM CADA ITERACAO! def MNPOIteration(self,verbosities=(True,'false')): """Runs a MNPO Iteration. 
Called by the MNPOAlgorithm() method. Args: verbosities (tuple); controls STORMS and OPM verbosities. Default: (True, 'false'). Returns: positive (bool): A flag indicating if the resultant NPV is positive """ verbose = verbosities[0] # STORMS Verbosity setting iteration = self.curIteration # Current iteration fIn = '{}/{}.DATA'.format(self.baseFolder,self.caseName) # fIterEnd = '{}{}.DATA'.format(iteration,self.caseName) # File base initializations rstName = '{}/{}{}'.format(self.dumpName,iteration-1,self.caseName) # scheduleEnd = 'SCHSTEP_{}.INC'.format(iteration) # config = { # Configurations for file building 'restart' : False, 'schName' : scheduleEnd } configIter = { # Configurations for iteration file building 'restart' : False } if iteration != 0: config['restart'], configIter['restart'] = True, True # config['rstName'], configIter['rstName'] = rstName, rstName # Configurations for restart cases config['rstIndex'], configIter['rstIndex'] = iteration, iteration # _n = self.algorithmSpan # _n: Local variable for algorithm span _window1 = np.array([self.currentControl[:] for i in range(_n)]) # first window controls _window2 = np.array([self.currentControl[:] for i in range(_n)]) # second window controls _w = len(self.wells['prod']) # _w: number of producers _dv = int(_n/2) # _dv: extreme point for delta span after first iteration if iteration == 0: # dmul = [i for i in range(0,-_n,-1)] # delta span for first iteration else: # dmul = [i for i in range(-_dv,_dv+1)] # delta span for next iterations self.iterDelta.append(self.currentDelta) # Save delta to iter results maxMNPVWell = [(-1e20, 0) for i in range(_w)] # Initialize max MNPV for each well with low values and well index 0 maxTempMNPVWell = maxMNPVWell[:] # Copy above variable maxSats = [1. for i in range(_w)] # Saturation for maximum MNPV maxWopr = [0. for i in range(_w)] # Oil Rate for maximum MNPV maxWwpr = [0. 
for i in range(_w)] # Water Rate for maximum MNPV bestCtrl = np.array(self.currentControl) # Array to hold the best control mnpvParams = { # mnpvParams: config for calcNPV(), MNPV case 't0' : self.t0, 's0' : self.s0, 'mnpv' : True, 'eps' : self.eps } npvParams = { # mnpvParams: config for calcNPV(), NPV case 't0' : self.t0, 'mnpv' : False } infoLst = [] # Iteration info # Aqui eh onde o controle acontece de fato! # PAREI POR AQUI! # TODO: # Ver nos artigos e no mestrado do Bulba se tem alguma explicação envolvendo testar várias possibilidades (span of possibilities) e também sobre janelas de controle # Tambem aproveitar e procurar alguma coisa sobre delta for i in range(_n): # For each possibility in span... _window1[i,:] = np.minimum(self.prodConstraints[1], \ np.maximum(self.prodConstraints[0], _window1[i,:] * (np.ones(_w) + dmul[i]*self.currentDelta))) # Gen Window 1 BHPs _window2[i,:] = np.minimum(self.prodConstraints[1], \ np.maximum(self.prodConstraints[0], _window2[i,:] * (np.ones(_w) + 2.*dmul[i]*self.currentDelta))) # Gen Window 2 BHPs if verbose: # print('\tSpan {}:'.format(iteration, i+1)) # Control verbosity print('\tControls:\n\t -{}\n\t -{}'.format(_window1[i,:],_window2[i,:])) # iterFile = self.iterFname.format(self.curIteration,i+1) + '.DATA' # Prepare iter files configIter['schName'] = 'SCHITER{}_{}.INC'.format(self.curIteration,i+1) # controlSch = { # Prepare iter schedules 'inje' : [self.currentInjectionControl, self.currentInjectionControl], 'prod' : [_window1[i,:], _window2[i,:]] } # APLICANDO O CONTROLE DE FATO print("Preparando o arquivo de controle (schedule)") if self.curIteration > 1: StormEclSupport.schedule(configIter['schName'],'iter',self.wells,controlSch,self.dt,iteration,('BHP','RATE')) # Storm Backend calls else: StormEclSupport.schedule(configIter['schName'],'iter',self.wells,controlSch,self.dt,iteration,('BHP','BHP')) # Storm Backend calls # Talvez seja um ponto critico: criacao de arquivos print("Preparando os arquivos!") 
print("Base file name, fIn = " + fIn) print("Schedule file name = " + configIter['schName']) print("Output file name: iterFile = " + iterFile) StormEclSupport.configureEclFiles(fIn,iterFile,**configIter) # # Imagino que seja aqui que da problema print("Executou ateh aqui! Ja deveria estar com tudo pronto para executar o FLOW e fazer a simulacao") self.execFlow(iterFile, verbosities[1]) print("Incrivel! Passou!") # Execute OPM Flow iterSummary = self.dumpName + '/' + self.iterFname.format(self.curIteration,i+1) + '.S0002' # self.loadSummary(iterSummary) # Load corresponding summary infoLst.append(self.calculateNPV(**mnpvParams)) # save MNPV info t = infoLst[i]['t'] w1Idx = 0 while t[w1Idx] - self.t0 <= self.dt and w1Idx < len(t): w1Idx += 1 # Get index for time before the second window k = 0 for well in self.wells['prod']: # For each producer well... wopr, woir, wwpr, wwir, wgpr, wgir = self.genVectors(well) # Get Rates j = self.wellIndex(well) mnpv = infoLst[i]['wellNPVs'][j] # Get current well's MNPV #sat = infoLst[i]['s'][j] #print(mnpv) if np.sum(mnpv) > maxMNPVWell[k][0]: # Update best MNPV info if it is better than maxMNPVWell[k] = (np.sum(mnpv),i+1) # the previous MNPV: maxTempMNPVWell[k] = (np.sum(mnpv[:w1Idx]),i+1) # Temp MNPV maxSats[k] = infoLst[i]['s'][j][w1Idx-1] # Oil Saturation maxWopr[k] = np.sum(wopr[:w1Idx]) # Oil Rate maxWwpr[k] = np.sum(wwpr[:w1Idx]) # Water Rate bestCtrl[k] = _window1[i,k] # BHP Control k += 1 self.iterWellMNPVs.append(infoLst) # Save MNPVs to output data if verbose: print('Best Control: {}'.format(bestCtrl)) # More verbosity controlSch= { # Schedule for best Control 'prod' : [bestCtrl], 'inje' : [self.currentInjectionControl] } #print(type(controlSch['prod'])) if self.curIteration > 1: StormEclSupport.schedule(config['schName'],'onestep',self.wells,controlSch,self.dt,iteration,('BHP','RATE')) # Prepare files for best simulation else: 
StormEclSupport.schedule(config['schName'],'onestep',self.wells,controlSch,self.dt,iteration,('BHP','BHP')) # Prepare files for best simulation StormEclSupport.configureEclFiles(fIn,fIterEnd,**config) # self.execFlow(fIterEnd, verbosities[1]) # Execute OPM Flow theSummary = '{}/{}{}.S{:04d}'.format(self.dumpName,iteration,self.caseName,iteration+1) # self.loadSummary(theSummary) # Load corresponding summary file opNPVs = self.calculateNPV(**npvParams) # Calculate NPV for best control positive = np.sum(opNPVs['NPV']) > 0. # Check if the NPV is positive if positive: # If NPV was positive... for well in self.curData.wells(): wopr,woir,wwpr,wwir,wgpr,wgir = self.genVectors(well) # wIdx = self.wellIndex(well) # Save oil and water rates for each well self.rates['wopr'][wIdx].extend(wopr) # self.rates['wwpr'][wIdx].extend(wwpr) # self.optWellNPVs.append(opNPVs) # Save best NPVs self.optWellMNPVs.append({ # Save best MNPVs (window 1, window 2, overall iteration) 'w1' : maxTempMNPVWell, 'w2' : maxMNPVWell, 'it' : self.calculateNPV(**mnpvParams) }) self.optWellBHPs.append(bestCtrl) # Save producers' control self.optInjections.append(self.currentInjectionControl) # Save injectors' control self.optWellSat.append(opNPVs['s'][:,-1]) # Save oil saturation self.iterWellSat.append(maxSats) # Save iter's well oil saturation self.iterWellOilRates.append(maxWopr) # Save iter's well oil rates self.iterWellWaterRates.append(maxWwpr) # Save iter's well water rates return positive # Return flag indicating NPV > 0 (or not). def collectInfo(self): """Given an execution of MNPO, collects the relevant info into a dictionary. Args: None. 
Returns: dict: A dictionary with the following keys: wellNPV: NPV for each well; wellMNPV: MNPV for each well; w1WellMNPVs: MNPV for each wells after the first window in predictor's phase; t: Time array; s: Oil saturation array; bhp: BHP values; NPV: Overall NPV array; MNPV: Overall MNPV array; cNPV: Cumulative NPV array; cMNPV: Cumulative MNPV array; w1WellSat: Well Oil Saturations after first window; w1WellOPR: Well Oil Rates after first window; w1WellWPR: Well Water Rates after first window; 'wopr': Oil Production Rates; 'wwpr': Water Production Rates; 'wwir': Water Injection Rates; 'injRates': Injection Control. """ _w = len(self.wells['prod']) npv, mnpv, t = [], [], [] w1WellMNPVs = [[] for i in range(_w)] n = min([len(self.optWellNPVs),len(self.optWellMNPVs),len(self.optWellSat)]) for i in range(n): _np = self.optWellNPVs[i] npv.extend(_np['NPV']) # Appends NPV to the result array t.extend(_np['t']) # Appends time data to time array if i == 0: wellNPVs = _np['wellNPVs'].copy() # Appends info for well NPVs else: # wellNPVs = np.concatenate((wellNPVs,_np['wellNPVs']),axis=1) # w1mnpv = self.optWellMNPVs[i]['w1'] for k in range(_w): w1WellMNPVs[k].append(w1mnpv[k][0]) # MNPV info for first window _mnp = self.optWellMNPVs[i]['it'] mnpv.extend(_mnp['NPV']) if i == 0: wellMNPVs = _mnp['wellNPVs'].copy() # Appends info for well MNPVs else: # wellMNPVs = np.concatenate((wellNPVs,_mnp['wellNPVs']),axis=1) # results = { # Bundles the results into a dictionary for .mat file saving. 
'wellNPV' : wellNPVs, 'wellMNPV' : wellMNPVs, 't' : t, 'w1WellMNPVs' : np.array(w1WellMNPVs), 's' : np.array(self.optWellSat), 'bhp' : np.array(self.optWellBHPs), 'NPV' : npv, 'MNPV' : mnpv, 'cNPV' : np.cumsum(npv), 'cMNPV' : np.cumsum(mnpv), 'w1WellSat' : np.array(self.iterWellSat), 'w1WellOPR' : np.array(self.iterWellOilRates), 'w1WellWPR' : np.array(self.iterWellWaterRates), 'wopr' : np.array(self.rates['wopr']), 'wwpr' : np.array(self.rates['wwpr']), 'wwir' : np.array(self.rates['wwir']), 'injRates' : np.array(self.optInjections) } return results def MNPOPlots(self,iterations): """Generates plots and .mat files with relevant info. Args: iterations (int): Number of iterations to plot data. Returns: None. """ i = 0 # _n = [i for i in range(len(self.wells['prod'] + self.wells['inje']))] # s = 'DUMP/{0}EGG.S{1:04d}' # (TODO: make this flexible) self.rates['wopr'] = [[] for i in _n] # self.rates['woir'] = [[] for i in _n] # self.rates['wwpr'] = [[] for i in _n] # Initializations self.rates['wwir'] = [[] for i in _n] # self.rates['wgpr'] = [[] for i in _n] # self.rates['wgir'] = [[] for i in _n] # tVec = [] # NPV = [] # mNPV = [] # s0 = np.array([0]*8+[1]*4) # (TODO: make these 3 flexible) t0 = 0 # eps = .04 # for i in range(iterations): # For each iteration.... self.curData = EclSum(s.format(i,i+1)) # Load Summary info = self.calculateNPV(t0=t0,mnpv=False) # Calculate NPV infoMod = self.calculateNPV(t0=t0,s0=s0,eps=eps,mnpv=True) # Calculate MNPV t = self.curData.get_days() # Get time array... tVec.extend(list(t)) # ... and append it to existing data NPV.extend(info['NPV']) # Do the same with NPV... mNPV.extend(infoMod['NPV']) # ... and MNPV as well. 
if i == 0: # wellNPV = info['wellNPVs'] # Construct well NPV, wellMNPV = infoMod['wellNPVs'] # well MNPV, wellSat = info['s'] # and well Oil Saturation arrays else: # wellNPV = np.concatenate((wellNPV,info['wellNPVs']),axis=1) # wellMNPV = np.concatenate((wellMNPV,infoMod['wellNPVs']),axis=1) # wellSat = np.concatenate((wellSat,info['s']),axis=1) # t0 = t[-1] for well in self.curData.wells(): # _j = self.wellIndex(well) # s0[_j] = self.getSaturation(well)[-1] # wopr,woir,wwpr,wwir,wgpr,wgir = self.genVectors(well) # Build rates info. self.rates['wopr'][_j].extend(list(wopr)) # self.rates['woir'][_j].extend(list(woir)) # self.rates['wwpr'][_j].extend(list(wwpr)) # self.rates['wwir'][_j].extend(list(wwir)) # self.rates['wgpr'][_j].extend(list(wgpr)) # self.rates['wgir'][_j].extend(list(wgir)) # ''' From now on, plot rates and NPVs info. ''' plt.clf() plt.plot(np.array(tVec),np.array(NPV)) plt.grid(True) plt.xlabel('Time (days)') plt.ylabel('NPV (USD)') plt.title('NPV') plt.savefig('FIG/NPV.png',dpi=200) plt.clf() plt.plot(np.array(tVec),np.cumsum(NPV)) plt.grid(True) plt.xlabel('Time (days)') plt.ylabel('NPV (USD)') plt.title('Cumulative NPV') plt.savefig('FIG/NPVCumulative.png',dpi=200) plt.clf() plt.plot(np.array(tVec),np.array(mNPV)) plt.grid(True) plt.xlabel('Time (days)') plt.ylabel('MNPV (USD)') plt.title('Modified NPV') plt.savefig('FIG/MNPV.png',dpi=200) plt.clf() plt.plot(np.array(tVec),np.cumsum(mNPV)) plt.grid(True) plt.xlabel('Time (days)') plt.ylabel('MNPV (USD)') plt.title('Cumulative Modified NPV') plt.savefig('FIG/MNPVCumulative.png',dpi=200) for rate in ['wopr','wwpr']: plt.clf() for well in self.wells['prod']: plt.plot(np.array(tVec),np.array(self.rates[rate][self.wellIndex(well)]),label=well) plt.title(rate.upper()) plt.grid(True) plt.legend(loc='best') plt.savefig('FIG/{}.png'.format(rate.upper()), dpi=200) plt.clf() for well in self.wells['prod']: plt.plot(np.array(tVec),wellNPV[self.wellIndex(well),:],label=well) plt.title('Well NPVs') 
plt.legend(loc='best') plt.grid(True) plt.xlabel('Time (days)') plt.ylabel('NPV (USD)') plt.savefig('FIG/WellNPV.png',dpi=200) plt.clf() for well in self.wells['prod']: plt.plot(np.array(tVec),wellMNPV[self.wellIndex(well),:],label=well) plt.title('Well Modified NPVs') plt.legend(loc='best') plt.grid(True) plt.xlabel('Time (days)') plt.ylabel('NPV (USD)') plt.savefig('FIG/WellMNPV.png',dpi=200) ''' .mat file handling. ''' dataMat = { 'wellNPV' : wellNPV, 'wellMNPV' : wellMNPV, 'wellSat' : wellSat, 't' : np.array(tVec), 'NPV' : np.array(NPV), 'MNPV' : np.array(mNPV), 'wopr' : self.rates['wopr'], 'wwpr' : self.rates['wwpr'], 'wwir' : self.rates['wwir'] } sio.savemat('dumpData.mat',dataMat) # Save .mat file def directSimulation(self,controls,outputDir='DIRECT'): """Executes a Direct Simulation (no additional algorithm) and saves info to a .mat file. Args: controls (dict): Schedule controls. outputDir: Folder to save data (default: 'DIRECT') """ fIn = 'BASE/{}UNI.DATA'.format(self.caseName) # fOut = 'UNI{}.DATA'.format(self.caseName) # File settings _x = np.shape(controls['prod'])[0] # StormEclSupport.schedule('UNISCHEDULE.INC','incremental',self.wells,controls,90,_x,('BHP','BHP')) # Generate Files StormEclSupport.configureEclFiles(fIn,fOut,schName='UNISCHEDULE.INC') # tmp = self.dumpName self.dumpName = outputDir # Change Flow's dump directory... self.execFlow(fOut) # ... execute Flow... self.dumpName = tmp # ... 
and retrieve the original info dump = '{}/UNI{}.UNSMRY'.format(outputDir,self.caseName) # self.curData = EclSum(dump) # Load resultant summary file t = self.curData.get_days() # Time array _n = len(self.wells['prod'] + self.wells['inje']) # _n: number of wells self.rates['wopr'] = [[] for i in range(_n)] # Rate initializations self.rates['woir'] = [[] for i in range(_n)] self.rates['wwpr'] = [[] for i in range(_n)] self.rates['wwir'] = [[] for i in range(_n)] self.rates['wgpr'] = [[] for i in range(_n)] self.rates['wgir'] = [[] for i in range(_n)] for well in self.curData.wells(): _j = self.wellIndex(well) wopr,woir,wwpr,wwir,wgpr,wgir = self.genVectors(well) # Get rates for each well self.rates['wopr'][_j].extend(list(wopr)) self.rates['woir'][_j].extend(list(woir)) self.rates['wwpr'][_j].extend(list(wwpr)) self.rates['wwir'][_j].extend(list(wwir)) self.rates['wgpr'][_j].extend(list(wgpr)) self.rates['wgir'][_j].extend(list(wgir)) s0 = self.s0 # (TODO: make initial saturation flexible) t0 = self.t0 eps = self.eps info, infoMod = self.calculateNPV(t0=t0,modified=False), self.calculateNPV(t0=t0,s0=s0,eps=eps,mnpv=True) # Calculate NPV and MNPV dataMat = { # Bundle data into a dictionary 'wellNPV' : info['wellNPVs'], 'wellMNPV' : infoMod['wellNPVs'], 'wellSat' : info['s'], 't' : np.array(t), 'NPV' : info['NPV'], 'MNPV' : infoMod['NPV'], 'wopr' : self.rates['wopr'], 'wwpr' : self.rates['wwpr'], 'wwir' : self.rates['wwir'] } sio.savemat('dumpUniData.mat',dataMat) # Save data into a .mat file def rateDirectSimulation(self,dataName,outputDir='RATE'): """Executes a Direct Simulation (no additional algorithm) from data from a .mat file, uses Rate Control and saves info to a .mat file. Usage not recommended as it is. Args: dataName (dict): Schedule controls .mat file name. 
outputDir: Folder to save data (default: 'RATE') """ data = sio.loadmat(dataName) cP = np.transpose(data['wbhp'])[:,8:12] _x = np.shape(cP)[0] cI = [8*[60] for i in range(_x)] control = { 'prod' : cP, 'inje' : cI } fIn = 'BASE/{}UNI.DATA'.format(self.caseName) fOut = 'RATE{}.DATA'.format(self.caseName) StormEclSupport.configureEclFiles(fIn,fOut,schName='SCHRATE.INC') x = data['t'][0] t = [x[i] if i == 0 else x[i]-x[i-1] for i in range(len(x))] StormEclSupport.vectorSchedule('SCHRATE.INC',self.wells,control,t,'ORAT') tmp = self.dumpName self.dumpName = outputDir self.execFlow(fOut) self.dumpName = tmp dump = '{}/RATE{}.UNSMRY'.format(outputDir,self.caseName) self.curData = EclSum(dump) t = self.curData.get_days() _n = len(self.wells['prod'] + self.wells['inje']) self.rates['wopr'] = [[] for i in range(_n)] self.rates['woir'] = [[] for i in range(_n)] self.rates['wwpr'] = [[] for i in range(_n)] self.rates['wwir'] = [[] for i in range(_n)] self.rates['wgpr'] = [[] for i in range(_n)] self.rates['wgir'] = [[] for i in range(_n)] for well in self.curData.wells(): _j = self.wellIndex(well) wopr,woir,wwpr,wwir,wgpr,wgir = self.genVectors(well) self.rates['wopr'][_j].extend(list(wopr)) self.rates['woir'][_j].extend(list(woir)) self.rates['wwpr'][_j].extend(list(wwpr)) self.rates['wwir'][_j].extend(list(wwir)) self.rates['wgpr'][_j].extend(list(wgpr)) self.rates['wgir'][_j].extend(list(wgir)) s0 = np.array([0]*8+[1]*4) t0 = 0 eps = .04 info, infoMod = self.calculateNPV(t0=t0,modified=False), self.calculateNPV(t0=t0,s0=s0,eps=eps,mnpv=True) dataMat = { 'wellNPV' : info['wellNPVs'], 'wellMNPV' : infoMod['wellNPVs'], 'wellSat' : info['s'], 't' : np.array(t), 'NPV' : info['NPV'], 'MNPV' : infoMod['NPV'], 'wopr' : self.rates['wopr'], 'wwpr' : self.rates['wwpr'], 'wwir' : self.rates['wwir'] } sio.savemat('dumpRateData.mat',dataMat)
class FlowData(FromSource):
    """
    Flow data source class

    Args:
        input_case: Full path to eclipse case to load data from
        layers: List with definition of isolated layers, if present.
        perforation_handling_strategy: How to deal with perforations per well.
            ('bottom_point', 'top_point', 'multiple')
    """

    def __init__(
        self,
        input_case: Union[Path, str],
        layers: Tuple = (),
        perforation_handling_strategy: str = "bottom_point",
    ):
        super().__init__()

        self._input_case: Path = Path(input_case)
        self._eclsum = EclSum(str(self._input_case))
        self._grid = EclGrid(str(self._input_case.with_suffix(".EGRID")))
        self._restart = EclFile(str(self._input_case.with_suffix(".UNRST")))
        # NOTE: the original also built a plain EclFile(.INIT) here and then
        # immediately overwrote it with the EclInitFile below; the dead
        # construction has been removed.
        self._init = EclInitFile(self._grid, str(self._input_case.with_suffix(".INIT")))
        self._wells = compdat.df(EclFiles(str(self._input_case)))
        self._layers = layers

        self._perforation_handling_strategy: str = perforation_handling_strategy

    # pylint: disable=too-many-branches
    def _well_connections(self) -> pd.DataFrame:
        """
        Function to extract well connection coordinates from a Flow simulation including
        their opening and closure time. The output of this function will be filtered
        based on the configured perforation strategy.

        Returns:
            columns: WELL_NAME, X, Y, Z, DATE, OPEN, LAYER_ID

        Raises:
            ValueError: if the configured layers do not cover the simulation grid.
            NotImplementedError: if the perforation handling strategy is unknown.
        """
        # Fixed: the original compared with `is not`, which tests object
        # identity and is only accidentally correct for small interned ints.
        if len(self._layers) > 0 and self._grid.nz != self._layers[-1][-1]:
            raise ValueError(
                f"Number of layers from config ({self._layers[-1][-1]}) is not equal to "
                f"number of layers from flow simulation ({self._grid.nz})."
            )

        new_items = []
        for _, row in self._wells.iterrows():
            X, Y, Z = self._grid.get_xyz(
                ijk=(row["I"] - 1, row["J"] - 1, row["K1"] - 1)
            )
            if len(self._layers) > 0:
                # Map the connection's K1 index onto the FlowNet layer it
                # falls inside (layers are inclusive (top, bottom) pairs).
                for count, (i, j) in enumerate(self._layers):
                    if row["K1"] in range(i, j + 1):
                        layer_id = count
                        break
            else:
                layer_id = 0

            new_row = {
                "WELL_NAME": row["WELL"],
                "IJK": (
                    row["I"] - 1,
                    row["J"] - 1,
                    row["K1"] - 1,
                ),
                "X": X,
                "Y": Y,
                "Z": Z,
                "DATE": row["DATE"],
                "OPEN": bool(row["OP/SH"] == "OPEN"),
                "LAYER_ID": layer_id,
            }
            new_items.append(new_row)

        df = pd.DataFrame(
            new_items,
            columns=["WELL_NAME", "IJK", "X", "Y", "Z", "DATE", "OPEN", "LAYER_ID"],
        )
        df["DATE"] = pd.to_datetime(df["DATE"], format="%Y-%m-%d").dt.date

        try:
            perforation_strategy_method = getattr(
                perforation_strategy, self._perforation_handling_strategy
            )
        except AttributeError as attribute_error:
            raise NotImplementedError(
                f"The perforation handling strategy {self._perforation_handling_strategy} is unknown."
            ) from attribute_error

        return perforation_strategy_method(df).sort_values(["DATE"])

    def _well_logs(self) -> pd.DataFrame:
        """
        Function to extract well log information from a Flow simulation.

        Returns:
            columns: WELL_NAME, X, Y, Z, PERM (mD), PORO (-)
        """
        coords: List = []

        # PERMX/PORO are grid-wide INIT keywords; fetch them once instead of
        # once per connection as the original did.
        perm_kw = self._init.iget_named_kw("PERMX", 0)
        poro_kw = self._init.iget_named_kw("PORO", 0)

        for well_name in self._wells["WELL"].unique():
            unique_connections = self._wells[
                self._wells["WELL"] == well_name
            ].drop_duplicates(subset=["I", "J", "K1", "K2"])
            for _, connection in unique_connections.iterrows():
                ijk = (connection["I"] - 1, connection["J"] - 1, connection["K1"] - 1)
                xyz = self._grid.get_xyz(ijk=ijk)

                # Resolve the cell's active index once (the original repeated
                # the grid.cell(...) lookup for PERM and PORO separately).
                active_index = self._grid.cell(
                    i=ijk[0], j=ijk[1], k=ijk[2]
                ).active_index

                coords.append(
                    [well_name, *xyz, perm_kw[active_index], poro_kw[active_index]]
                )

        return pd.DataFrame(
            coords, columns=["WELL_NAME", "X", "Y", "Z", "PERM", "PORO"]
        )

    def _production_data(self) -> pd.DataFrame:
        """
        Function to read production data for all producers and injectors from an
        Flow simulation. The simulation is required to write out the
        following vectors to the summary file: WOPR, WGPR, WWPR, WBHP, WTHP, WGIR, WWIR

        Returns:
            A DataFrame with a DateTimeIndex and the following columns:
                - date          equal to index
                - WELL_NAME     Well name as used in Flow
                - WOPR          Well Oil Production Rate
                - WGPR          Well Gas Production Rate
                - WWPR          Well Water Production Rate
                - WOPT          Well Cumulative Oil Production
                - WGPT          Well Cumulative Gas Production Rate
                - WWPT          Well Cumulative Water Production Rate
                - WBHP          Well Bottom Hole Pressure
                - WTHP          Well Tubing Head Pressure
                - WGIR          Well Gas Injection Rate
                - WWIR          Well Water Injection Rate
                - WSTAT         Well status (OPEN, SHUT, STOP)
                - TYPE          Well Type: "OP", "GP", "WI", "GI"
                - PHASE         Main producing/injecting phase fluid: "OIL", "GAS", "WATER"

        Todo:
            * Remove depreciation warning suppression when solved in LibEcl.
            * Improve robustness pf setting of Phase and Type.

        """
        keys = [
            "WOPR",
            "WGPR",
            "WWPR",
            "WOPT",
            "WGPT",
            "WWPT",
            "WBHP",
            "WTHP",
            "WGIR",
            "WWIR",
            "WGIT",
            "WWIT",
            "WSTAT",
        ]

        df_production_data = pd.DataFrame()

        with warnings.catch_warnings():
            # Suppress a depreciation warning inside LibEcl. Fixed: the filter
            # is now installed *inside* catch_warnings() so it is restored on
            # exit; the original called simplefilter() before entering the
            # context, leaking the "ignore" filter process-wide.
            warnings.simplefilter("ignore", category=DeprecationWarning)
            for well_name in self._eclsum.wells():
                df = pd.DataFrame()

                df["date"] = self._eclsum.report_dates
                df["date"] = pd.to_datetime(df["date"])
                df.set_index("date", inplace=True)

                for prod_key in keys:
                    try:
                        df[f"{prod_key}"] = self._eclsum.get_values(
                            f"{prod_key}:{well_name}", report_only=True
                        )
                    except KeyError:
                        # Vector not written by the simulator for this well.
                        df[f"{prod_key}"] = np.nan

                # Set columns that have only exact zero values to np.nan
                df.loc[:, (df == 0).all(axis=0)] = np.nan

                df["WELL_NAME"] = well_name

                df["PHASE"] = None
                df.loc[df["WOPR"] > 0, "PHASE"] = "OIL"
                df.loc[df["WWIR"] > 0, "PHASE"] = "WATER"
                df.loc[df["WGIR"] > 0, "PHASE"] = "GAS"
                df["TYPE"] = None
                df.loc[df["WOPR"] > 0, "TYPE"] = "OP"
                df.loc[df["WWIR"] > 0, "TYPE"] = "WI"
                df.loc[df["WGIR"] > 0, "TYPE"] = "GI"
                # make sure the correct well type is set also when the well is shut in
                df[["PHASE", "TYPE"]] = df[["PHASE", "TYPE"]].fillna(method="backfill")
                df[["PHASE", "TYPE"]] = df[["PHASE", "TYPE"]].fillna(method="ffill")

                df_production_data = df_production_data.append(df)

        if df_production_data["WSTAT"].isna().all():
            warnings.warn(
                "No WSTAT:* summary vectors in input case - setting default well status to OPEN."
            )
            wstat_default = "OPEN"
        else:
            wstat_default = "STOP"

        df_production_data["WSTAT"] = df_production_data["WSTAT"].map(
            {
                0: wstat_default,
                1: "OPEN",  # Producer OPEN
                2: "OPEN",  # Injector OPEN
                3: "SHUT",
                4: "STOP",
                5: "SHUT",  # PSHUT
                6: "STOP",  # PSTOP
                np.nan: wstat_default,
            }
        )

        # ensure that a type is assigned also if a well is never activated
        df_production_data[["PHASE", "TYPE"]] = df_production_data[
            ["PHASE", "TYPE"]
        ].fillna(method="backfill")
        df_production_data[["PHASE", "TYPE"]] = df_production_data[
            ["PHASE", "TYPE"]
        ].fillna(method="ffill")

        df_production_data["date"] = df_production_data.index
        df_production_data["date"] = pd.to_datetime(df_production_data["date"]).dt.date

        return df_production_data

    def _faults(self) -> pd.DataFrame:
        """
        Function to read fault plane data using ecl2df.

        Returns:
            A dataframe with columns NAME, X, Y, Z with data for fault planes

        Raises:
            ValueError: if a FAULTS keyword face entry cannot be interpreted.
        """
        eclfile = EclFiles(self._input_case)
        df_fault_keyword = faults.df(eclfile)

        points = []
        for _, row in df_fault_keyword.iterrows():
            i = row["I"] - 1
            j = row["J"] - 1
            k = row["K"] - 1

            points.append((row["NAME"], i, j, k))

            # Each fault face also contributes the neighbouring cell on the
            # face's side of the plane.
            if row["FACE"] == "X" or row["FACE"] == "X+":
                points.append((row["NAME"], i + 1, j, k))
            elif row["FACE"] == "Y" or row["FACE"] == "Y+":
                points.append((row["NAME"], i, j + 1, k))
            elif row["FACE"] == "Z" or row["FACE"] == "Z+":
                points.append((row["NAME"], i, j, k + 1))
            elif row["FACE"] == "X-":
                points.append((row["NAME"], i - 1, j, k))
            elif row["FACE"] == "Y-":
                points.append((row["NAME"], i, j - 1, k))
            elif row["FACE"] == "Z-":
                points.append((row["NAME"], i, j, k - 1))
            else:
                raise ValueError(
                    f"Could not interpret '{row['FACE']}' while reading the FAULTS keyword."
                )

        df_faults = pd.DataFrame.from_records(points, columns=["NAME", "I", "J", "K"])

        if not df_faults.empty:
            df_faults[["X", "Y", "Z"]] = pd.DataFrame(
                df_faults.apply(
                    lambda row: list(
                        self._grid.get_xyz(ijk=(row["I"], row["J"], row["K"]))
                    ),
                    axis=1,
                ).values.tolist()
            )

        return df_faults.drop(["I", "J", "K"], axis=1)

    def _grid_cell_bounding_boxes(self, layer_id: Optional[int] = None) -> np.ndarray:
        """
        Function to get the bounding box (x, y and z min + max)
        for all grid cells

        Args:
            layer_id: The FlowNet layer id to be used to create the bounding box.

        Returns:
            A (active grid cells x 6) numpy array with columns
            [ xmin, xmax, ymin, ymax, zmin, zmax ]
            filtered on layer_id if not None.
        """
        if layer_id is not None:
            # Layers are 1-based (top, bottom) pairs; convert to 0-based k.
            (k_min, k_max) = tuple(map(operator.sub, self._layers[layer_id], (1, 1)))
        else:
            (k_min, k_max) = (0, self._grid.nz)

        cells = [
            cell for cell in self._grid.cells(active=True) if k_min <= cell.k <= k_max
        ]
        xyz = np.empty((8 * len(cells), 3))

        # Collect the 8 corner points of every selected cell.
        for n_cell, cell in enumerate(cells):
            for n_corner, corner in enumerate(cell.corners):
                xyz[n_cell * 8 + n_corner, :] = corner

        xmin = xyz[:, 0].reshape(-1, 8).min(axis=1)
        xmax = xyz[:, 0].reshape(-1, 8).max(axis=1)
        ymin = xyz[:, 1].reshape(-1, 8).min(axis=1)
        ymax = xyz[:, 1].reshape(-1, 8).max(axis=1)
        zmin = xyz[:, 2].reshape(-1, 8).min(axis=1)
        zmax = xyz[:, 2].reshape(-1, 8).max(axis=1)

        return np.vstack([xmin, xmax, ymin, ymax, zmin, zmax]).T

    def _get_start_date(self):
        # Simulation start date as reported by the summary file.
        return self._eclsum.start_date

    def init(self, name: str) -> np.ndarray:
        """array with 'name' regions"""
        return self._init[name][0]

    def get_unique_regions(self, name: str) -> np.ndarray:
        """array with unique 'name' regions"""
        return np.unique(self._init[name][0])

    @property
    def grid_cell_bounding_boxes(self) -> np.ndarray:
        """Boundingboxes for all gridcells"""
        return self._grid_cell_bounding_boxes()

    @property
    def faults(self) -> pd.DataFrame:
        """dataframe with all fault data"""
        return self._faults()

    @property
    def production(self) -> pd.DataFrame:
        """dataframe with all production data"""
        return self._production_data()

    @property
    def well_connections(self) -> pd.DataFrame:
        """dataframe with all well connection coordinates"""
        return self._well_connections()

    @property
    def well_logs(self) -> pd.DataFrame:
        """dataframe with all well log"""
        return self._well_logs()

    @property
    def grid(self) -> EclGrid:
        """the simulation grid with properties"""
        return self._grid

    @property
    def layers(self) -> Union[Tuple[Tuple[int, int]], Tuple]:
        """Get the list of top and bottom k-indeces of a the orignal model that represents a FlowNet layer"""
        return self._layers
class FlowData(FromSource):
    """
    Flow data source class

    Args:
        input_case: Full path to eclipse case to load data from
        perforation_handling_strategy: How to deal with perforations per well.
            ('bottom_point', 'top_point', 'multiple')
    """

    def __init__(
        self,
        input_case: Union[Path, str],
        perforation_handling_strategy: str = "bottom_point",
    ):
        super().__init__()

        # Open the summary, grid and restart files belonging to the case.
        self._input_case: Path = Path(input_case)
        self._eclsum = EclSum(str(self._input_case))
        self._grid = EclGrid(str(self._input_case.with_suffix(".EGRID")))
        self._restart = EclFile(str(self._input_case.with_suffix(".UNRST")))
        self._wells = WellInfo(
            self._grid, rst_file=self._restart, load_segment_information=True
        )

        self._perforation_handling_strategy: str = perforation_handling_strategy

    # pylint: disable=too-many-branches
    def _coordinates(self) -> pd.DataFrame:
        """
        Function to extract well coordinates from a Flow simulation.

        Returns:
            columns: WELL_NAME, X, Y, Z
        """

        def multi_xyz_append(append_obj_list):
            # Append one row per global connection of the well
            # (used by the 'multiple' strategy).
            for global_conn in append_obj_list[1]:
                coords.append(
                    [append_obj_list[0], *self._grid.get_xyz(ijk=global_conn.ijk())]
                )

        coords: List = []
        for well_name in self._wells.allWellNames():
            global_conns = self._wells[well_name][0].globalConnections()
            coord_append = coords.append
            if self._perforation_handling_strategy == "bottom_point":
                xyz = self._grid.get_xyz(ijk=global_conns[-1].ijk())
            elif self._perforation_handling_strategy == "top_point":
                xyz = self._grid.get_xyz(ijk=global_conns[0].ijk())
            elif self._perforation_handling_strategy == "multiple":
                xyz = [global_conns]
                coord_append = multi_xyz_append
            elif self._perforation_handling_strategy == "time_avg_open_location":
                # Accumulate, per connection, the number of seconds it has been open.
                connection_open_time = {}

                for i, conn_status in enumerate(self._wells[well_name]):
                    time = datetime.datetime.strptime(
                        str(conn_status.simulationTime()), "%Y-%m-%d %H:%M:%S"
                    )

                    if i == 0:
                        prev_time = time

                    for connection in conn_status.globalConnections():
                        if connection.ijk() not in connection_open_time:
                            # NOTE(review): a connection's first appearance always
                            # records 0.0, even when it is open — confirm intended.
                            connection_open_time[connection.ijk()] = 0.0
                        elif connection.isOpen():
                            connection_open_time[connection.ijk()] += (
                                time - prev_time
                            ).total_seconds()
                        else:
                            connection_open_time[connection.ijk()] += 0.0

                    prev_time = time

                xyz_values = np.zeros((1, 3), dtype=np.float64)
                total_open_time = sum(connection_open_time.values())

                if total_open_time > 0:
                    # Weighted average of connection coordinates by open time.
                    for connection, open_time in connection_open_time.items():
                        xyz_values += np.multiply(
                            np.array(self._grid.get_xyz(ijk=connection)),
                            open_time / total_open_time,
                        )
                else:
                    # No connection was ever open: plain average over connections.
                    for connection, open_time in connection_open_time.items():
                        xyz_values += np.divide(
                            np.array(self._grid.get_xyz(ijk=connection)),
                            len(connection_open_time.items()),
                        )

                xyz = tuple(*xyz_values)

            else:
                raise Exception(
                    f"perforation strategy {self._perforation_handling_strategy} unknown"
                )

            coord_append([well_name, *xyz])

        return pd.DataFrame(coords, columns=["WELL_NAME", "X", "Y", "Z"])

    def _production_data(self) -> pd.DataFrame:
        """
        Function to read production data for all producers and injectors from a
        Flow simulation. The simulation is required to write out the
        following vectors to the summary file: WOPR, WGPR, WWPR, WBHP, WTHP, WGIR, WWIR

        Returns:
            A DataFrame with a DateTimeIndex and the following columns:
                - date          equal to index
                - WELL_NAME     Well name as used in Flow
                - WOPR          Well Oil Production Rate
                - WGPR          Well Gas Production Rate
                - WWPR          Well Water Production Rate
                - WBHP          Well Bottom Hole Pressure
                - WTHP          Well Tubing Head Pressure
                - WGIR          Well Gas Injection Rate
                - WWIR          Well Water Injection Rate
                - WSTAT         Well status (OPEN, SHUT, STOP)
                - TYPE          Well Type: "OP", "GP", "WI", "GI"
                - PHASE         Main producing/injecting phase fluid: "OIL", "GAS", "WATER"

        Todo:
            * Remove deprecation warning suppression when solved in LibEcl.
            * Improve robustness of setting of Phase and Type.

        """
        keys = ["WOPR", "WGPR", "WWPR", "WBHP", "WTHP", "WGIR", "WWIR", "WSTAT"]

        df_production_data = pd.DataFrame()

        start_date = self._get_start_date()

        # Suppress a depreciation warning inside LibEcl
        # NOTE(review): simplefilter is called *outside* the catch_warnings
        # context, so the filter change leaks beyond this method — confirm
        # whether it should be moved inside the `with` block.
        warnings.simplefilter("ignore", category=DeprecationWarning)
        with warnings.catch_warnings():

            for well_name in self._eclsum.wells():
                df = pd.DataFrame()

                df["date"] = self._eclsum.report_dates
                df["date"] = pd.to_datetime(df["date"])
                df.set_index("date", inplace=True)

                # Missing summary vectors for a well become all-NaN columns.
                for prod_key in keys:
                    try:
                        df[f"{prod_key}"] = self._eclsum.get_values(
                            f"{prod_key}:{well_name}", report_only=True
                        )
                    except KeyError:
                        df[f"{prod_key}"] = np.nan

                # Find number of leading empty rows (with only nan or 0 values)
                zero = df.fillna(0).eq(0).all(1).sum()

                if zero < df.shape[0]:
                    # If there are no empty rows, prepend one for the start date
                    if zero == 0:
                        df1 = df.head(1)
                        as_list = df1.index.tolist()
                        # NOTE(review): list.index() is given an Index object,
                        # not a scalar; this presumably only works because df1
                        # has exactly one row — confirm.
                        idx = as_list.index(df1.index)
                        as_list[idx] = pd.to_datetime(start_date)
                        df1.index = as_list
                        df = pd.concat([df1, df])
                        for col in df.columns:
                            df[col].values[0] = 0
                        zero = 1

                    # Keep only the last empty row (well activation date)
                    df = df.iloc[max(zero - 1, 0) :]

                    # Assign well targets to the correct schedule dates
                    df = df.shift(-1)
                    # Make sure the row for the final date is not empty
                    df.iloc[-1] = df.iloc[-2]

                # Set columns that have only exact zero values to np.nan
                df.loc[:, (df == 0).all(axis=0)] = np.nan

                df["WELL_NAME"] = well_name

                # Derive phase/type from which rate vectors are positive.
                df["PHASE"] = None
                df.loc[df["WOPR"] > 0, "PHASE"] = "OIL"
                df.loc[df["WWIR"] > 0, "PHASE"] = "WATER"
                df.loc[df["WGIR"] > 0, "PHASE"] = "GAS"
                df["TYPE"] = None
                df.loc[df["WOPR"] > 0, "TYPE"] = "OP"
                df.loc[df["WWIR"] > 0, "TYPE"] = "WI"
                df.loc[df["WGIR"] > 0, "TYPE"] = "GI"
                # make sure the correct well type is set also when the well is shut in
                df[["PHASE", "TYPE"]] = df[["PHASE", "TYPE"]].fillna(method="backfill")
                df[["PHASE", "TYPE"]] = df[["PHASE", "TYPE"]].fillna(method="ffill")

                # NOTE(review): DataFrame.append is deprecated in recent pandas;
                # pd.concat is the forward-compatible replacement.
                df_production_data = df_production_data.append(df)

        if df_production_data["WSTAT"].isna().all():
            warnings.warn(
                "No WSTAT:* summary vectors in input case - setting default well status to OPEN."
            )
            wstat_default = "OPEN"
        else:
            wstat_default = "STOP"

        df_production_data["WSTAT"] = df_production_data["WSTAT"].map(
            {
                1: "OPEN",  # Producer OPEN
                2: "OPEN",  # Injector OPEN
                3: "SHUT",
                4: "STOP",
                5: "SHUT",  # PSHUT
                6: "STOP",  # PSTOP
                np.nan: wstat_default,
            }
        )

        # ensure that a type is assigned also if a well is never activated
        df_production_data[["PHASE", "TYPE"]] = df_production_data[
            ["PHASE", "TYPE"]
        ].fillna(method="backfill")
        df_production_data[["PHASE", "TYPE"]] = df_production_data[
            ["PHASE", "TYPE"]
        ].fillna(method="ffill")

        df_production_data["date"] = df_production_data.index
        df_production_data["date"] = pd.to_datetime(df_production_data["date"]).dt.date

        return df_production_data

    def _faults(self) -> pd.DataFrame:
        """
        Function to read fault plane data using ecl2df.

        Returns:
            A dataframe with columns NAME, X, Y, Z with data for fault planes
        """
        eclfile = EclFiles(self._input_case)
        df_fault_keyword = faults.df(eclfile)

        points = []
        for _, row in df_fault_keyword.iterrows():
            # FAULTS keyword indices are 1-based; shift to 0-based.
            i = row["I"] - 1
            j = row["J"] - 1
            k = row["K"] - 1

            points.append((row["NAME"], i, j, k))

            # Add the neighbouring cell on the face side as well.
            if row["FACE"] == "X" or row["FACE"] == "X+":
                points.append((row["NAME"], i + 1, j, k))
            elif row["FACE"] == "Y" or row["FACE"] == "Y+":
                points.append((row["NAME"], i, j + 1, k))
            elif row["FACE"] == "Z" or row["FACE"] == "Z+":
                points.append((row["NAME"], i, j, k + 1))
            elif row["FACE"] == "X-":
                points.append((row["NAME"], i - 1, j, k))
            elif row["FACE"] == "Y-":
                points.append((row["NAME"], i, j - 1, k))
            elif row["FACE"] == "Z-":
                points.append((row["NAME"], i, j, k - 1))
            else:
                raise ValueError(
                    f"Could not interpret '{row['FACE']}' while reading the FAULTS keyword."
                )

        df_faults = pd.DataFrame.from_records(points, columns=["NAME", "I", "J", "K"])

        if not df_faults.empty:
            df_faults[["X", "Y", "Z"]] = pd.DataFrame(
                df_faults.apply(
                    lambda row: list(
                        self._grid.get_xyz(ijk=(row["I"], row["J"], row["K"]))
                    ),
                    axis=1,
                ).values.tolist()
            )

        return df_faults.drop(["I", "J", "K"], axis=1)

    def _grid_cell_bounding_boxes(self) -> np.ndarray:
        """
        Function to get the bounding box (x, y and z min + max)
        for all grid cells

        Returns:
            A (active grid cells x 6) numpy array with columns
            [xmin, xmax, ymin, ymax, zmin, zmax]
        """
        # Each active cell contributes its 8 corner points as rows of xyz.
        xyz = np.empty((8 * self._grid.get_num_active(), 3))
        for active_index in range(self._grid.get_num_active()):
            for corner in range(0, 8):
                xyz[active_index * 8 + corner, :] = self._grid.get_cell_corner(
                    corner, active_index=active_index
                )

        # Reduce the 8 corners of every cell to a per-axis min/max.
        xmin = xyz[:, 0].reshape(-1, 8).min(axis=1)
        xmax = xyz[:, 0].reshape(-1, 8).max(axis=1)
        ymin = xyz[:, 1].reshape(-1, 8).min(axis=1)
        ymax = xyz[:, 1].reshape(-1, 8).max(axis=1)
        zmin = xyz[:, 2].reshape(-1, 8).min(axis=1)
        zmax = xyz[:, 2].reshape(-1, 8).max(axis=1)

        return np.vstack([xmin, xmax, ymin, ymax, zmin, zmax]).T

    def _get_start_date(self):
        # Simulation start date as reported by the loaded summary file.
        return self._eclsum.start_date

    @property
    def grid_cell_bounding_boxes(self) -> np.ndarray:
        """Boundingboxes for all gridcells"""
        return self._grid_cell_bounding_boxes()

    @property
    def faults(self) -> pd.DataFrame:
        """dataframe with all fault data"""
        return self._faults()

    @property
    def production(self) -> pd.DataFrame:
        """dataframe with all production data"""
        return self._production_data()

    @property
    def coordinates(self) -> pd.DataFrame:
        """dataframe with all coordinates"""
        return self._coordinates()
class SumTest(EclTest):
    """Integration tests for EclSum against bundled Statoil/ECLIPSE test cases."""

    def setUp(self):
        # `case`, `base` and `path` are module-level constants (outside this view).
        self.case = self.createTestPath(case)
        self.ecl_sum = EclSum(self.case)

        self.assertIsInstance(self.ecl_sum, EclSum)

    def test_load(self):
        self.assertIsNotNone(self.ecl_sum, "Load failed")

    def test_invalid(self):
        with self.assertRaises(IOError):
            sum = EclSum("Does/not/exist")

    def test_KeyError(self):
        sum = self.ecl_sum
        with self.assertRaises(KeyError):
            v = sum.numpy_vector("KeyMissing")

        with self.assertRaises(KeyError):
            v = sum.get_interp("Missing" , days = 750)

        with self.assertRaises(KeyError):
            v = sum.get_interp_vector("Missing" , days_list = [750])

    def test_contains(self):
        self.assertTrue( "FOPT" in self.ecl_sum)
        self.assertFalse( "MISSING" in self.ecl_sum )

    def test_interp(self):
        # Interpolation by days and by date; expected values come from the fixture case.
        sum = self.ecl_sum
        self.assertAlmostEqual(sum.get_interp("WWCT:OP_3", days=750), 0.11719122)
        self.assertAlmostEqual(sum.get_interp("WWCT:OP_3", date=datetime.date(2004, 1, 1)), 0.603358387947)

        v = sum.get_interp_vector("WOPT:OP_1", days_list=[100, 200, 400])
        self.assertAlmostEqualList([805817.11875, 1614955.34677419, 3289267.67857143 ], v)

        v = sum.get_interp_vector("WGPT:OP_2", date_list=[datetime.date(2002, 1, 1), datetime.date(2003, 1, 1), datetime.date(2004, 1, 1)])
        self.assertAlmostEqualList(v, [8.20773632e+08, 9.68444032e+08, 1.02515213e+09])

        self.assertEqual(sum.get_interp("FOPT" , days = 0) , 0)
        self.assertEqual(sum.get_interp("WOPR:OP_1" , days = 0) , 0)
        self.assertEqual(sum.get_interp("WOPR:OP_1" , date=datetime.date(2000,1,1)) , 0)
        self.assertEqual(sum.get_interp("WOPR:OP_1" , days = 31) , 7996)
        self.assertEqual(sum.get_interp("WOPR:OP_1" , date=datetime.date(2000,2,1)) , 7996)

        FPR = sum.numpy_vector("FPR")
        self.assertFloatEqual(sum.get_interp("FPR" , days = 0) , FPR[0])
        self.assertFloatEqual(sum.get_interp("FPR" , days = 31) , FPR[1])

        # Missing and conflicting time arguments must raise.
        with self.assertRaises(ValueError):
            sum.get_interp("WOPR:OP_1")

        with self.assertRaises(ValueError):
            sum.get_interp("WOPR:OP_1" , days=10 , date = datetime.date(2000,1,1))

    def test_LLINEAR(self):
        sum = EclSum( self.createTestPath("Statoil/ECLIPSE/Heidrun/LGRISSUE/EM-LTAA-ISEG_CARFIN_NWPROPS"))
        self.assertTrue( sum.has_key("LLINEARS") )

    def test_wells(self):
        wells = self.ecl_sum.wells()
        wells.sort()
        self.assertListEqual([well for well in wells], ["OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "WI_1", "WI_2", "WI_3"])

        wells = self.ecl_sum.wells(pattern="*_3")
        wells.sort()
        self.assertListEqual([well for well in wells], ["OP_3", "WI_3"])

        groups = self.ecl_sum.groups()
        groups.sort()
        self.assertListEqual([group for group in groups], ['GMWIN', 'OP', 'WI'])

    def test_last( self ):
        last = self.ecl_sum.get_last("FOPT")
        self.assertFloatEqual(last.value, 38006336.0)
        self.assertFloatEqual(last.days, 1826.0)
        self.assertEqual(last.date, datetime.datetime(2004, 12, 31, 0, 0, 0))

        self.assertFloatEqual(self.ecl_sum.last_value("FGPT"), 6605249024.0)
        self.assertEqual( len(self.ecl_sum) , 63 )

    def test_dates( self ):
        sum = self.ecl_sum
        d = sum.dates

        self.assertEqual(d[0], datetime.datetime(2000, 1, 1, 0, 0, 0))
        self.assertEqual(d[62], datetime.datetime(2004, 12, 31, 0, 0, 0))
        self.assertEqual(len(d), 63)
        self.assertEqual(d[25], datetime.datetime(2001, 12, 1, 0, 0, 0))
        self.assertEqual(sum.iget_date(25), datetime.datetime(2001, 12, 1, 0, 0, 0))

        mpl_dates = sum.mpl_dates
        self.assertAlmostEqual(mpl_dates[25], 730820)

        days = sum.days
        self.assertAlmostEqual(days[50], 1461)

        self.assertEqual(sum.start_time, datetime.datetime(2000, 1, 1, 0, 0, 0))
        self.assertEqual(sum.end_time, datetime.datetime(2004, 12, 31, 0, 0, 0))
        self.assertTrue(sum.check_sim_time(datetime.datetime(2004, 12, 31, 0, 0, 0)))
        self.assertEqual(sum.end_date , datetime.date(2004, 12, 31))

    def test_dates2( self ):
        sum = EclSum(self.createTestPath("Statoil/ECLIPSE/FF12/FF12_2013B3_AMAP2"))
        self.assertEqual(sum.end_date , datetime.date(2045, 1, 1))

    def test_keys(self):
        sum = self.ecl_sum
        self.assertRaises(KeyError, sum.__getitem__, "BJARNE")

        v = sum["FOPT"]
        self.assertEqual(len(v), 63)

    def test_index(self):
        sum = self.ecl_sum
        index = sum.get_key_index("TCPUDAY")
        self.assertEqual(index, 10239)

    def test_report(self):
        sum = self.ecl_sum
        # get_report returns -1 for dates/days not matching any report step.
        self.assertEqual(sum.get_report(date=datetime.date(2000, 10, 1)), 10)
        self.assertEqual(sum.get_report(date=datetime.date(2000, 10, 3)), -1)
        self.assertEqual(sum.get_report(date=datetime.date(1980, 10, 3)), -1)
        self.assertEqual(sum.get_report(date=datetime.date(2012, 10, 3)), -1)

        self.assertEqual(sum.get_report(days=91), 3)
        self.assertEqual(sum.get_report(days=92), -1)

        self.assertAlmostEqual(sum.get_interp("FOPT", days=91), sum.get_from_report("FOPT", 3))
        self.assertEqual(sum.first_report, 1)
        self.assertEqual(sum.last_report, 62)

        self.assertEqual(sum.get_report_time(10), datetime.date(2000, 10, 1))
        self.assertFloatEqual(sum.get_from_report("FOPT", 10), 6.67447e+06)

    def test_fwrite(self):
        ecl_sum = EclSum(self.case, lazy_load=False)
        with TestAreaContext("python/sum-test/fwrite") as work_area:
            ecl_sum.fwrite(ecl_case="CASE")
            self.assertTrue(True)

    def test_block(self):
        # The same block can be addressed by ijk or by cell number.
        sum = self.ecl_sum
        index_ijk = sum.get_key_index("BPR:15,28,1")
        index_num = sum.get_key_index("BPR:1095")
        self.assertEqual(index_ijk, index_num)

    def test_restart(self):
        hist = EclSum(self.createTestPath("Statoil/ECLIPSE/sum-restart/history/T07-4A-W2011-18-P1"))
        base = EclSum(self.createTestPath("Statoil/ECLIPSE/sum-restart/prediction/BASECASE"))
        pred = EclSum(self.createTestPath("Statoil/ECLIPSE/sum-restart/prediction/BASECASE"), include_restart=False)

        self.assertIsNotNone(hist)
        self.assertIsNotNone(base)
        self.assertIsNotNone(pred)

    def test_case1(self ):
        self.assertTrue(self.ecl_sum.path == self.createTestPath(path))
        self.assertTrue(self.ecl_sum.base == base)
        self.assertTrue(self.ecl_sum.case == self.createTestPath(case))
        self.assertTrue(self.ecl_sum.abs_path == self.createTestPath(path))

    def test_case2( self ):
        # When loaded from the cwd, path is None but abs_path is resolved.
        cwd = os.getcwd()
        os.chdir(self.createTestPath(path))
        sum = EclSum(base)
        self.assertIsNone(sum.path)
        self.assertTrue(sum.base == base)
        self.assertTrue(sum.case == base)
        self.assertTrue(sum.abs_path == self.createTestPath(path))
        os.chdir(cwd)

    def test_var_properties( self ):
        sum = self.ecl_sum
        self.assertRaises(KeyError, sum.smspec_node, "BJARNE")

        node = sum.smspec_node("FOPT")
        self.assertTrue(node.isTotal())
        self.assertFalse(node.isHistorical())

        node = sum.smspec_node("FOPR")
        self.assertFalse(node.isTotal())
        self.assertFalse(node.isHistorical())
        self.assertTrue(node.keyword == "FOPR")

        node = sum.smspec_node("FOPRH")
        self.assertFalse(node.isTotal())
        self.assertTrue(node.isHistorical())
        self.assertTrue(node.isRate())
        self.assertTrue(node.keyword == "FOPRH")

        node = sum.smspec_node("WOPR:OP_1")
        self.assertFalse(node.isTotal())
        self.assertTrue(node.isRate())
        self.assertTrue(node.keyword == "WOPR")

        node = sum.smspec_node("WOPT:OP_1")
        self.assertTrue(node.isTotal())
        self.assertFalse(node.isRate())
        self.assertTrue(node.unit == "SM3")
        self.assertTrue(node.wgname == "OP_1")
        self.assertTrue(node.keyword == "WOPT")
        self.assertTrue(sum.unit("FOPR") == "SM3/DAY")

        node = sum.smspec_node("FOPTH")
        self.assertTrue(node.isTotal())
        self.assertFalse(node.isRate())
        self.assertIsNone(node.wgname)

        node = sum.smspec_node("BPR:1095")
        self.assertEqual(node.num, 1095)

    def test_stringlist_gc(self):
        # A string copied out of the list must survive the list's destruction.
        sum = EclSum(self.case)
        wells = sum.wells()
        well1 = wells[0]
        del wells
        self.assertTrue(well1 == "OP_1")

    def test_stringlist_reference(self):
        sum = EclSum(self.case)
        wells = sum.wells()
        self.assertListEqual([well for well in wells], ['OP_1', 'OP_2', 'OP_3', 'OP_4', 'OP_5', 'WI_1', 'WI_2', 'WI_3'])
        self.assertIsInstance(wells, StringList)

    def test_stringlist_setitem(self):
        # Assigning into the list must not mutate strings previously read out.
        sum = EclSum(self.case)
        wells = sum.wells()
        wells[0] = "Bjarne"
        well0 = wells[0]
        self.assertTrue(well0 == "Bjarne")
        self.assertTrue(wells[0] == "Bjarne")
        wells[0] = "XXX"
        self.assertTrue(well0 == "Bjarne")
        self.assertTrue(wells[0] == "XXX")

    def test_segment(self):
        sum = EclSum(self.createTestPath("Statoil/ECLIPSE/Oseberg/F8MLT/F8MLT-F4"))
        segment_vars = sum.keys("SOFR:F-8:*")
        self.assertIn("SOFR:F-8:1", segment_vars)
        for var in segment_vars:
            tmp = var.split(":")
            nr = int(tmp[2])
            self.assertTrue(nr >= 0)

    def test_return_types(self):
        self.assertIsInstance(self.ecl_sum.alloc_time_vector(True), TimeVector)
        key_index = self.ecl_sum.get_general_var_index("FOPT")
        self.assertIsInstance(self.ecl_sum.alloc_data_vector(key_index, True), DoubleVector)

    def test_timeRange(self):
        sum = EclSum(self.case)
        # NOTE(review): only the first timeRange call can raise here; the
        # following three are unreachable once TypeError is raised — confirm
        # whether each was meant to be in its own assertRaises block.
        with self.assertRaises(TypeError):
            trange = sum.timeRange(interval = "1")
            trange = sum.timeRange(interval = "1X")
            trange = sum.timeRange(interval = "YY")
            trange = sum.timeRange(interval = "MY")

        with self.assertRaises(ValueError):
            trange = sum.timeRange( start = datetime.datetime(2000,1,1) , end = datetime.datetime(1999,1,1) )

        sim_start = datetime.datetime(2000, 1, 1, 0, 0, 0)
        sim_end = datetime.datetime(2004, 12, 31, 0, 0, 0)
        trange = sum.timeRange( interval = "1Y")
        self.assertTrue( trange[0] == datetime.date( 2000 , 1 , 1 ))
        self.assertTrue( trange[1] == datetime.date( 2001 , 1 , 1 ))
        self.assertTrue( trange[2] == datetime.date( 2002 , 1 , 1 ))
        self.assertTrue( trange[3] == datetime.date( 2003 , 1 , 1 ))
        self.assertTrue( trange[4] == datetime.date( 2004 , 1 , 1 ))
        self.assertTrue( trange[5] == datetime.date( 2005 , 1 , 1 ))

        trange = sum.timeRange( interval = "1M")
        self.assertTrue( trange[0] == datetime.date( 2000 , 1 , 1 ))
        self.assertTrue( trange[-1] == datetime.date( 2005 , 1 , 1 ))

        # Ranges are snapped outward to whole interval boundaries.
        trange = sum.timeRange( start = datetime.date( 2002 , 1 , 15), interval = "1M")
        self.assertTrue( trange[0] == datetime.date( 2002 , 1 , 1 ))
        self.assertTrue( trange[-1] == datetime.date( 2005 , 1 , 1 ))

        trange = sum.timeRange( start = datetime.date( 2002 , 1 , 15) , end = datetime.date( 2003 , 1 , 15), interval = "1M")
        self.assertTrue( trange[0] == datetime.date( 2002 , 1 , 1 ))
        self.assertTrue( trange[-1] == datetime.date( 2003 , 2 , 1 ))

        trange = sum.timeRange( start = datetime.date( 2002 , 1 , 15) , end = datetime.datetime( 2003 , 1 , 15,0,0,0), interval = "1M")
        self.assertTrue( trange[0] == datetime.date( 2002 , 1 , 1 ))
        self.assertTrue( trange[-1] == datetime.date( 2003 , 2 , 1 ))

    # Loading this dataset is a test of loading a case where one report step is missing.
    def test_Heidrun(self):
        sum = EclSum( self.createTestPath("Statoil/ECLIPSE/Heidrun/Summary/FF12_2013B3_CLEAN_RS"))
        self.assertEqual( 452 , len(sum))
        self.assertFloatEqual( 1.8533144e+8 , sum.last_value("FOPT"))

        trange = sum.timeRange( start = datetime.date( 2015 , 1 , 1), interval = "1M")
        self.assertTrue( trange[0] == datetime.date( 2016 , 2 , 1 ))
        for t in trange:
            sum.get_interp( "FOPT" , date = t )

    def test_regularProduction(self):
        sum = EclSum(self.case)
        with self.assertRaises(TypeError):
            trange = TimeVector.createRegular( sum.start_time , sum.end_time , "1M" )
            prod = sum.blockedProduction("FOPR" , trange)

        with self.assertRaises(KeyError):
            trange = TimeVector.createRegular( sum.start_time , sum.end_time , "1M" )
            prod = sum.blockedProduction("NoNotThis" , trange)

        trange = sum.timeRange(interval = "2Y")
        self.assertTrue( trange[0] == datetime.date( 2000 , 1 , 1 ))
        self.assertTrue( trange[-1] == datetime.date( 2006 , 1 , 1 ))

        trange = sum.timeRange(interval = "5Y")
        self.assertTrue( trange[0] == datetime.date( 2000 , 1 , 1 ))
        self.assertTrue( trange[-1] == datetime.date( 2005 , 1 , 1 ))

        # Per-well blocked production must sum to field and group totals.
        trange = sum.timeRange(interval = "6M")
        wprod1 = sum.blockedProduction("WOPT:OP_1" , trange)
        wprod2 = sum.blockedProduction("WOPT:OP_2" , trange)
        wprod3 = sum.blockedProduction("WOPT:OP_3" , trange)
        wprod4 = sum.blockedProduction("WOPT:OP_4" , trange)
        wprod5 = sum.blockedProduction("WOPT:OP_5" , trange)
        fprod = sum.blockedProduction("FOPT" , trange)
        gprod = sum.blockedProduction("GOPT:OP" , trange)
        wprod = wprod1 + wprod2 + wprod3 + wprod4 + wprod5

        for (w,f,g) in zip(wprod, fprod,gprod):
            self.assertFloatEqual( w , f )
            self.assertFloatEqual( w , g )

    def test_writer(self):
        writer = EclSum.writer("CASE" , datetime.date( 2000 , 1 , 1) , 10 , 10 , 5)
        self.assertIsInstance(self.ecl_sum, EclSum)

        writer.addVariable( "FOPT" )
        self.assertTrue( writer.has_key( "FOPT" ))

        writer.addTStep( 1 , 100 )

    def test_aquifer(self):
        case = EclSum( self.createTestPath( "Statoil/ECLIPSE/Aquifer/06_PRESSURE_R009-0"))
        self.assertTrue( "AAQR:2" in case )

    def test_restart_mapping(self):
        history = EclSum( self.createTestPath( "Statoil/ECLIPSE/SummaryRestart/iter-1/NOR-2013A_R007-0") )
        total = EclSum( self.createTestPath( "Statoil/ECLIPSE/SummaryRestart/Prediction/NOR-2013A_R007_PRED-0") , include_restart = True)

        history_dates = history.get_dates( )
        total_dates = total.get_dates( )
        for i in range(len(history_dates)):
            self.assertEqual( history_dates[i] , total_dates[i] )

        keys = history.keys( pattern = "W*")
        for key in keys:
            if key in total:
                self.assertEqual( history.iget( key , 5 ) , total.iget( key , 5 ))

        self.assertFalse( "WGPR:NOT_21_D" in history )
        self.assertTrue( "WGPR:NOT_21_D" in total )

        # Keys absent from the restart history fall back to the node default.
        node = total.smspec_node("WGPR:NOT_21_D")
        self.assertEqual( total.iget( "WGPR:NOT_21_D", 5) , node.default)

    def test_write(self):
        with TestAreaContext("my_space") as area:
            intersect_summary = EclSum( self.createTestPath( "Statoil/ECLIPSE/SummaryRestart/iter-1/NOR-2013A_R007-0"), lazy_load=False )
            self.assertIsNotNone(intersect_summary)

            write_location = os.path.join(os.getcwd(), "CASE")
            intersect_summary.fwrite(ecl_case=write_location)

            reloaded_summary = EclSum(write_location)
            self.assertEqual(intersect_summary.keys(), reloaded_summary.keys())

    def test_ix_case(self):
        intersect_summary = EclSum(self.createTestPath("Statoil/ECLIPSE/ix/summary/CREATE_REGION_AROUND_WELL"))
        self.assertIsNotNone(intersect_summary)

        self.assertTrue(
            "HWELL_PROD" in
            [intersect_summary.smspec_node(key).wgname for key in intersect_summary.keys()]
        )

        eclipse_summary = EclSum(self.createTestPath("Statoil/ECLIPSE/ix/summary/ECL100/E100_CREATE_REGION_AROUND_WELL"))
        self.assertIsNotNone(eclipse_summary)

        # ECL100 truncates well names to 8 chars; pad back before comparing.
        hwell_padder = lambda key : key if key.split(":")[-1] != "HWELL_PR" else key + "OD"
        self.assertEqual(
            intersect_summary.keys("WWCT*"),
            list(map(hwell_padder, eclipse_summary.keys("WWCT*")))
        )

    def test_ix_write(self):
        for data_set in [
                "Statoil/ECLIPSE/ix/summary/CREATE_REGION_AROUND_WELL",
                "Statoil/ECLIPSE/ix/troll/IX_NOPH3_R04_75X75X1_GRID2.SMSPEC"
        ]:
            with TestAreaContext("my_space" + data_set.split("/")[-1]) as area:
                intersect_summary = EclSum(self.createTestPath(data_set), lazy_load=False)
                self.assertIsNotNone(intersect_summary)

                write_location = os.path.join(os.getcwd(), "CASE")
                intersect_summary.fwrite(ecl_case=write_location)

                reloaded_summary = EclSum(write_location)
                self.assertEqual(
                    list(intersect_summary.keys()),
                    list(reloaded_summary.keys())
                )

    def test_ix_caseII(self):
        troll_summary = EclSum( self.createTestPath("Statoil/ECLIPSE/ix/troll/IX_NOPH3_R04_75X75X1_GRID2.SMSPEC"))
        self.assertIsNotNone(troll_summary)
        self.assertTrue("WMCTL:Q21BH1" in list(troll_summary.keys()))

    def test_resample(self):
        # Resample onto a regular N-point time axis and verify against direct interpolation.
        time_points = TimeVector()
        start_time = self.ecl_sum.get_data_start_time()
        end_time = self.ecl_sum.get_end_time()
        delta = end_time - start_time
        N = 25
        time_points.initRange( CTime(start_time),
                               CTime(end_time),
                               CTime(int(delta.total_seconds()/(N - 1))))
        time_points.append(CTime(end_time))
        resampled = self.ecl_sum.resample( "OUTPUT_CASE", time_points )

        for key in self.ecl_sum.keys():
            self.assertIn( key, resampled )

        self.assertEqual(self.ecl_sum.get_data_start_time(), resampled.get_data_start_time())
        delta = self.ecl_sum.get_end_time() - resampled.get_end_time()
        self.assertTrue( delta.total_seconds() <= 1 )

        keys = ["FOPT", "FOPR", "BPR:15,28,1", "WGOR:OP_1"]
        for key in keys:
            for time_index,t in enumerate(time_points):
                self.assertFloatEqual(resampled.iget( key, time_index),
                                      self.ecl_sum.get_interp_direct( key, t))

    def test_summary_units(self):
        self.assertEqual(self.ecl_sum.unit_system, EclUnitTypeEnum.ECL_METRIC_UNITS)

    # The case loaded in this test originates in a simulation
    # which was shut down brutally. This test verifies that we
    # can create a valid ecl_sum instance from what we find.
    def test_broken_case(self):
        ecl_sum = EclSum( self.createTestPath("Statoil/ECLIPSE/SummaryFail3/COMBINED-AUTUMN2018_CARBSENS-0"))
def test_stringlist_reference(self): sum = EclSum(self.case) wells = sum.wells() self.assertListEqual([well for well in wells], ['OP_1', 'OP_2', 'OP_3', 'OP_4', 'OP_5', 'WI_1', 'WI_2', 'WI_3']) self.assertIsInstance(wells, StringList)
def test_stringlist_gc(self): sum = EclSum(self.case) wells = sum.wells() well1 = wells[0] del wells self.assertTrue(well1 == "OP_1")