def __init__(
    self,
    app,
    ensembles: Optional[list] = None,
    csvfile_parameters: Optional[pathlib.Path] = None,
    csvfile_smry: Optional[pathlib.Path] = None,
    time_index: str = "monthly",
    column_keys: Optional[list] = None,
    drop_constants: bool = True,
):
    """Set up parameter and summary-vector models from one of two sources.

    Either `ensembles` (scratch ensemble names resolved through the app's
    shared settings) or `csvfile_parameters` must be given.  In the csv
    case, `csvfile_smry` is optional; when omitted, no time-series model
    is created (`self.vmodel` is None).

    Raises:
        ValueError: if neither `ensembles` nor `csvfile_parameters` is given.
    """
    super().__init__()
    WEBVIZ_ASSETS.add(
        pathlib.Path(webviz_subsurface.__file__).parent
        / "_assets"
        / "css"
        / "container.css"
    )
    self.theme = app.webviz_settings["theme"]
    self.time_index = time_index
    self.column_keys = column_keys
    self.ensembles = ensembles
    self.csvfile_parameters = csvfile_parameters
    self.csvfile_smry = csvfile_smry
    if ensembles is not None:
        self.emodel = EnsembleSetModel(
            ensemble_paths={
                ens: app.webviz_settings["shared_settings"]["scratch_ensembles"][ens]
                for ens in ensembles
            }
        )
        self.pmodel = ParametersModel(
            dataframe=self.emodel.load_parameters(),
            theme=self.theme,
            drop_constants=drop_constants,
        )
        self.vmodel = SimulationTimeSeriesModel(
            dataframe=self.emodel.load_smry(
                time_index=self.time_index, column_keys=self.column_keys
            ),
            theme=self.theme,
        )
    elif self.csvfile_parameters is None:
        raise ValueError("Either ensembles or csvfile_parameters must be specified")
    else:
        self.pmodel = ParametersModel(
            dataframe=read_csv(csvfile_parameters),
            theme=self.theme,
            drop_constants=drop_constants,
        )
        if self.csvfile_smry is not None:
            # NOTE(review): this branch passes theme.plotly_theme while the
            # ensemble branch above passes the full theme object — confirm
            # which one SimulationTimeSeriesModel actually expects.
            self.vmodel = SimulationTimeSeriesModel(
                dataframe=read_csv(csvfile_smry), theme=self.theme.plotly_theme
            )
        else:
            self.vmodel = None
    self.set_callbacks(app)
def __init__(
    self,
    app: dash.Dash,
    ensembles: Optional[list] = None,
    statistics_file: str = "share/results/tables/gridpropstatistics.csv",
    csvfile_statistics: Optional[pathlib.Path] = None,
    csvfile_smry: Optional[pathlib.Path] = None,
    surface_renaming: Optional[dict] = None,
    time_index: str = "monthly",
    column_keys: Optional[list] = None,
):
    """Set up property-statistics and summary models.

    With `ensembles`, data is loaded through an EnsembleSetModel
    (statistics csv per ensemble via `statistics_file`, summary vectors
    via `time_index`/`column_keys`) and per-ensemble surface folders are
    derived.  Otherwise both `csvfile_statistics` and `csvfile_smry` are
    read directly and no surface folders are available.
    """
    super().__init__()
    WEBVIZ_ASSETS.add(
        pathlib.Path(webviz_subsurface.__file__).parent
        / "_assets"
        / "css"
        / "container.css"
    )
    # TODO(Sigurd) fix this once we get a separate webviz_settings parameter
    self.theme: WebvizConfigTheme = app.webviz_settings["theme"]
    self.time_index = time_index
    self.column_keys = column_keys
    self.statistics_file = statistics_file
    self.ensembles = ensembles
    self.csvfile_statistics = csvfile_statistics
    self.csvfile_smry = csvfile_smry
    self.surface_folders: Union[dict, None]
    if ensembles is not None:
        self.emodel = EnsembleSetModel(
            ensemble_paths={
                ens: app.webviz_settings["shared_settings"]["scratch_ensembles"][ens]
                for ens in ensembles
            }
        )
        self.pmodel = PropertyStatisticsModel(
            dataframe=self.emodel.load_csv(
                csv_file=pathlib.Path(self.statistics_file)
            ),
            theme=self.theme,
        )
        self.vmodel = SimulationTimeSeriesModel(
            dataframe=self.emodel.load_smry(
                time_index=self.time_index, column_keys=self.column_keys
            ),
            theme=self.theme,
        )
        self.surface_folders = {
            ens: folder / "share" / "results" / "maps" / ens
            for ens, folder in self.emodel.ens_folders.items()
        }
    else:
        # NOTE(review): both csv files are read unconditionally here — when
        # `ensembles` is omitted, `csvfile_statistics` and `csvfile_smry`
        # must both be provided; there is no explicit argument check.
        self.pmodel = PropertyStatisticsModel(
            dataframe=read_csv(csvfile_statistics), theme=self.theme
        )
        self.vmodel = SimulationTimeSeriesModel(
            dataframe=read_csv(csvfile_smry), theme=self.theme
        )
        self.surface_folders = None
    self.surface_renaming = surface_renaming if surface_renaming else {}
    self.set_callbacks(app)
def __init__(
    self,
    app: dash.Dash,
    ensembles: list,
    wells: Optional[List[str]] = None,
):
    """Load raw-frequency WBHP summary vectors for the requested wells.

    When `wells` is None, all WBHP vectors are requested via the
    "WBHP:*" wildcard; otherwise one "WBHP:<well>" key per well.
    """
    super().__init__()
    self.column_keys = (
        ["WBHP:*"] if wells is None else [f"WBHP:{well}" for well in wells]
    )
    shared_settings = app.webviz_settings["shared_settings"]
    self.emodel = EnsembleSetModel(
        ensemble_paths={
            ens: shared_settings["scratch_ensembles"][ens] for ens in ensembles
        }
    )
    self.smry = self.emodel.load_smry(
        time_index="raw", column_keys=self.column_keys
    )
    self.theme = app.webviz_settings["theme"]
    self.set_callbacks(app)
def __init__(
    self,
    app: dash.Dash,
    csvfile_smry: Optional[Path] = None,
    csvfile_parameters: Optional[Path] = None,
    ensembles: Optional[list] = None,
    column_keys: Optional[list] = None,
    initial_vector: Optional[str] = None,
    sampling: str = "monthly",
    line_shape_fallback: str = "linear",
) -> None:
    """Merge summary vectors with sensitivity parameters for one-by-one plots.

    Data comes either from the csv pair (`csvfile_smry` +
    `csvfile_parameters`) or from `ensembles`; giving both, or neither,
    raises ValueError.  The merged frame (on ENSEMBLE/REAL) feeds the
    tornado plot and the vector dropdown.
    """
    super().__init__()
    self.time_index = sampling
    self.column_keys = column_keys
    self.csvfile_smry = csvfile_smry
    self.csvfile_parameters = csvfile_parameters
    if csvfile_smry and ensembles:
        raise ValueError(
            'Incorrect arguments. Either provide a "csvfile_smry" and "csvfile_parameters" or '
            '"ensembles"'
        )
    if csvfile_smry and csvfile_parameters:
        smry = read_csv(csvfile_smry)
        parameters = read_csv(csvfile_parameters)
        # Derive the sensitivity type per row from its SENSCASE value.
        parameters["SENSTYPE"] = parameters.apply(
            lambda row: find_sens_type(row.SENSCASE), axis=1
        )
        # No summary metadata is available for plain csv input.
        self.smry_meta = None
    elif ensembles:
        self.ens_paths = {
            ensemble: app.webviz_settings["shared_settings"]["scratch_ensembles"][
                ensemble
            ]
            for ensemble in ensembles
        }
        self.emodel = EnsembleSetModel(ensemble_paths=self.ens_paths)
        smry = self.emodel.load_smry(
            time_index=self.time_index, column_keys=self.column_keys
        )
        self.smry_meta = self.emodel.load_smry_meta(
            column_keys=self.column_keys,
        )
        # Extract realizations and sensitivity information
        parameters = get_realizations(
            ensemble_paths=self.ens_paths, ensemble_set_name="EnsembleSet"
        )
    else:
        raise ValueError(
            'Incorrect arguments. Either provide a "csvfile_smry" and "csvfile_parameters" or '
            '"ensembles"'
        )
    self.data = pd.merge(smry, parameters, on=["ENSEMBLE", "REAL"])
    # Keep only plottable vectors: drop bookkeeping columns and any vector
    # whose counterpart (per historical_vector) is also present in the data
    # — presumably this filters out historical vectors; confirm against
    # historical_vector's contract.
    self.smry_cols = [
        c
        for c in self.data.columns
        if c not in ReservoirSimulationTimeSeriesOneByOne.ENSEMBLE_COLUMNS
        and historical_vector(c, self.smry_meta, False) not in self.data.columns
    ]
    # Fall back to the first available vector when the requested initial
    # vector is absent from the data.
    self.initial_vector = (
        initial_vector
        if initial_vector and initial_vector in self.smry_cols
        else self.smry_cols[0]
    )
    self.line_shape_fallback = set_simulation_line_shape_fallback(
        line_shape_fallback
    )
    self.tornadoplot = TornadoPlot(app, parameters, allow_click=True)
    self.uid = uuid4()
    self.theme = app.webviz_settings["theme"]
    self.set_callbacks(app)
def __init__(
    self,
    app,
    ensembles: Optional[list] = None,
    parameter_csv: Optional[Path] = None,
    response_csv: Optional[Path] = None,
    response_file: Optional[str] = None,
    response_filters: Optional[dict] = None,
    response_ignore: Optional[list] = None,
    response_include: Optional[list] = None,
    parameter_ignore: Optional[list] = None,
    column_keys: Optional[list] = None,
    sampling: str = "monthly",
    aggregation: str = "sum",
    no_responses=False,
):
    """Load parameter and (optionally) response data for correlation views.

    Input is either `parameter_csv` (+ `response_csv` unless
    `no_responses`) or `ensembles` (+ `response_file` or summary vectors).
    Mixing the two modes, or providing neither, raises ValueError.
    """
    super().__init__()
    self.parameter_csv = parameter_csv if parameter_csv else None
    self.response_csv = response_csv if response_csv else None
    self.response_file = response_file if response_file else None
    self.response_filters = response_filters if response_filters else {}
    self.response_ignore = response_ignore if response_ignore else None
    self.parameter_ignore = parameter_ignore if parameter_ignore else None
    self.column_keys = column_keys
    self.time_index = sampling
    self.aggregation = aggregation
    self.no_responses = no_responses
    if response_ignore and response_include:
        raise ValueError(
            'Incorrect argument. Either provide "response_include", '
            '"response_ignore" or neither'
        )
    if parameter_csv:
        if ensembles or response_file:
            raise ValueError(
                'Incorrect arguments. Either provide "parameter_csv" or '
                '"ensembles and/or response_file".'
            )
        if not self.no_responses:
            if self.response_csv:
                self.responsedf = read_csv(self.response_csv)
            else:
                raise ValueError("Incorrect arguments. Missing response_csv.")
        self.parameterdf = read_csv(self.parameter_csv)
    elif ensembles:
        if self.response_csv:
            raise ValueError(
                'Incorrect arguments. Either provide "response_csv" or '
                '"ensembles and/or response_file".'
            )
        self.emodel = EnsembleSetModel(
            ensemble_paths={
                ens: app.webviz_settings["shared_settings"]["scratch_ensembles"][
                    ens
                ]
                for ens in ensembles
            }
        )
        self.parameterdf = self.emodel.load_parameters()
        if not self.no_responses:
            if self.response_file:
                self.responsedf = self.emodel.load_csv(csv_file=response_file)
            else:
                self.responsedf = self.emodel.load_smry(
                    time_index=self.time_index, column_keys=self.column_keys
                )
                # Summary responses must be reduced to a single date.
                self.response_filters["DATE"] = "single"
    else:
        # Fixed: the two fragments previously concatenated without a space
        # ("arguments.You").
        raise ValueError(
            "Incorrect arguments. "
            'You have to define at least "ensembles" or "parameter_csv".'
        )
    if not self.no_responses:
        self.check_runs()
        self.check_response_filters()
        if response_ignore:
            self.responsedf.drop(
                response_ignore, errors="ignore", axis=1, inplace=True
            )
        if response_include:
            # Use self.response_filters (defaults to {}) so a missing
            # response_filters argument does not crash here.
            self.responsedf.drop(
                self.responsedf.columns.difference(
                    [
                        "REAL",
                        "ENSEMBLE",
                        *response_include,
                        *list(self.response_filters.keys()),
                    ]
                ),
                errors="ignore",
                axis=1,
                inplace=True,
            )
    if parameter_ignore:
        self.parameterdf.drop(parameter_ignore, axis=1, inplace=True)
    # Fixed: self.ensembles is needed by the color-index lambda below but was
    # never assigned (AttributeError). Derive it from the loaded parameter
    # table so both the csv and ensemble input modes work.
    self.ensembles = list(self.parameterdf["ENSEMBLE"].unique())
    # Integer value for each ensemble to be used for ensemble colormap
    # self.uuid("COLOR") used to mitigate risk of already having a column named "COLOR" in the
    # DataFrame.
    self.parameterdf[self.uuid("COLOR")] = self.parameterdf.apply(
        lambda row: self.ensembles.index(row["ENSEMBLE"]), axis=1
    )
    self.theme = app.webviz_settings["theme"]
    self.set_callbacks(app)
def __init__(
    self,
    app,
    parameter_csv: Optional[Path] = None,
    response_csv: Optional[Path] = None,
    ensembles: Optional[list] = None,
    response_file: Optional[str] = None,
    response_filters: Optional[dict] = None,
    response_ignore: Optional[list] = None,
    response_include: Optional[list] = None,
    column_keys: Optional[list] = None,
    sampling: str = "monthly",
    aggregation: str = "sum",
    corr_method: str = "pearson",
):
    """Load parameter and response data for parameter-response correlation.

    Input is either the csv pair (`parameter_csv` + `response_csv`) or
    `ensembles` with responses from `response_file` or summary vectors.
    Mixing the two modes, or providing neither, raises ValueError.
    """
    super().__init__()
    self.parameter_csv = parameter_csv if parameter_csv else None
    self.response_csv = response_csv if response_csv else None
    self.response_file = response_file if response_file else None
    self.response_filters = response_filters if response_filters else {}
    self.response_ignore = response_ignore if response_ignore else None
    self.column_keys = column_keys
    self.time_index = sampling
    self.corr_method = corr_method
    self.aggregation = aggregation
    if response_ignore and response_include:
        raise ValueError(
            'Incorrect argument. Either provide "response_include", '
            '"response_ignore" or neither'
        )
    if parameter_csv and response_csv:
        if ensembles or response_file:
            raise ValueError(
                'Incorrect arguments. Either provide "csv files" or '
                '"ensembles and response_file".'
            )
        self.parameterdf = read_csv(self.parameter_csv)
        self.responsedf = read_csv(self.response_csv)
    elif ensembles:
        self.ens_paths = {
            ens: app.webviz_settings["shared_settings"]["scratch_ensembles"][ens]
            for ens in ensembles
        }
        self.parameterdf = load_parameters(
            ensemble_paths=self.ens_paths, ensemble_set_name="EnsembleSet"
        )
        if self.response_file:
            self.responsedf = load_csv(
                ensemble_paths=self.ens_paths,
                csv_file=response_file,
                ensemble_set_name="EnsembleSet",
            )
        else:
            self.emodel = EnsembleSetModel(ensemble_paths=self.ens_paths)
            self.responsedf = self.emodel.load_smry(
                column_keys=self.column_keys,
                time_index=self.time_index,
            )
            # Summary responses must be reduced to a single date.
            self.response_filters["DATE"] = "single"
    else:
        raise ValueError(
            'Incorrect arguments. Either provide "csv files" or "ensembles and response_file".'
        )
    self.check_runs()
    self.check_response_filters()
    if response_ignore:
        self.responsedf.drop(response_ignore, errors="ignore", axis=1, inplace=True)
    if response_include:
        # Use self.response_filters (defaults to {}) so a missing
        # response_filters argument does not crash here.
        self.responsedf.drop(
            self.responsedf.columns.difference(
                [
                    "REAL",
                    "ENSEMBLE",
                    *response_include,
                    *list(self.response_filters.keys()),
                ]
            ),
            errors="ignore",
            axis=1,
            inplace=True,
        )
    self.plotly_theme = app.webviz_settings["theme"].plotly_theme
    self.uid = uuid4()
    self.set_callbacks(app)
def __init__(
    self,
    app: dash.Dash,
    csvfile: Optional[Path] = None,
    ensembles: Optional[list] = None,
    obsfile: Optional[Path] = None,
    column_keys: Optional[list] = None,
    sampling: str = "monthly",
    options: Optional[dict] = None,
    line_shape_fallback: str = "linear",
):
    """Load summary data (csv or ensembles), observations and plot options.

    Exactly one of `csvfile` and `ensembles` must be given; providing both
    or neither raises ValueError.  Vector names starting with "AVG_" or
    "INTVL_" are rejected since those prefixes are generated internally.
    """
    super().__init__()
    self.csvfile = csvfile
    self.obsfile = obsfile
    self.time_index = sampling
    self.column_keys = column_keys
    if csvfile and ensembles:
        raise ValueError(
            'Incorrect arguments. Either provide a "csvfile" or "ensembles"'
        )
    self.observations = {}
    if obsfile:
        self.observations = check_and_format_observations(get_path(self.obsfile))
    self.smry: pd.DataFrame
    self.smry_meta: Union[pd.DataFrame, None]
    if csvfile:
        self.smry = read_csv(csvfile)
        self.smry_meta = None
        # Check of time_index for data to use in resampling. Quite naive as it only checks for
        # unique values of the DATE column, and not per realization.
        #
        # Currently not necessary as we don't allow resampling for average rates and intervals
        # unless we have metadata, which csvfile input currently doesn't support.
        # See: https://github.com/equinor/webviz-subsurface/issues/402
        self.time_index = pd.infer_freq(
            sorted(pd.to_datetime(self.smry["DATE"]).unique())
        )
    elif ensembles:
        self.emodel = EnsembleSetModel(
            ensemble_paths={
                ens: app.webviz_settings["shared_settings"]["scratch_ensembles"][ens]
                for ens in ensembles
            }
        )
        self.smry = self.emodel.load_smry(
            time_index=self.time_index, column_keys=self.column_keys
        )
        self.smry_meta = self.emodel.load_smry_meta(
            column_keys=self.column_keys,
        )
    else:
        raise ValueError(
            'Incorrect arguments. Either provide a "csvfile" or "ensembles"'
        )
    # Fixed: the concatenated message fragments previously ran together
    # ("with'AVG_'", "usedinternally").
    if any(col.startswith(("AVG_", "INTVL_")) for col in self.smry.columns):
        raise ValueError(
            "Your data set includes time series vectors which have names starting with "
            "'AVG_' and/or 'INTVL_'. These prefixes are not allowed, as they are used "
            "internally in the plugin."
        )
    # Keep only plottable vectors: drop bookkeeping columns and any vector
    # whose counterpart (per historical_vector) is also present — presumably
    # this filters out historical vectors; confirm against historical_vector.
    self.smry_cols = [
        c
        for c in self.smry.columns
        if c not in ReservoirSimulationTimeSeries.ENSEMBLE_COLUMNS
        and historical_vector(c, self.smry_meta, False) not in self.smry.columns
    ]
    self.dropdown_options = []
    for vec in self.smry_cols:
        self.dropdown_options.append(
            {"label": f"{simulation_vector_description(vec)} ({vec})", "value": vec}
        )
        if (
            self.smry_meta is not None
            and self.smry_meta.is_total[vec]
            and self.time_index is not None
        ):
            # Get the likely name for equivalent rate vector and make dropdown options.
            # Requires that the time_index was either defined or possible to infer.
            avgrate_vec = rename_vec_from_cum(vector=vec, as_rate=True)
            interval_vec = rename_vec_from_cum(vector=vec, as_rate=False)
            self.dropdown_options.append(
                {
                    "label": f"{simulation_vector_description(avgrate_vec)} ({avgrate_vec})",
                    "value": avgrate_vec,
                }
            )
            self.dropdown_options.append(
                {
                    "label": f"{simulation_vector_description(interval_vec)} ({interval_vec})",
                    "value": interval_vec,
                }
            )
    self.ensembles = list(self.smry["ENSEMBLE"].unique())
    self.theme = app.webviz_settings["theme"]
    self.plot_options = options if options else {}
    # Normalize any configured date to a string; absent/falsy -> None.
    self.plot_options["date"] = (
        str(self.plot_options.get("date"))
        if self.plot_options.get("date")
        else None
    )
    self.line_shape_fallback = set_simulation_line_shape_fallback(
        line_shape_fallback
    )
    # Check if initially plotted vectors exist in data, raise ValueError if not.
    missing_vectors = [
        value
        for key, value in self.plot_options.items()
        if key in ["vector1", "vector2", "vector3"]
        and value not in self.smry_cols
    ]
    if missing_vectors:
        raise ValueError(
            f"Cannot find: {', '.join(missing_vectors)} to plot initially in "
            "ReservoirSimulationTimeSeries. Check that the vectors exist in your data, and "
            "that they are not missing in a non-default column_keys list in the yaml config "
            "file."
        )
    # Delta (ensemble difference) mode only makes sense with >1 ensemble.
    self.allow_delta = len(self.ensembles) > 1
    self.set_callbacks(app)