def ensemble_selector_view(parent: WebvizPluginABC) -> List[Component]:
    return [
        html.Div(
            [
                cyto.Cytoscape(
                    id=parent.uuid("ensemble-selector"),
                    layout={"name": "grid"},
                    className="ert-ensemble-selector-large",
                    stylesheet=assets.ERTSTYLE["ensemble-selector"]["stylesheet"],
                    responsive=False,
                ),
                html.Button(
                    id=parent.uuid("ensemble-selector-button"),
                    className="ert-ensemble-selector-view-toggle",
                    children="Minimize",
                ),
            ],
            id=parent.uuid("ensemble-selector-container"),
            className="ert-ensemble-selector-container-large",
        ),
        dcc.Store(id=parent.uuid("ensemble-selection-store"), storage_type="session"),
        dcc.Store(
            id=parent.uuid("ensemble-view-store"), storage_type="session", data=True
        ),
    ]
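# Usage sketch (assumption, not part of the source): view factories such as
# ensemble_selector_view are typically consumed from a plugin's `layout`
# property. The plugin class name and import style below are illustrative only.
from dash import html
from webviz_config import WebvizPluginABC


class EnsembleOverviewPlugin(WebvizPluginABC):
    @property
    def layout(self) -> html.Div:
        # The factory returns a list of components, so it can be passed
        # directly as the children of a wrapping Div.
        return html.Div(children=ensemble_selector_view(parent=self))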
def parallel_coordinates_view(parent: WebvizPluginABC) -> Component:
    return html.Div(
        className="ert-view-container",
        children=[
            dcc.Graph(
                id={
                    "id": parent.uuid("parallel-coor"),
                    "type": parent.uuid("graph"),
                },
                className="ert-view-cell",
                config={"responsive": True},
            )
        ],
    )
def response_obs_view(parent: WebvizPluginABC) -> List[Component]:
    return [
        html.H5("Observation/Misfits plots"),
        html.Div(
            className="ert-dropdown-container",
            children=[
                html.Label("Response", className="ert-label"),
                dcc.Dropdown(
                    id=parent.uuid("response-selector"),
                    className="ert-dropdown",
                ),
            ],
        ),
        html.Div(
            [
                html.Div(
                    className="ert-graph-options",
                    children=[
                        html.Label("Y-axis type:"),
                        dcc.RadioItems(
                            options=[
                                {"label": key, "value": key}
                                for key in ["linear", "log"]
                            ],
                            value="linear",
                            id=parent.uuid("yaxis-type"),
                        ),
                        html.Label("Misfits Type:"),
                        dcc.RadioItems(
                            options=[
                                {"label": key, "value": key}
                                for key in ["Univariate", "Summary"]
                            ],
                            value="Univariate",
                            id=parent.uuid("misfits-type"),
                        ),
                    ],
                ),
                dcc.Graph(
                    id={
                        "id": parent.uuid("response-graphic"),
                        "type": parent.uuid("graph"),
                    },
                    className="ert-graph",
                ),
            ],
            className="ert-graph-container",
        ),
    ]
def user_download_data(
    data_requested,
    radio_value,
    realizations_check,
    realizations_input,
):
    if data_requested:
        content = get_data(self.csv_file)
        if realizations_check:
            realizations = parse_range(realizations_input)
            if realizations:
                content = content[content["realization"].isin(realizations)]
        if radio_value == "Statistics":
            filename = "objective_statistics.csv"
            content = calculate_statistics(content)
        else:
            filename = "objective_values.csv"
        return WebvizPluginABC.plugin_data_compress(
            [
                {
                    "filename": filename,
                    "content": content.to_csv(),
                }
            ]
        )
    return ""
def user_download_data(data_requested):
    if data_requested:
        return WebvizPluginABC.plugin_data_compress(
            [
                {
                    "filename": Path(self.csv_file).name,
                    "content": get_data(self.csv_file).to_csv(),
                }
            ]
        )
    return ""
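# Registration sketch (assumption, not part of the source): download callbacks
# like user_download_data above are normally defined inside the plugin's
# set_callbacks method and wired to the plugin's built-in download button via
# the plugin_data_output / plugin_data_requested properties of WebvizPluginABC.
def set_callbacks(self, app) -> None:
    @app.callback(self.plugin_data_output, self.plugin_data_requested)
    def user_download_data(data_requested):
        # Body as in the function above; `self` is captured from the enclosing
        # set_callbacks method, which is why the download callbacks in this
        # section reference self without taking it as a parameter.
        ...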
def response_view(parent: WebvizPluginABC, index: int = 0) -> List[Component]:
    return [
        dcc.Store(
            id={"index": index, "type": parent.uuid("response-id-store")}, data=index
        ),
        dbc.Row(
            className="ert-plot-options",
            children=[
                dbc.Col(
                    [html.H4(index)],
                    align="center",
                ),
                dbc.Col(
                    [
                        html.Label("Graph Type:", className="ert-label"),
                    ],
                    width="auto",
                    align="center",
                ),
                dbc.Col(
                    [
                        dcc.RadioItems(
                            options=[
                                {"label": key, "value": key}
                                for key in ["Function plot", "Statistics"]
                            ],
                            value="Function plot",
                            id={"index": index, "type": parent.uuid("plot-type")},
                            persistence="session",
                        ),
                    ],
                    align="center",
                ),
            ],
        ),
        dcc.Graph(
            id={
                "index": index,
                "id": parent.uuid("response-graphic"),
                "type": parent.uuid("graph"),
            },
            config={"responsive": True},
        ),
    ]
def parameter_selector_view(
    parent: WebvizPluginABC, data_type: str = "parameter", suffix: str = ""
) -> Component:
    return html.Div(
        [
            dbc.Row(
                [
                    dbc.Col(
                        html.Label(
                            "Search (Press ENTER to select all matches):",
                            className="ert-label",
                        ),
                        align="left",
                        width="auto",
                    ),
                    dbc.Col(
                        dcc.Input(
                            id=parent.uuid(f"parameter-selector-filter-{suffix}"),
                            type="search",
                            placeholder="Substring...",
                            persistence="session",
                        ),
                        align="left",
                    ),
                    dbc.Col(
                        html.Button(
                            id=parent.uuid(f"parameter-selector-button-{suffix}"),
                            children="Toggle selector visibility",
                        ),
                        align="right",
                    ),
                ],
            ),
            html.Div(
                wcc.Select(
                    id=parent.uuid(f"parameter-selector-multi-{suffix}"),
                    multi=True,
                    size=10,
                    persistence="session",
                ),
                id=parent.uuid(f"container-parameter-selector-multi-{suffix}"),
                className="ert-parameter-selector-container-show",
            ),
            dcc.Dropdown(
                id=parent.uuid(f"parameter-deactivator-{suffix}"),
                multi=True,
                persistence="session",
            ),
            dcc.Store(
                id=parent.uuid(f"parameter-selection-store-{suffix}"),
                storage_type="session",
            ),
            dcc.Store(
                id=parent.uuid(f"parameter-type-store-{suffix}"),
                storage_type="session",
                data=data_type,
            ),
        ],
    )
def correlation_view(parent: WebvizPluginABC, id_view: str) -> Component:
    return html.Div(
        id=id_view,
        className="ert-view-container",
        children=[
            dcc.Graph(
                id={
                    "id": id_view,
                    "type": parent.uuid("graph"),
                },
                className="ert-view-cell",
                config={"responsive": True},
            )
        ],
    )
def user_download_data(data_requested, radio_value):
    if data_requested:
        file_path = (
            self.statistics_file if radio_value == "Statistics" else self.values_file
        )
        return WebvizPluginABC.plugin_data_compress(
            [
                {
                    "filename": Path(file_path).name,
                    "content": get_data(file_path).to_csv(),
                }
            ]
        )
    return ""
def user_download_data(data_requested, radio_value):
    if data_requested:
        if radio_value == "Statistics":
            data = get_data(self.statistics_file).set_index(
                ["summary_key", "batch", "date"]
            )
            file_path = self.statistics_file
        else:
            data = get_data(self.values_file).set_index(["batch", "date"])
            file_path = self.values_file
        return WebvizPluginABC.plugin_data_compress(
            [
                {
                    "filename": Path(file_path).name,
                    "content": data.to_csv(),
                }
            ]
        )
    return ""
def parameter_view(parent: WebvizPluginABC, index: int = 0) -> List[Component]:
    return [
        dcc.Store(
            id={"index": index, "type": parent.uuid("parameter-id-store")}, data=index
        ),
        dbc.Row(
            className="ert-plot-options",
            children=[
                dbc.Col(
                    [
                        dbc.Row(
                            [
                                dbc.Col(
                                    [html.H4(index)],
                                    align="center",
                                ),
                                dbc.Col(
                                    [
                                        html.Label("Plots:"),
                                    ],
                                    width="auto",
                                    align="center",
                                ),
                                dbc.Col(
                                    [
                                        dcc.Checklist(
                                            id={
                                                "index": index,
                                                "type": parent.uuid("hist-check"),
                                            },
                                            options=[
                                                {"label": "histogram", "value": "hist"},
                                                {"label": "kde", "value": "kde"},
                                            ],
                                            value=["hist", "kde"],
                                            persistence="session",
                                        ),
                                    ],
                                    align="center",
                                ),
                            ]
                        )
                    ],
                    width="auto",
                ),
                dbc.Col(
                    [
                        dbc.Row(
                            [
                                dbc.Col(
                                    [
                                        html.Label(
                                            "Number of bins:", className="ert-label"
                                        ),
                                    ],
                                    width="auto",
                                ),
                                dbc.Col(
                                    [
                                        dcc.Input(
                                            id={
                                                "index": index,
                                                "type": parent.uuid("hist-bincount"),
                                            },
                                            type="number",
                                            placeholder="# bins",
                                            min=2,
                                            debounce=True,
                                        ),
                                    ]
                                ),
                            ]
                        )
                    ]
                ),
                dcc.Store(
                    id={"index": index, "type": parent.uuid("bincount-store")},
                    storage_type="session",
                ),
            ],
        ),
        dcc.Graph(
            id={
                "index": index,
                "id": parent.uuid("parameter-scatter"),
                "type": parent.uuid("graph"),
            },
            config={"responsive": True},
            style={"height": "450px"},
        ),
    ]
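# Sketch (assumption, not part of the source): the dict-shaped ids above, with
# "index" and "type" keys, are what Dash pattern-matching callbacks key on, so
# one callback can serve every parameter_view / response_view instance. The
# callback below is hypothetical and only illustrates the wiring.
from dash.dependencies import MATCH, Input, Output


def register_bincount_callback(app, parent) -> None:
    @app.callback(
        Output({"index": MATCH, "type": parent.uuid("bincount-store")}, "data"),
        Input({"index": MATCH, "type": parent.uuid("hist-bincount")}, "value"),
    )
    def _store_bincount(bin_count):
        # Persist the user-selected number of bins for the matching view index
        return bin_count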
def _user_download_data(
    data_requested,
    vector1,
    vector2,
    vector3,
    ensembles,
    calc_mode,
    base_ens,
    delta_ens,
    visualization,
    cum_interval,
):
    """Callback to download data based on selections"""
    # Combine selected vectors
    vectors = [vector1]
    if vector2:
        vectors.append(vector2)
    if vector3:
        vectors.append(vector3)

    # Ensure selected ensembles is a list and prevent update if invalid calc_mode
    if calc_mode == "delta_ensembles":
        ensembles = [base_ens, delta_ens]
    elif calc_mode == "ensembles":
        ensembles = ensembles if isinstance(ensembles, list) else [ensembles]
    else:
        raise PreventUpdate

    dfs = calculate_vector_dataframes(
        smry=self.smry,
        smry_meta=self.smry_meta,
        ensembles=ensembles,
        vectors=vectors,
        calc_mode=calc_mode,
        visualization=visualization,
        time_index=self.time_index,
        cum_interval=cum_interval,
    )
    for vector, df in dfs.items():
        if visualization in ["statistics", "statistics_hist"]:
            dfs[vector]["stat"] = df["stat"].sort_values(
                by=[("", "ENSEMBLE"), ("", "DATE")]
            )
            if vector.startswith(("AVG_", "INTVL_")):
                dfs[vector]["stat"]["", "DATE"] = dfs[vector]["stat"][
                    "", "DATE"
                ].astype(str)
                dfs[vector]["stat"]["", "DATE"] = dfs[vector]["stat"]["", "DATE"].apply(
                    date_to_interval_conversion,
                    vector=vector,
                    interval=cum_interval,
                    as_date=False,
                )
        else:
            dfs[vector]["data"] = df["data"].sort_values(
                by=["ENSEMBLE", "REAL", "DATE"]
            )
            # Reorder columns
            dfs[vector]["data"] = dfs[vector]["data"][
                ["ENSEMBLE", "REAL", "DATE"]
                + [
                    col
                    for col in dfs[vector]["data"].columns
                    if col not in ["ENSEMBLE", "REAL", "DATE"]
                ]
            ]
            if vector.startswith(("AVG_", "INTVL_")):
                dfs[vector]["data"]["DATE"] = dfs[vector]["data"]["DATE"].astype(str)
                dfs[vector]["data"]["DATE"] = dfs[vector]["data"]["DATE"].apply(
                    date_to_interval_conversion,
                    vector=vector,
                    interval=cum_interval,
                    as_date=False,
                )

    # : is replaced with _ in filenames to stay within POSIX portable pathnames
    # (e.g. : is not valid in a Windows path)
    return (
        WebvizPluginABC.plugin_data_compress(
            [
                {
                    "filename": f"{vector.replace(':', '_')}.csv",
                    "content": df.get("stat", df["data"]).to_csv(index=False),
                }
                for vector, df in dfs.items()
            ]
        )
        if data_requested
        else ""
    )
def _user_download_data(
    data_requested: Union[int, None],
    vectors: List[str],
    selected_ensembles: List[str],
    visualization_value: str,
    resampling_frequency_value: str,
    selected_realizations: List[int],
    statistics_calculated_from_value: str,
    delta_ensembles: List[DeltaEnsemble],
    vector_calculator_expressions: List[ExpressionInfo],
) -> Union[EncodedFile, str]:
    """Callback to download data based on selections

    Retrieve vector data based on selected visualizations and filtered realizations

    NOTE:
    * Does not group based on "Group By" - data is stored per vector
    * All statistics included - no filtering on statistics selections
    * No history vector
    * No observation data
    """
    if data_requested is None:
        raise PreventUpdate

    if not isinstance(selected_ensembles, list):
        raise TypeError("ensembles should always be of type list")

    if vectors is None:
        vectors = initial_selected_vectors

    # Retrieve the selected expressions
    selected_expressions = get_selected_expressions(
        vector_calculator_expressions, vectors
    )

    # Convert from string values to enum types
    visualization = VisualizationOptions(visualization_value)
    resampling_frequency = Frequency.from_string_value(resampling_frequency_value)
    statistics_from_option = StatisticsFromOptions(statistics_calculated_from_value)

    # Create dict of derived vectors accessors for selected ensembles
    derived_vectors_accessors: Dict[
        str, DerivedVectorsAccessor
    ] = create_derived_vectors_accessor_dict(
        ensembles=selected_ensembles,
        vectors=vectors,
        provider_set=input_provider_set,
        expressions=selected_expressions,
        delta_ensembles=delta_ensembles,
        resampling_frequency=resampling_frequency,
    )

    # Dict with vector name as key and dataframe data as value
    vector_dataframe_dict: Dict[str, pd.DataFrame] = {}

    # Get all realizations if statistics across all realizations are requested
    is_statistics_from_all_realizations = (
        statistics_from_option == StatisticsFromOptions.ALL_REALIZATIONS
        and visualization
        in [
            VisualizationOptions.FANCHART,
            VisualizationOptions.STATISTICS,
            VisualizationOptions.STATISTICS_AND_REALIZATIONS,
        ]
    )

    # Plotting per derived vectors accessor
    for ensemble, accessor in derived_vectors_accessors.items():
        # Realization query - realizations query for accessor
        # - Get non-filter query, None, if statistics from all realizations is needed
        # - Create valid realizations query for accessor otherwise:
        #   * List[int]: Filtered valid realizations, empty list if none are valid
        #   * None: Get all realizations, i.e. non-filtered query
        realizations_query = (
            None
            if is_statistics_from_all_realizations
            else accessor.create_valid_realizations_query(selected_realizations)
        )

        # If all selected realizations are invalid for accessor - empty list
        if realizations_query == []:
            continue

        # Retrieve vector data from accessor
        vectors_df_list: List[pd.DataFrame] = []
        if accessor.has_provider_vectors():
            vectors_df_list.append(
                accessor.get_provider_vectors_df(realizations=realizations_query)
            )
        if accessor.has_interval_and_average_vectors():
            vectors_df_list.append(
                accessor.create_interval_and_average_vectors_df(
                    realizations=realizations_query
                )
            )
        if accessor.has_vector_calculator_expressions():
            vectors_df_list.append(
                accessor.create_calculated_vectors_df(realizations=realizations_query)
            )

        # Append data for each vector
        for vectors_df in vectors_df_list:
            vector_names = [
                elm for elm in vectors_df.columns if elm not in ["DATE", "REAL"]
            ]

            if visualization in [
                VisualizationOptions.REALIZATIONS,
                VisualizationOptions.STATISTICS_AND_REALIZATIONS,
            ]:
                # NOTE: Should in theory not have situation with query of all
                # realizations if not wanted
                vectors_df_filtered = (
                    vectors_df
                    if realizations_query
                    else vectors_df[vectors_df["REAL"].isin(selected_realizations)]
                )
                for vector in vector_names:
                    vector_df = vectors_df_filtered[["DATE", "REAL", vector]]
                    row_count = vector_df.shape[0]
                    ensemble_name_list = [ensemble] * row_count
                    vector_df.insert(
                        loc=0, column="ENSEMBLE", value=ensemble_name_list
                    )

                    if vector.startswith(("AVG_", "INTVL_")):
                        vector_df["DATE"] = vector_df["DATE"].apply(
                            datetime_to_intervalstr, freq=resampling_frequency
                        )

                    vector_key = vector + "_realizations"
                    if vector_dataframe_dict.get(vector_key) is None:
                        vector_dataframe_dict[vector_key] = vector_df
                    else:
                        vector_dataframe_dict[vector_key] = pd.concat(
                            [vector_dataframe_dict[vector_key], vector_df],
                            ignore_index=True,
                            axis=0,
                        )
            if visualization in [
                VisualizationOptions.STATISTICS,
                VisualizationOptions.FANCHART,
                VisualizationOptions.STATISTICS_AND_REALIZATIONS,
            ]:
                vectors_statistics_df = create_vectors_statistics_df(vectors_df)

                for vector in vector_names:
                    vector_statistics_df = vectors_statistics_df[["DATE", vector]]
                    row_count = vector_statistics_df.shape[0]
                    ensemble_name_list = [ensemble] * row_count
                    vector_statistics_df.insert(
                        loc=0, column="ENSEMBLE", value=ensemble_name_list
                    )

                    vector_key = vector + "_statistics"

                    if vector.startswith(("AVG_", "INTVL_")):
                        vector_statistics_df.loc[:, ("DATE", "")] = (
                            vector_statistics_df.loc[:, ("DATE", "")].apply(
                                datetime_to_intervalstr, freq=resampling_frequency
                            )
                        )
                    if vector_dataframe_dict.get(vector_key) is None:
                        vector_dataframe_dict[vector_key] = vector_statistics_df
                    else:
                        vector_dataframe_dict[vector_key] = pd.concat(
                            [
                                vector_dataframe_dict[vector_key],
                                vector_statistics_df,
                            ],
                            ignore_index=True,
                            axis=0,
                        )

    # : is replaced with _ in filenames to stay within POSIX portable pathnames
    # (e.g. : is not valid in a Windows path)
    return WebvizPluginABC.plugin_data_compress(
        [
            {
                "filename": f"{vector.replace(':', '_')}.csv",
                "content": df.to_csv(index=False),
            }
            for vector, df in vector_dataframe_dict.items()
        ]
    )
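# Sketch (assumption, not part of the source): the concat-or-create pattern used
# for vector_dataframe_dict above, isolated as a small helper for readability.
# The helper name is hypothetical.
from typing import Dict

import pandas as pd


def _append_vector_df(
    store: Dict[str, pd.DataFrame], key: str, df: pd.DataFrame
) -> None:
    # First occurrence of the key stores the frame as-is; later occurrences
    # append their rows to the existing frame.
    existing = store.get(key)
    store[key] = (
        df
        if existing is None
        else pd.concat([existing, df], ignore_index=True, axis=0)
    )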
def _user_download_data(
    data_requested: Union[int, None],
    vectors: List[str],
    ensembles: List[str],
    calc_mode: str,
    base_ens: str,
    delta_ens: str,
    visualization: str,
    cum_interval: str,
) -> Union[EncodedFile, str]:
    """Callback to download data based on selections"""
    if data_requested is None:
        raise PreventUpdate

    # Ensure selected ensembles is a list and prevent update if invalid calc_mode
    if calc_mode == "delta_ensembles":
        ensembles = [base_ens, delta_ens]
    elif calc_mode == "ensembles":
        if not isinstance(ensembles, list):
            raise TypeError("ensembles should always be of type list")
    else:
        raise PreventUpdate

    if vectors is None:
        vectors = self.plot_options.get("vectors", [self.smry_cols[0]])

    dfs = calculate_vector_dataframes(
        smry=self.smry,
        smry_meta=self.smry_meta,
        ensembles=ensembles,
        vectors=vectors,
        calc_mode=calc_mode,
        visualization=visualization,
        time_index=self.time_index,
        cum_interval=cum_interval,
    )
    for vector, df in dfs.items():
        if visualization in ["fanchart", "statistics"]:
            df["stat"] = df["stat"].sort_values(by=[("", "ENSEMBLE"), ("", "DATE")])
            if vector.startswith(("AVG_", "INTVL_")):
                df["stat"]["", "DATE"] = df["stat"]["", "DATE"].astype(str)
                df["stat"]["", "DATE"] = df["stat"]["", "DATE"].apply(
                    date_to_interval_conversion,
                    vector=vector,
                    interval=cum_interval,
                    as_date=False,
                )
        else:
            df["data"] = df["data"].sort_values(by=["ENSEMBLE", "REAL", "DATE"])
            # Reorder columns
            df["data"] = df["data"][
                ["ENSEMBLE", "REAL", "DATE"]
                + [
                    col
                    for col in df["data"].columns
                    if col not in ["ENSEMBLE", "REAL", "DATE"]
                ]
            ]
            if vector.startswith(("AVG_", "INTVL_")):
                df["data"]["DATE"] = df["data"]["DATE"].astype(str)
                df["data"]["DATE"] = df["data"]["DATE"].apply(
                    date_to_interval_conversion,
                    vector=vector,
                    interval=cum_interval,
                    as_date=False,
                )

    # : is replaced with _ in filenames to stay within POSIX portable pathnames
    # (e.g. : is not valid in a Windows path)
    return WebvizPluginABC.plugin_data_compress(
        [
            {
                "filename": f"{vector.replace(':', '_')}.csv",
                "content": df.get("stat", df["data"]).to_csv(index=False),
            }
            for vector, df in dfs.items()
        ]
    )