Beispiel #1
0
    def __init__(
        self,
        app: dash.Dash,
        webviz_settings: WebvizSettings,
        ensembles: list,
        gruptree_file: str = "share/results/tables/gruptree.csv",
        rel_file_pattern: str = "share/results/unsmry/*.arrow",
        time_index: str = "yearly",
    ):
        """Build group tree data per ensemble from presampled summary data.

        Args:
            app: Dash application the callbacks are registered on.
            webviz_settings: Global settings, provides scratch ensemble paths.
            ensembles: Names of scratch ensembles to load (required).
            gruptree_file: Relative path to the gruptree csv file.
            rel_file_pattern: Relative glob pattern to arrow summary files.
            time_index: Resampling frequency, "monthly" or "yearly".

        Raises:
            ValueError: If ``ensembles`` is missing or ``time_index`` is not
                "monthly"/"yearly".
        """
        super().__init__()

        # Validate user input up front, before any state is assigned. Plain
        # raises (not assert) so validation survives `python -O`.
        if ensembles is None:
            raise ValueError('Incorrect argument, must provide "ensembles"')
        if time_index not in ("monthly", "yearly"):
            raise ValueError("time_index must be monthly or yearly")

        self._ensembles = ensembles
        self._gruptree_file = gruptree_file

        self._ensemble_paths: Dict[str, Path] = {
            ensemble_name:
            webviz_settings.shared_settings["scratch_ensembles"][ensemble_name]
            for ensemble_name in ensembles
        }

        provider_factory = EnsembleSummaryProviderFactory.instance()

        self._group_tree_data: Dict[str, EnsembleGroupTreeData] = {}

        # NOTE: the original computed `Frequency(time_index)` twice; once is
        # enough since `time_index` does not change.
        sampling = Frequency(time_index)
        for ens_name, ens_path in self._ensemble_paths.items():
            provider: EnsembleSummaryProvider = (
                provider_factory.create_from_arrow_unsmry_presampled(
                    str(ens_path), rel_file_pattern, sampling))
            self._group_tree_data[ens_name] = EnsembleGroupTreeData(
                provider, GruptreeModel(ens_name, ens_path, gruptree_file))

        self.set_callbacks(app)
Beispiel #2
0
    def _update_relative_date_dropdown(
        resampling_frequency_value: str,
        current_relative_date_options: List[dict],
        current_relative_date_value: Optional[str],
    ) -> Tuple[List[Dict[str, str]], Optional[str]]:
        """Update the relative-date dropdown when the resampling frequency
        selection changes.

        If dates are not existing for a provider, the data accessor must handle
        invalid relative date selection!
        """
        frequency = Frequency.from_string_value(resampling_frequency_value)
        union_of_dates = input_provider_set.all_dates(frequency)

        # Build one dropdown option per available date (label equals value)
        options: List[Dict[str, str]] = []
        for date in union_of_dates:
            date_str = datetime_utils.to_str(date)
            options.append({"label": date_str, "value": date_str})

        # Keep the currently selected date only if it is still a valid option
        value: Optional[str] = None
        for option in options:
            if option["value"] == current_relative_date_value:
                value = option["value"]
                break

        # Prevent updates if unchanged
        if options == current_relative_date_options:
            options = dash.no_update
        if value == current_relative_date_value:
            value = dash.no_update

        return options, value
Beispiel #3
0
    def __init__(
        self,
        app: dash.Dash,
        webviz_settings: WebvizSettings,
        ensembles: Optional[list] = None,
        rel_file_pattern: str = "share/results/unsmry/*.arrow",
        perform_presampling: bool = False,
        obsfile: Path = None,
        options: dict = None,
        sampling: str = Frequency.MONTHLY.value,
        predefined_expressions: str = None,
        user_defined_vector_definitions: str = None,
        line_shape_fallback: str = "linear",
    ) -> None:
        """Set up the simulation time series plugin.

        Creates summary providers for the requested ensembles, builds the
        vector selector / vector calculator data (including per-day and
        per-interval variants for total vectors), validates predefined
        expressions and the initially selected vectors, and registers the
        Dash callbacks.

        Args:
            app: Dash application the callbacks are registered on.
            webviz_settings: Provides theme and shared ensemble/config paths.
            ensembles: Names of scratch ensembles to load (required).
            rel_file_pattern: Relative glob pattern to arrow summary files.
            perform_presampling: If True, presample summary data with
                ``sampling`` at load time instead of lazy resampling.
            obsfile: Optional path to an observation file.
            options: Optional plot options dict ("visualization", "vectors",
                deprecated "vector1"/"vector2"/"vector3").
            sampling: Resampling frequency string; must convert to Frequency.
            predefined_expressions: Key into shared_settings pointing at a
                predefined-expressions config file.
            user_defined_vector_definitions: Key into shared_settings pointing
                at a vector-definitions config file.
            line_shape_fallback: Fallback line shape for simulation vectors.

        Raises:
            ValueError: If ``ensembles`` is missing, ``sampling`` is invalid
                (raw), providers cannot be created, or an initially selected
                vector does not exist in the data.
        """
        super().__init__()

        # NOTE: Temporary css, pending on new wcc modal component.
        # See: https://github.com/equinor/webviz-core-components/issues/163
        WEBVIZ_ASSETS.add(
            Path(webviz_subsurface.__file__).parent / "_assets" / "css" /
            "modal.css")

        self._webviz_settings = webviz_settings
        self._obsfile = obsfile

        # Retrieve user defined vector descriptions from configuration and validate
        self._user_defined_vector_descriptions_path = (
            None if user_defined_vector_definitions is None else
            webviz_settings.shared_settings["user_defined_vector_definitions"]
            [user_defined_vector_definitions])
        self._user_defined_vector_definitions: Dict[
            str, wsc.
            VectorDefinition] = create_user_defined_vector_descriptions_from_config(
                get_path(self._user_defined_vector_descriptions_path) if self.
                _user_defined_vector_descriptions_path else None)
        # Custom definitions start as a copy of the user defined ones and are
        # extended below with per-day/per-interval and expression definitions.
        self._custom_vector_definitions = copy.deepcopy(
            self._user_defined_vector_definitions)

        self._line_shape_fallback = set_simulation_line_shape_fallback(
            line_shape_fallback)

        # Must define valid frequency! Raw sampling (None) is not supported.
        if Frequency.from_string_value(sampling) is None:
            raise ValueError(
                'Sampling frequency conversion is "None", i.e. Raw sampling, and '
                "is not supported by plugin yet!")
        self._sampling = Frequency(sampling)
        self._presampled_frequency = None

        # TODO: Update functionality when allowing raw data and csv file input
        # NOTE: If csv is implemented-> handle/disable statistics, PER_INTVL_, PER_DAY_, delta
        # ensemble, etc.
        if ensembles is not None:
            ensemble_paths: Dict[str, Path] = {
                ensemble_name:
                webviz_settings.shared_settings["scratch_ensembles"]
                [ensemble_name]
                for ensemble_name in ensembles
            }
            if perform_presampling:
                # Resample once at load time with the configured frequency
                self._presampled_frequency = self._sampling
                self._input_provider_set = create_presampled_provider_set_from_paths(
                    ensemble_paths, rel_file_pattern,
                    self._presampled_frequency)
            else:
                # Lazy providers: resampling is done on demand per request
                self._input_provider_set = create_lazy_provider_set_from_paths(
                    ensemble_paths, rel_file_pattern)
        else:
            raise ValueError('Incorrect argument, must provide "ensembles"')

        if not self._input_provider_set:
            raise ValueError(
                "Initial provider set is undefined, and ensemble summary providers"
                " are not instanciated for plugin")

        self._theme = webviz_settings.theme

        self._observations = {}
        if self._obsfile:
            self._observations = check_and_format_observations(
                get_path(self._obsfile))

        # NOTE: Initially keep set of all vector names - can make dynamic if wanted?
        # Historical vectors are excluded from the selectable vectors.
        vector_names = self._input_provider_set.all_vector_names()
        non_historical_vector_names = [
            vector for vector in vector_names
            if historical_vector(vector, None, False) not in vector_names
        ]

        # NOTE: Initially: With set of vector names, the vector selector data is static
        # Can be made dynamic based on selected ensembles - i.e. vectors present among
        # selected providers?
        self._vector_selector_base_data: list = []
        self._vector_calculator_data: list = []
        for vector in non_historical_vector_names:
            add_vector_to_vector_selector_data(
                self._vector_selector_base_data,
                vector,
            )

            # Only vectors from providers are provided to vector calculator
            add_vector_to_vector_selector_data(
                self._vector_calculator_data,
                vector,
            )

            metadata = (self._input_provider_set.vector_metadata(vector)
                        if self._input_provider_set else None)
            if metadata and metadata.is_total:
                # Get the likely name for equivalent rate vector and make dropdown options.
                # Requires that the time_index was either defined or possible to infer.
                per_day_vec = create_per_day_vector_name(vector)
                per_intvl_vec = create_per_interval_vector_name(vector)

                add_vector_to_vector_selector_data(
                    self._vector_selector_base_data,
                    per_day_vec,
                )
                add_vector_to_vector_selector_data(
                    self._vector_selector_base_data,
                    per_intvl_vec,
                )

                # Add vector base to custom vector definition if not existing
                vector_base = vector.split(":")[0]
                _definition = wsc.VectorDefinitions.get(vector_base, None)
                _type = _definition["type"] if _definition else "others"

                per_day_vec_base = per_day_vec.split(":")[0]
                per_intvl_vec_base = per_intvl_vec.split(":")[0]
                if per_day_vec_base not in self._custom_vector_definitions:
                    self._custom_vector_definitions[
                        per_day_vec_base] = wsc.VectorDefinition(
                            type=_type,
                            description=simulation_vector_description(
                                per_day_vec_base,
                                self._user_defined_vector_definitions),
                        )
                if per_intvl_vec_base not in self._custom_vector_definitions:
                    self._custom_vector_definitions[
                        per_intvl_vec_base] = wsc.VectorDefinition(
                            type=_type,
                            description=simulation_vector_description(
                                per_intvl_vec_base,
                                self._user_defined_vector_definitions),
                        )

        # Retrieve predefined expressions from configuration and validate
        self._predefined_expressions_path = (
            None if predefined_expressions is None else webviz_settings.
            shared_settings["predefined_expressions"][predefined_expressions])
        self._predefined_expressions = expressions_from_config(
            get_path(self._predefined_expressions_path) if self.
            _predefined_expressions_path else None)
        for expression in self._predefined_expressions:
            valid, message = validate_predefined_expression(
                expression, self._vector_selector_base_data)
            if not valid:
                # Invalid expressions are kept but flagged, so the UI can
                # show them as invalid rather than dropping them silently.
                warnings.warn(message)
            expression["isValid"] = valid

        # Add expressions to custom vector definitions
        self._custom_vector_definitions_base = copy.deepcopy(
            self._custom_vector_definitions)
        _custom_vector_definitions_from_expressions = (
            get_vector_definitions_from_expressions(
                self._predefined_expressions))
        for key, value in _custom_vector_definitions_from_expressions.items():
            if key not in self._custom_vector_definitions:
                self._custom_vector_definitions[key] = value

        # Create initial vector selector data with predefined expressions
        self._initial_vector_selector_data = copy.deepcopy(
            self._vector_selector_base_data)
        add_expressions_to_vector_selector_data(
            self._initial_vector_selector_data, self._predefined_expressions)

        plot_options = options if options else {}
        self._initial_visualization_selection = VisualizationOptions(
            plot_options.get("visualization", "statistics"))

        # Initial selected vectors - NB: {vector1, vector2, vector3} is deprecated!
        initial_vectors: List[str] = plot_options.get("vectors", [])

        # TODO: Remove when deprecated code is not utilized anymore
        if "vectors" in plot_options and any(
                elm in plot_options
                for elm in ["vector1", "vector2", "vector3"]):
            warnings.warn(
                'Providing new user input option "vectors" and deprecated user input options '
                '"vector1", "vector2" and "vector3" simultaneously. Initially selected vectors '
                'for plugin are set equal to new user input option "vectors".')
        if not initial_vectors:
            # Fall back to the deprecated options, keeping at most three
            initial_vectors = [
                plot_options[elm] for elm in ["vector1", "vector2", "vector3"]
                if elm in plot_options
            ][:3]

        # Check if initially selected vectors exist in data, raise ValueError if not
        missing_vectors = [
            elm for elm in initial_vectors
            if not is_vector_name_in_vector_selector_data(
                elm, self._initial_vector_selector_data)
        ]
        if missing_vectors:
            raise ValueError(
                f"Cannot find: {', '.join(missing_vectors)} to plot initially in "
                "SimulationTimeSeries. Check that the vector(s) exist in your data."
            )

        if len(initial_vectors) > 3:
            warnings.warn(
                'User input option "vectors" contains more than 3 vectors. Only the first 3 listed '
                "vectors are kept for initially selected vectors - the remaining are neglected."
            )
        self._initial_vectors = initial_vectors[:3]

        # Set callbacks
        self.set_callbacks(app)
    def __init__(
        self,
        app: Dash,
        webviz_settings: WebvizSettings,
        ensembles: Optional[list] = None,
        rel_file_pattern: str = "share/results/unsmry/*.arrow",
        statistics_file: str = "share/results/tables/gridpropstatistics.csv",
        surface_renaming: Optional[dict] = None,
        time_index: str = "monthly",
        column_keys: Optional[list] = None,
        csvfile_statistics: Path = None,
        csvfile_smry: Path = None,
    ):
        """Set up property statistics data from ensembles or aggregated csv files.

        Two input modes are supported:
        * ``ensembles``: property statistics and summary data are loaded via
          table/summary providers (arrow files), with a legacy fallback for
          portable apps built before arrow support.
        * ``csvfile_statistics`` (+ optional ``csvfile_smry``): aggregated csv
          input, again with a legacy fallback for existing portable apps.

        Raises:
            ValueError: If neither ``ensembles`` nor ``csvfile_statistics`` is
                provided, or if arrow files are missing in non-portable mode.
        """
        super().__init__()
        self.theme: WebvizConfigTheme = webviz_settings.theme
        self.ensembles = ensembles
        self._surface_folders: Union[dict, None] = None
        # Time series model; stays None if no summary data source is given
        self._vmodel: Optional[Union[SimulationTimeSeriesModel,
                                     ProviderTimeSeriesDataModel]] = None
        run_mode_portable = WEBVIZ_INSTANCE_INFO.run_mode == WebvizRunMode.PORTABLE
        table_provider = EnsembleTableProviderFactory.instance()

        if ensembles is not None:
            ensemble_paths = {
                ensemble_name:
                webviz_settings.shared_settings["scratch_ensembles"]
                [ensemble_name]
                for ensemble_name in ensembles
            }

            resampling_frequency = Frequency(time_index)
            provider_factory = EnsembleSummaryProviderFactory.instance()

            try:
                # Preferred path: presampled arrow summary providers plus
                # per-realization csv statistics
                provider_set = {
                    ens: provider_factory.create_from_arrow_unsmry_presampled(
                        str(ens_path), rel_file_pattern, resampling_frequency)
                    for ens, ens_path in ensemble_paths.items()
                }
                self._vmodel = ProviderTimeSeriesDataModel(
                    provider_set=provider_set, column_keys=column_keys)
                property_df = create_df_from_table_provider(
                    table_provider.
                    create_provider_set_from_per_realization_csv_file(
                        ensemble_paths, statistics_file))
            except ValueError as error:
                message = (
                    f"Some/all ensembles are missing arrow files at {rel_file_pattern}.\n"
                    "If no arrow files have been generated with `ERT` using `ECL2CSV`, "
                    "the commandline tool `smry2arrow_batch` can be used to generate arrow "
                    "files for an ensemble")
                if not run_mode_portable:
                    raise ValueError(message) from error

                # NOTE: this part below is to ensure backwards compatibility for portable app's
                # created before the arrow support. It should be removed in the future.
                emodel: EnsembleSetModel = (
                    caching_ensemble_set_model_factory.get_or_create_model(
                        ensemble_paths=ensemble_paths,
                        time_index=time_index,
                        column_keys=column_keys,
                    ))
                self._vmodel = SimulationTimeSeriesModel(
                    dataframe=emodel.get_or_load_smry_cached())
                property_df = emodel.load_csv(csv_file=Path(statistics_file))

            # Map ensemble name to its surface folder (root of the ensemble
            # path, i.e. everything before "realization")
            self._surface_folders = {
                ens: Path(ens_path.split("realization")[0]) /
                "share/results/maps" / ens
                for ens, ens_path in ensemble_paths.items()
            }

        else:
            if csvfile_statistics is None:
                raise ValueError(
                    "If not 'ensembles', then csvfile_statistics must be provided"
                )
            # NOTE: the try/except is for backwards compatibility with existing portable app's.
            # It should be removed in the future together with the support of aggregated csv-files
            try:
                property_df = create_df_from_table_provider(
                    table_provider.
                    create_provider_set_from_aggregated_csv_file(
                        csvfile_statistics))
            except FileNotFoundError:
                if not run_mode_portable:
                    raise
                property_df = read_csv(csvfile_statistics)

            if csvfile_smry is not None:
                try:
                    smry_df = create_df_from_table_provider(
                        table_provider.
                        create_provider_set_from_aggregated_csv_file(
                            csvfile_smry))
                except FileNotFoundError:
                    if not run_mode_portable:
                        raise
                    smry_df = read_csv(csvfile_smry)

                self._vmodel = SimulationTimeSeriesModel(dataframe=smry_df)

        self._pmodel = PropertyStatisticsModel(dataframe=property_df,
                                               theme=self.theme)

        self._surface_renaming = surface_renaming if surface_renaming else {}
        self._surface_table = generate_surface_table(
            statistics_dframe=self._pmodel.dataframe,
            ensembles=self._pmodel.ensembles,
            surface_folders=self._surface_folders,
            surface_renaming=self._surface_renaming,
        )
        self.set_callbacks(app)
    def __init__(
        self,
        app,
        webviz_settings: WebvizSettings,
        parameter_csv: Path = None,
        response_csv: Path = None,
        ensembles: list = None,
        rel_file_pattern: str = "share/results/unsmry/*.arrow",
        response_file: str = None,
        response_filters: dict = None,
        response_ignore: list = None,
        response_include: list = None,
        column_keys: list = None,
        sampling: str = "monthly",
        aggregation: str = "sum",
        corr_method: str = "pearson",
    ):
        """Load parameter and response data for correlation analysis.

        Input is either aggregated csv files (``parameter_csv`` +
        ``response_csv``) or an ensemble set (``ensembles``, optionally with a
        ``response_file`` - otherwise responses are presampled summary
        vectors). Providing both modes at once is an error.

        Raises:
            ValueError: On conflicting or missing input arguments.
        """

        super().__init__()

        # `x or None` preserves the original `x if x else None` semantics:
        # falsy input is normalized to None.
        self.parameter_csv = parameter_csv or None
        self.response_csv = response_csv or None
        self.response_file = response_file or None
        self.response_filters = response_filters if response_filters else {}
        self.column_keys = column_keys
        self._sampling = Frequency(sampling)
        self.corr_method = corr_method
        self.aggregation = aggregation

        if response_ignore and response_include:
            raise ValueError(
                'Incorrect argument. Either provide "response_include", '
                '"response_ignore" or neither')
        if parameter_csv and response_csv:
            if ensembles or response_file:
                raise ValueError(
                    'Incorrect arguments. Either provide "csv files" or '
                    '"ensembles and response_file".')
            parameterdf = read_csv(self.parameter_csv)
            self.responsedf = read_csv(self.response_csv)

        elif ensembles:
            self.ens_paths = {
                ens: webviz_settings.shared_settings["scratch_ensembles"][ens]
                for ens in ensembles
            }
            table_provider_factory = EnsembleTableProviderFactory.instance()
            parameterdf = create_df_from_table_provider(
                table_provider_factory.
                create_provider_set_from_per_realization_parameter_file(
                    self.ens_paths))
            if self.response_file:
                self.responsedf = load_csv(
                    ensemble_paths=self.ens_paths,
                    csv_file=response_file,
                    ensemble_set_name="EnsembleSet",
                )
            else:
                # No response file given: use presampled summary vectors from
                # arrow files as responses, filtered to a single date.
                smry_provider_factory = EnsembleSummaryProviderFactory.instance()
                provider_set = {
                    ens_name:
                    smry_provider_factory.create_from_arrow_unsmry_presampled(
                        ens_path, rel_file_pattern, self._sampling)
                    for ens_name, ens_path in self.ens_paths.items()
                }
                self.response_filters["DATE"] = "single"
                self.responsedf = create_df_from_summary_provider(
                    provider_set,
                    self.column_keys,
                )
        else:
            raise ValueError(
                'Incorrect arguments. Either provide "csv files" or "ensembles and response_file".'
            )

        # Keep only numeric, non-constant parameters
        pmodel = ParametersModel(dataframe=parameterdf,
                                 keep_numeric_only=True,
                                 drop_constants=True)
        self.parameterdf = pmodel.dataframe
        self.parameter_columns = pmodel.parameters

        parresp.check_runs(self.parameterdf, self.responsedf)
        parresp.check_response_filters(self.responsedf, self.response_filters)

        # Only select numerical responses
        self.response_columns = parresp.filter_numerical_columns(
            df=self.responsedf,
            column_ignore=response_ignore,
            column_include=response_include,
            filter_columns=self.response_filters.keys(),
        )

        self.theme = webviz_settings.theme
        self.set_callbacks(app)
Beispiel #6
0
    def _user_download_data(
        data_requested: Union[int, None],
        vectors: List[str],
        selected_ensembles: List[str],
        visualization_value: str,
        resampling_frequency_value: str,
        selected_realizations: List[int],
        statistics_calculated_from_value: str,
        delta_ensembles: List[DeltaEnsemble],
        vector_calculator_expressions: List[ExpressionInfo],
    ) -> Union[EncodedFile, str]:
        """Callback to download data based on selections

        Retrieve vector data based on selected visualizations and filtered
        realizations, and return it as a compressed set of csv files - one
        file per vector, suffixed "_realizations" and/or "_statistics".

        NOTE:
        * Does not group based on "Group By" - data is stored per vector
        * All statistics included - no filtering on statistics selections
        * No history vector
        * No observation data
        """
        # `data_requested` is the download-button trigger; None means the
        # callback fired without an actual request.
        if data_requested is None:
            raise PreventUpdate

        if not isinstance(selected_ensembles, list):
            raise TypeError("ensembles should always be of type list")

        if vectors is None:
            vectors = initial_selected_vectors

        # Retrieve the selected expressions
        selected_expressions = get_selected_expressions(
            vector_calculator_expressions, vectors
        )

        # Convert from string values to enum types
        visualization = VisualizationOptions(visualization_value)
        resampling_frequency = Frequency.from_string_value(resampling_frequency_value)
        statistics_from_option = StatisticsFromOptions(statistics_calculated_from_value)

        # Create dict of derived vectors accessors for selected ensembles
        derived_vectors_accessors: Dict[
            str, DerivedVectorsAccessor
        ] = create_derived_vectors_accessor_dict(
            ensembles=selected_ensembles,
            vectors=vectors,
            provider_set=input_provider_set,
            expressions=selected_expressions,
            delta_ensembles=delta_ensembles,
            resampling_frequency=resampling_frequency,
        )

        # Dict with vector name as key and dataframe data as value
        vector_dataframe_dict: Dict[str, pd.DataFrame] = {}

        # Get all realizations if statistics across all realizations are requested
        is_statistics_from_all_realizations = (
            statistics_from_option == StatisticsFromOptions.ALL_REALIZATIONS
            and visualization
            in [
                VisualizationOptions.FANCHART,
                VisualizationOptions.STATISTICS,
                VisualizationOptions.STATISTICS_AND_REALIZATIONS,
            ]
        )

        # Plotting per derived vectors accessor
        for ensemble, accessor in derived_vectors_accessors.items():
            # Realization query - realizations query for accessor
            # - Get non-filter query, None, if statistics from all realizations is needed
            # - Create valid realizations query for accessor otherwise:
            #   * List[int]: Filtered valid realizations, empty list if none are valid
            #   * None: Get all realizations, i.e. non-filtered query
            realizations_query = (
                None
                if is_statistics_from_all_realizations
                else accessor.create_valid_realizations_query(selected_realizations)
            )

            # If all selected realizations are invalid for accessor - empty list
            if realizations_query == []:
                continue

            # Retrieve vectors data from accessor - provider vectors, derived
            # interval/average vectors and calculated (expression) vectors
            vectors_df_list: List[pd.DataFrame] = []
            if accessor.has_provider_vectors():
                vectors_df_list.append(
                    accessor.get_provider_vectors_df(realizations=realizations_query)
                )
            if accessor.has_interval_and_average_vectors():
                vectors_df_list.append(
                    accessor.create_interval_and_average_vectors_df(
                        realizations=realizations_query
                    )
                )
            if accessor.has_vector_calculator_expressions():
                vectors_df_list.append(
                    accessor.create_calculated_vectors_df(
                        realizations=realizations_query
                    )
                )

            # Append data for each vector
            for vectors_df in vectors_df_list:
                vector_names = [
                    elm for elm in vectors_df.columns if elm not in ["DATE", "REAL"]
                ]

                if visualization in [
                    VisualizationOptions.REALIZATIONS,
                    VisualizationOptions.STATISTICS_AND_REALIZATIONS,
                ]:
                    # NOTE: Should in theory not have situation with query of all realizations
                    # if not wanted
                    vectors_df_filtered = (
                        vectors_df
                        if realizations_query
                        else vectors_df[vectors_df["REAL"].isin(selected_realizations)]
                    )
                    for vector in vector_names:
                        vector_df = vectors_df_filtered[["DATE", "REAL", vector]]
                        row_count = vector_df.shape[0]
                        ensemble_name_list = [ensemble] * row_count
                        # Prepend the ensemble name so the csv identifies rows
                        vector_df.insert(
                            loc=0, column="ENSEMBLE", value=ensemble_name_list
                        )

                        # Derived rate vectors represent an interval, not an
                        # instant - replace DATE with an interval string
                        if vector.startswith(("AVG_", "INTVL_")):
                            vector_df["DATE"] = vector_df["DATE"].apply(
                                datetime_to_intervalstr, freq=resampling_frequency
                            )

                        vector_key = vector + "_realizations"
                        if vector_dataframe_dict.get(vector_key) is None:
                            vector_dataframe_dict[vector_key] = vector_df
                        else:
                            # Same vector from several accessors: stack rows
                            vector_dataframe_dict[vector_key] = pd.concat(
                                [vector_dataframe_dict[vector_key], vector_df],
                                ignore_index=True,
                                axis=0,
                            )

                if visualization in [
                    VisualizationOptions.STATISTICS,
                    VisualizationOptions.FANCHART,
                    VisualizationOptions.STATISTICS_AND_REALIZATIONS,
                ]:
                    vectors_statistics_df = create_vectors_statistics_df(vectors_df)

                    for vector in vector_names:
                        vector_statistics_df = vectors_statistics_df[["DATE", vector]]
                        row_count = vector_statistics_df.shape[0]
                        ensemble_name_list = [ensemble] * row_count
                        vector_statistics_df.insert(
                            loc=0, column="ENSEMBLE", value=ensemble_name_list
                        )

                        vector_key = vector + "_statistics"

                        # NOTE(review): statistics df appears to have a
                        # MultiIndex on columns, hence the ("DATE", "") key
                        if vector.startswith(("AVG_", "INTVL_")):
                            vector_statistics_df.loc[
                                :, ("DATE", "")
                            ] = vector_statistics_df.loc[:, ("DATE", "")].apply(
                                datetime_to_intervalstr, freq=resampling_frequency
                            )
                        if vector_dataframe_dict.get(vector_key) is None:
                            vector_dataframe_dict[vector_key] = vector_statistics_df
                        else:
                            vector_dataframe_dict[vector_key] = pd.concat(
                                [
                                    vector_dataframe_dict[vector_key],
                                    vector_statistics_df,
                                ],
                                ignore_index=True,
                                axis=0,
                            )

        # : is replaced with _ in filenames to stay within POSIX portable pathnames
        # (e.g. : is not valid in a Windows path)
        return WebvizPluginABC.plugin_data_compress(
            [
                {
                    "filename": f"{vector.replace(':', '_')}.csv",
                    "content": df.to_csv(index=False),
                }
                for vector, df in vector_dataframe_dict.items()
            ]
        )
Beispiel #7
0
    def _update_graph(
        vectors: List[str],
        selected_ensembles: List[str],
        visualization_value: str,
        statistics_option_values: List[str],
        fanchart_option_values: List[str],
        trace_option_values: List[str],
        subplot_owner_options_value: str,
        resampling_frequency_value: str,
        selected_realizations: List[int],
        statistics_calculated_from_value: str,
        __graph_data_has_changed_trigger: int,
        delta_ensembles: List[DeltaEnsemble],
        vector_calculator_expressions: List[ExpressionInfo],
        ensemble_dropdown_options: List[dict],
    ) -> dict:
        """Callback to update all graphs based on selections.

        Flow:
        * De-serialize from JSON serializable format to strongly typed and filtered format
        * Business logic:
            * Functionality with "strongly typed" and filtered input format - functions and
            classes
            * ProviderSet for EnsembleSummaryProviders, i.e. input_provider_set
            * DerivedEnsembleVectorsAccessor to access derived vector data from ensembles
            with single providers or delta ensemble with two providers
            * GraphFigureBuilder to create graph with subplots per vector or subplots per
            ensemble, using VectorSubplotBuilder and EnsembleSubplotBuilder, respectively
        * Create/build property serialization in FigureBuilder by use of business logic data

        Returns the serialized figure dict for the wcc/dcc Graph component.

        NOTE: __graph_data_has_changed_trigger is only used to trigger the callback when the
        graph data has changed and a re-render of the graph is necessary. E.g. when a selected
        expression from the VectorCalculator gets edited, without changing the expression
        name - i.e. VectorSelector selectedNodes remain unchanged.
        """
        if not isinstance(selected_ensembles, list):
            raise TypeError("ensembles should always be of type list")

        # Fall back to the plugin's initial vector selection when nothing is selected yet
        if vectors is None:
            vectors = initial_selected_vectors

        # Retrieve the selected expressions
        selected_expressions = get_selected_expressions(
            vector_calculator_expressions, vectors
        )

        # Convert from string values to enum types
        visualization = VisualizationOptions(visualization_value)
        statistics_options = [
            StatisticsOptions(elm) for elm in statistics_option_values
        ]
        fanchart_options = [FanchartOptions(elm) for elm in fanchart_option_values]
        trace_options = [TraceOptions(elm) for elm in trace_option_values]
        subplot_owner = SubplotGroupByOptions(subplot_owner_options_value)
        resampling_frequency = Frequency.from_string_value(resampling_frequency_value)
        all_ensemble_names = [option["value"] for option in ensemble_dropdown_options]
        statistics_from_option = StatisticsFromOptions(statistics_calculated_from_value)

        # Prevent update if realization filtering is not affecting pure statistics plot
        # (statistics across ALL realizations are unchanged by the realization filter)
        # TODO: Refactor code or create utility for getting trigger ID in a "cleaner" way?
        ctx = dash.callback_context.triggered
        trigger_id = ctx[0]["prop_id"].split(".")[0]
        if (
            trigger_id == get_uuid(LayoutElements.REALIZATIONS_FILTER_SELECTOR)
            and statistics_from_option is StatisticsFromOptions.ALL_REALIZATIONS
            and visualization
            in [
                VisualizationOptions.STATISTICS,
                VisualizationOptions.FANCHART,
            ]
        ):
            raise PreventUpdate

        # Create dict of derived vectors accessors for selected ensembles
        derived_vectors_accessors: Dict[
            str, DerivedVectorsAccessor
        ] = create_derived_vectors_accessor_dict(
            ensembles=selected_ensembles,
            vectors=vectors,
            provider_set=input_provider_set,
            expressions=selected_expressions,
            delta_ensembles=delta_ensembles,
            resampling_frequency=resampling_frequency,
        )

        # Line shape (e.g. linear vs step) per vector, based on provider metadata
        # TODO: How to get metadata for calculated vector?
        vector_line_shapes: Dict[str, str] = {
            vector: get_simulation_line_shape(
                line_shape_fallback,
                vector,
                input_provider_set.vector_metadata(vector),
            )
            for vector in vectors
        }

        # Select figure builder according to requested subplot grouping
        figure_builder: GraphFigureBuilderBase
        if subplot_owner is SubplotGroupByOptions.VECTOR:
            # Create unique colors based on all ensemble names to preserve consistent colors
            ensemble_colors = unique_colors(all_ensemble_names, theme)
            vector_titles = create_vector_plot_titles_from_provider_set(
                vectors, selected_expressions, input_provider_set
            )
            figure_builder = VectorSubplotBuilder(
                vectors,
                vector_titles,
                ensemble_colors,
                resampling_frequency,
                vector_line_shapes,
                theme,
            )
        elif subplot_owner is SubplotGroupByOptions.ENSEMBLE:
            vector_colors = unique_colors(vectors, theme)
            figure_builder = EnsembleSubplotBuilder(
                vectors,
                selected_ensembles,
                vector_colors,
                resampling_frequency,
                vector_line_shapes,
                theme,
            )
        else:
            raise PreventUpdate

        # Get all realizations if statistics across all realizations are requested
        is_statistics_from_all_realizations = (
            statistics_from_option == StatisticsFromOptions.ALL_REALIZATIONS
            and visualization
            in [
                VisualizationOptions.FANCHART,
                VisualizationOptions.STATISTICS,
                VisualizationOptions.STATISTICS_AND_REALIZATIONS,
            ]
        )

        # Plotting per derived vectors accessor
        for ensemble, accessor in derived_vectors_accessors.items():
            # Realization query - realizations query for accessor
            # - Get non-filter query, None, if statistics from all realizations is needed
            # - Create valid realizations query for accessor otherwise:
            #   * List[int]: Filtered valid realizations, empty list if none are valid
            #   * None: Get all realizations, i.e. non-filtered query
            realizations_query = (
                None
                if is_statistics_from_all_realizations
                else accessor.create_valid_realizations_query(selected_realizations)
            )

            # If all selected realizations are invalid for accessor - empty list
            if realizations_query == []:
                continue

            # TODO: Consider to remove list vectors_df_list and use pd.concat to obtain
            # one single dataframe with vector columns. NB: Assumes equal sampling rate
            # for each vector type - i.e equal number of rows in dataframes

            # Retrieve vectors data from accessor: provider vectors, derived
            # interval/average vectors and calculated (expression) vectors
            vectors_df_list: List[pd.DataFrame] = []
            if accessor.has_provider_vectors():
                vectors_df_list.append(
                    accessor.get_provider_vectors_df(realizations=realizations_query)
                )
            if accessor.has_interval_and_average_vectors():
                vectors_df_list.append(
                    accessor.create_interval_and_average_vectors_df(
                        realizations=realizations_query
                    )
                )
            if accessor.has_vector_calculator_expressions():
                vectors_df_list.append(
                    accessor.create_calculated_vectors_df(
                        realizations=realizations_query
                    )
                )

            # Add traces per visualization mode (modes are not mutually exclusive
            # in code, but visualization holds a single enum value per callback)
            for vectors_df in vectors_df_list:
                if visualization == VisualizationOptions.REALIZATIONS:
                    # Show selected realizations - only filter df if realizations filter
                    # query is not performed
                    figure_builder.add_realizations_traces(
                        vectors_df
                        if realizations_query
                        else vectors_df[vectors_df["REAL"].isin(selected_realizations)],
                        ensemble,
                    )
                if visualization == VisualizationOptions.STATISTICS:
                    vectors_statistics_df = create_vectors_statistics_df(vectors_df)
                    figure_builder.add_statistics_traces(
                        vectors_statistics_df,
                        ensemble,
                        statistics_options,
                    )
                if visualization == VisualizationOptions.FANCHART:
                    vectors_statistics_df = create_vectors_statistics_df(vectors_df)
                    figure_builder.add_fanchart_traces(
                        vectors_statistics_df,
                        ensemble,
                        fanchart_options,
                    )
                if visualization == VisualizationOptions.STATISTICS_AND_REALIZATIONS:
                    # Configure line width and color scaling to easier separate
                    # statistics traces and realization traces.
                    # Show selected realizations - only filter df if realizations filter
                    # query is not performed
                    figure_builder.add_realizations_traces(
                        vectors_df
                        if realizations_query
                        else vectors_df[vectors_df["REAL"].isin(selected_realizations)],
                        ensemble,
                        color_lightness_scale=150.0,
                    )
                    # Add statistics on top
                    vectors_statistics_df = create_vectors_statistics_df(vectors_df)
                    figure_builder.add_statistics_traces(
                        vectors_statistics_df,
                        ensemble,
                        statistics_options,
                        line_width=3,
                    )

        # Retrieve selected input providers (i.e. non-delta ensembles only)
        selected_input_providers = ProviderSet(
            {
                name: provider
                for name, provider in input_provider_set.items()
                if name in selected_ensembles
            }
        )

        # Do not add observations if only delta ensembles are selected
        is_only_delta_ensembles = (
            len(selected_input_providers.names()) == 0
            and len(derived_vectors_accessors) > 0
        )
        if (
            observations
            and TraceOptions.OBSERVATIONS in trace_options
            and not is_only_delta_ensembles
        ):
            for vector in vectors:
                vector_observations = observations.get(vector)
                if vector_observations:
                    figure_builder.add_vector_observations(vector, vector_observations)

        # Add history trace
        # TODO: Improve when new history vector input format is in place
        if TraceOptions.HISTORY in trace_options:
            if (
                isinstance(figure_builder, VectorSubplotBuilder)
                and len(selected_input_providers.names()) > 0
            ):
                # Add history trace using first selected ensemble
                name = selected_input_providers.names()[0]
                provider = selected_input_providers.provider(name)
                vector_names = provider.vector_names()

                # Only vectors the provider actually has can yield history data
                provider_vectors = [elm for elm in vectors if elm in vector_names]
                if provider_vectors:
                    history_vectors_df = create_history_vectors_df(
                        provider, provider_vectors, resampling_frequency
                    )
                    # TODO: Handle check of non-empty dataframe better?
                    if (
                        not history_vectors_df.empty
                        and "DATE" in history_vectors_df.columns
                    ):
                        figure_builder.add_history_traces(history_vectors_df)

            if isinstance(figure_builder, EnsembleSubplotBuilder):
                # Add history trace for each ensemble
                for name, provider in selected_input_providers.items():
                    vector_names = provider.vector_names()

                    provider_vectors = [elm for elm in vectors if elm in vector_names]
                    if provider_vectors:
                        history_vectors_df = create_history_vectors_df(
                            provider, provider_vectors, resampling_frequency
                        )
                        # TODO: Handle check of non-empty dataframe better?
                        if (
                            not history_vectors_df.empty
                            and "DATE" in history_vectors_df.columns
                        ):
                            figure_builder.add_history_traces(
                                history_vectors_df,
                                name,
                            )

        # Create legends when all data is added to figure
        figure_builder.create_graph_legends()

        return figure_builder.get_serialized_figure()
Beispiel #8
0
    def __init__(
        self,
        app: dash.Dash,
        webviz_settings: WebvizSettings,
        ensembles: Optional[list] = None,
        rel_file_pattern: str = "share/results/unsmry/*.arrow",
        perform_presampling: bool = False,
        obsfile: Optional[Path] = None,
        options: Optional[dict] = None,
        sampling: str = Frequency.MONTHLY.value,
        predefined_expressions: Optional[str] = None,
        line_shape_fallback: str = "linear",
    ) -> None:
        """Initialize the plugin: build summary providers, vector selector data,
        predefined expressions and initial plot options, then register callbacks.

        `ensembles`: names of scratch ensembles (required despite the default - a
        ValueError is raised if None).
        `rel_file_pattern`: glob pattern for arrow summary files within each ensemble.
        `perform_presampling`: if True, resample data to `sampling` frequency at load
        time; otherwise providers resample lazily.
        `obsfile`: optional path to an observations file.
        `options`: optional initial plot options ("visualization", "vector1".."vector3").
        `sampling`: resampling frequency string; must map to a valid Frequency.
        `predefined_expressions`: optional key into shared_settings pointing at a
        vector-calculator expressions config.
        `line_shape_fallback`: plotly line shape used when vector metadata gives none.
        """
        super().__init__()

        # NOTE: Temporary css, pending on new wcc modal component.
        # See: https://github.com/equinor/webviz-core-components/issues/163
        WEBVIZ_ASSETS.add(
            Path(webviz_subsurface.__file__).parent / "_assets" / "css" /
            "modal.css")

        self._webviz_settings = webviz_settings
        self._obsfile = obsfile

        self._line_shape_fallback = set_simulation_line_shape_fallback(
            line_shape_fallback)

        # Must define valid frequency! Raw (None) sampling is unsupported.
        if Frequency.from_string_value(sampling) is None:
            raise ValueError(
                'Sampling frequency conversion is "None", i.e. Raw sampling, and '
                "is not supported by plugin yet!")
        self._sampling = Frequency(sampling)
        # Only set when presampling is actually performed (see below)
        self._presampled_frequency = None

        # TODO: Update functionality when allowing raw data and csv file input
        # NOTE: If csv is implemented-> handle/disable statistics, INTVL_, AVG_, delta
        # ensemble, etc.
        if ensembles is not None:
            ensemble_paths: Dict[str, Path] = {
                ensemble_name:
                webviz_settings.shared_settings["scratch_ensembles"]
                [ensemble_name]
                for ensemble_name in ensembles
            }
            if perform_presampling:
                self._presampled_frequency = self._sampling
                self._input_provider_set = create_presampled_provider_set_from_paths(
                    ensemble_paths, rel_file_pattern,
                    self._presampled_frequency)
            else:
                self._input_provider_set = create_lazy_provider_set_from_paths(
                    ensemble_paths, rel_file_pattern)
        else:
            raise ValueError('Incorrect argument, must provide "ensembles"')

        if not self._input_provider_set:
            raise ValueError(
                "Initial provider set is undefined, and ensemble summary providers"
                " are not instanciated for plugin")

        self._theme = webviz_settings.theme

        self._observations = {}
        if self._obsfile:
            self._observations = check_and_format_observations(
                get_path(self._obsfile))

        # NOTE: Initially keep set of all vector names - can make dynamic if wanted?
        # Keep only vectors that do not have a historical counterpart in the set
        vector_names = self._input_provider_set.all_vector_names()
        non_historical_vector_names = [
            vector for vector in vector_names
            if historical_vector(vector, None, False) not in vector_names
        ]

        # NOTE: Initially: With set of vector names, the vector selector data is static
        # Can be made dynamic based on selected ensembles - i.e. vectors present among
        # selected providers?
        self._vector_selector_base_data: list = []
        self._vector_calculator_data: list = []
        for vector in non_historical_vector_names:
            # split[0] is the base vector name (before any ":" well/group suffix)
            split = vector.split(":")
            add_vector_to_vector_selector_data(
                self._vector_selector_base_data,
                vector,
                simulation_vector_description(split[0]),
            )
            add_vector_to_vector_selector_data(
                self._vector_calculator_data,
                vector,
                simulation_vector_description(split[0]),
            )

            metadata = (self._input_provider_set.vector_metadata(vector)
                        if self._input_provider_set else None)
            if metadata and metadata.is_total:
                # Get the likely name for equivalent rate vector and make dropdown options.
                # Requires that the time_index was either defined or possible to infer.
                avgrate_vec = rename_vector_from_cumulative(vector=vector,
                                                            as_rate=True)
                interval_vec = rename_vector_from_cumulative(vector=vector,
                                                             as_rate=False)

                avgrate_split = avgrate_vec.split(":")
                interval_split = interval_vec.split(":")

                add_vector_to_vector_selector_data(
                    self._vector_selector_base_data,
                    avgrate_vec,
                    f"{simulation_vector_description(avgrate_split[0])} ({avgrate_vec})",
                )
                add_vector_to_vector_selector_data(
                    self._vector_selector_base_data,
                    interval_vec,
                    f"{simulation_vector_description(interval_split[0])} ({interval_vec})",
                )

        # Retrieve predefined expressions from configuration and validate
        self._predefined_expressions_path = (
            None if predefined_expressions is None else webviz_settings.
            shared_settings["predefined_expressions"][predefined_expressions])
        self._predefined_expressions = expressions_from_config(
            get_path(self._predefined_expressions_path) if self.
            _predefined_expressions_path else None)
        for expression in self._predefined_expressions:
            valid, message = validate_predefined_expression(
                expression, self._vector_selector_base_data)
            if not valid:
                # Invalid expressions are kept (flagged) rather than dropped
                warnings.warn(message)
            expression["isValid"] = valid

        # Create initial vector selector data with predefined expressions
        # (deepcopy so the base data is not mutated)
        self._initial_vector_selector_data = copy.deepcopy(
            self._vector_selector_base_data)
        add_expressions_to_vector_selector_data(
            self._initial_vector_selector_data, self._predefined_expressions)

        plot_options = options if options else {}
        self._initial_visualization_selection = VisualizationOptions(
            plot_options.get("visualization", "statistics"))
        # Collect up to three initial vectors from the "vector1".."vector3" options
        self._initial_vectors: List[str] = []
        if "vectors" not in plot_options:
            self._initial_vectors = []
        for vector in [
                vector for vector in ["vector1", "vector2", "vector3"]
                if vector in plot_options
        ]:
            self._initial_vectors.append(plot_options[vector])
        self._initial_vectors = self._initial_vectors[:3]

        # Set callbacks
        self.set_callbacks(app)
Beispiel #9
0
    def __init__(
        self,
        app: dash.Dash,
        webviz_settings: WebvizSettings,
        ensembles: list,
        rel_file_pattern: str = "share/results/unsmry/*.arrow",
        sampling: str = Frequency.YEARLY.value,
        well_attributes_file: str = None,
        excl_name_startswith: list = None,
        excl_name_contains: list = None,
        phase_weights: dict = None,
    ):
        """Initialize the plugin: build presampled summary providers per ensemble,
        collect per-ensemble wells/vectors/phases and register callbacks."""

        super().__init__()

        # Fall back to default per-phase weighting when none is supplied
        if phase_weights is None:
            phase_weights = {"Oil": 1.0, "Water": 1.0, "Gas": 300.0}
        self.weight_reduction_factor_oil = phase_weights["Oil"]
        self.weight_reduction_factor_wat = phase_weights["Water"]
        self.weight_reduction_factor_gas = phase_weights["Gas"]

        # Raises if the sampling string is not a valid frequency
        self._sampling = Frequency(sampling)

        paths_by_ensemble: Dict[str, Path] = {
            name: webviz_settings.shared_settings["scratch_ensembles"][name]
            for name in ensembles
        }

        self._input_provider_set = create_presampled_provider_set_from_paths(
            paths_by_ensemble, rel_file_pattern, self._sampling)

        logging.debug("Created presampled provider_set.")

        self.ensemble_names = self._input_provider_set.names()

        # Per-ensemble lookups, filled in the loop below
        self.dates = {}
        self.realizations = {}
        self.wells = {}
        self.vectors = {}
        self.phases = {}

        # Well attributes are read from the first ensemble only (if configured)
        if well_attributes_file is not None:
            first_ensemble = self.ensemble_names[0]
            self._well_attributes = WellAttributesModel(
                first_ensemble,
                paths_by_ensemble[first_ensemble],
                well_attributes_file,
            )
        else:
            self._well_attributes = None

        for name in self.ensemble_names:
            logging.debug(f"Working with: {name}")
            provider = self._input_provider_set.provider(name)
            self.realizations[name] = provider.realizations()
            self.dates[name] = provider.dates(resampling_frequency=None)

            # From wopt/wwpt/wgpt vectors: derive wells, vectors and phases,
            # dropping wells matched by the user-supplied "excl_name" lists
            wells, vector_names, phases = _get_wells_vectors_phases(
                provider.vector_names(),
                excl_name_startswith,
                excl_name_contains,
            )
            self.wells[name] = wells
            self.vectors[name] = vector_names
            self.phases[name] = phases

        self.well_collections = _get_well_collections_from_attr(
            self.wells, self._well_attributes)

        self.set_callbacks(app)