def get_structure(
    assets: List[Asset],
) -> Tuple[Dict[str, AssetType], List[Market], Dict[str, Resource]]:
    """Get asset portfolio structured as Resources, based on AssetTypes present in a list of Assets.

    Initializing Resources leads to some database queries.

    :param assets: a list of Assets
    :returns: a tuple comprising:
              - a dictionary of resource names (as keys) and the asset type
                represented by these resources (as values)
              - a list of (unique) Markets that are relevant to these resources
              - a dictionary of resource names (as keys) and Resources (as values)
    """
    # Set up a resource name for each asset type represented in the assets
    # (duplicate asset types collapse onto one dict entry).
    represented_asset_types = {
        asset.asset_type.plural_name: asset.asset_type for asset in assets
    }

    # Load structure (and set up resources)
    resource_dict: Dict[str, Resource] = {}
    # Collect markets into a set so deduplication happens once, at the end,
    # instead of per-resource and then again over the whole list.
    unique_markets = set()
    for resource_name in represented_asset_types.keys():
        resource = Resource(resource_name)
        if len(resource.assets) == 0:
            # Skip resources that turn out to have no assets
            continue
        resource_dict[resource_name] = resource
        unique_markets.update(asset.market for asset in resource.assets)
    return represented_asset_types, list(unique_markets), resource_dict
def set_session_resource(
    assets: List[Asset], groups_with_assets: List[str]
) -> Optional[Resource]:
    """
    Set session["resource"] to something, based on the available asset groups or the request.

    Returns the selected resource instance, or None.
    """
    # [GET] Set by user clicking on a link somewhere (e.g. dashboard)
    if "resource" in request.args:
        session["resource"] = request.args["resource"]
    # [POST] Set by user in drop-down field. This overwrites GET, as the URL remains.
    if "resource" in request.form:
        session["resource"] = request.form["resource"]
    if "resource" not in session:
        # Neither GET nor POST named a resource: fall back to a default, if possible
        if groups_with_assets:
            session["resource"] = groups_with_assets[0]
        elif assets:
            session["resource"] = assets[0].name
        else:
            # Nothing to select from
            return None
    return Resource(session["resource"])
def dashboard_view():
    """Dashboard view.
    This is the default landing page for the platform user.
    It shows a map with the location and status of all of the user's assets,
    as well as a breakdown of the asset types in the user's portfolio.
    Assets for which the platform has identified upcoming balancing opportunities are highlighted.
    """
    msg = ""
    if "clear-session" in request.values:
        clear_session()
        msg = "Your session was cleared."

    aggregate_groups = ["renewables", "EVSE"]
    asset_groups = get_asset_group_queries(
        custom_additional_groups=aggregate_groups
    )

    # Set up a Resource for each asset group (shown on the map)
    map_asset_groups = {
        asset_group_name: Resource(asset_group_name)
        for asset_group_name in asset_groups
    }

    # Pack CDN resources (from pandas_bokeh/base.py)
    # Assemble with str.join + f-strings rather than repeated += concatenation.
    bokeh_html_embedded = "".join(
        f'<link href="{css}" rel="stylesheet" type="text/css">\n'
        for css in CDN.css_files
    ) + "".join(f'<script src="{js}"></script>\n' for js in CDN.js_files)

    return render_flexmeasures_template(
        "views/dashboard.html",
        message=msg,
        bokeh_html_embedded=bokeh_html_embedded,
        mapboxAccessToken=current_app.config.get("MAPBOX_ACCESS_TOKEN", ""),
        map_center=get_center_location(user=current_user),
        asset_groups=map_asset_groups,
        aggregate_groups=aggregate_groups,
    )
def get_power_data(
    resource: Union[str, Resource],  # name or instance
    show_consumption_as_positive: bool,
    showing_individual_traces_for: str,
    metrics: dict,
    query_window: Tuple[datetime, datetime],
    resolution: str,
    forecast_horizon: timedelta,
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, dict]:
    """Get power data and metrics.

    Return power observations, power forecasts and power schedules (each might be an
    empty DataFrame) and a dict with the following metrics:
    - expected value
    - mean absolute error
    - mean absolute percentage error
    - weighted absolute percentage error

    :param resource: resource name or Resource instance to load power data for
    :param show_consumption_as_positive: if True, sign-flip event values
    :param showing_individual_traces_for: "power", "schedules" or "none" —
        selects which data is shown per-sensor rather than aggregated
    :param metrics: dict that metric entries are written into (mutated and returned)
    :param query_window: (start, end) of the sensor-data query
    :param resolution: time resolution of the data
    :param forecast_horizon: minimum belief horizon for forecasts

    Todo: Power schedules ignore horizon.
    """
    if isinstance(resource, str):
        resource = Resource(resource)

    default_columns = ["event_value", "belief_horizon", "source"]

    # Get power data (measurements; not needed when showing only schedules)
    if showing_individual_traces_for != "schedules":
        resource.load_sensor_data(
            sensor_types=[Power],
            start=query_window[0],
            end=query_window[-1],
            resolution=resolution,
            belief_horizon_window=(None, timedelta(hours=0)),
            exclude_source_types=["scheduling script"],
        )
        if showing_individual_traces_for == "power":
            power_bdf = resource.power_data
            # In this case, power_bdf is actually a dict of BeliefDataFrames.
            # We join the frames into one frame, remembering -per frame- the sensor name as source.
            power_bdf = pd.concat(
                [
                    set_bdf_source(bdf, sensor_name)
                    for sensor_name, bdf in power_bdf.items()
                ]
            )
        else:
            # Here, we aggregate all rows together
            power_bdf = resource.aggregate_power_data
        power_df: pd.DataFrame = simplify_index(
            power_bdf, index_levels_to_columns=["belief_horizon", "source"]
        )
        if showing_individual_traces_for == "power":
            # In this case, we keep on indexing by source (as we have more than one)
            power_df.set_index("source", append=True, inplace=True)
    else:
        power_df = pd.DataFrame(columns=default_columns)

    # Get power forecast (only when traces are aggregated)
    if showing_individual_traces_for == "none":
        power_forecast_bdf: tb.BeliefsDataFrame = resource.load_sensor_data(
            sensor_types=[Power],
            start=query_window[0],
            end=query_window[-1],
            resolution=resolution,
            belief_horizon_window=(forecast_horizon, None),
            exclude_source_types=["scheduling script"],
        ).aggregate_power_data
        power_forecast_df: pd.DataFrame = simplify_index(
            power_forecast_bdf, index_levels_to_columns=["belief_horizon", "source"]
        )
    else:
        power_forecast_df = pd.DataFrame(columns=default_columns)

    # Get power schedule (not needed when showing only individual power traces)
    if showing_individual_traces_for != "power":
        resource.load_sensor_data(
            sensor_types=[Power],
            start=query_window[0],
            end=query_window[-1],
            resolution=resolution,
            belief_horizon_window=(None, None),
            source_types=["scheduling script"],
        )
        if showing_individual_traces_for == "schedules":
            power_schedule_bdf = resource.power_data
            # Join per-sensor frames, remembering the sensor name as source
            power_schedule_bdf = pd.concat(
                [
                    set_bdf_source(bdf, sensor_name)
                    for sensor_name, bdf in power_schedule_bdf.items()
                ]
            )
        else:
            power_schedule_bdf = resource.aggregate_power_data
        power_schedule_df: pd.DataFrame = simplify_index(
            power_schedule_bdf, index_levels_to_columns=["belief_horizon", "source"]
        )
        if showing_individual_traces_for == "schedules":
            power_schedule_df.set_index("source", append=True, inplace=True)
    else:
        power_schedule_df = pd.DataFrame(columns=default_columns)

    if show_consumption_as_positive:
        power_df["event_value"] *= -1
        power_forecast_df["event_value"] *= -1
        power_schedule_df["event_value"] *= -1

    # Calculate the power metrics
    power_hour_factor = time_utils.resolution_to_hour_factor(resolution)
    # power_df["event_value"] is already a Series; no pd.Series(...) wrapper needed
    realised_power_in_mwh = (power_df["event_value"] * power_hour_factor).values

    if not power_df.empty:
        metrics["realised_power_in_mwh"] = np.nansum(realised_power_in_mwh)
    else:
        # np.nan, not np.NaN: the np.NaN alias was removed in NumPy 2.0
        metrics["realised_power_in_mwh"] = np.nan
    if not power_forecast_df.empty and power_forecast_df.size == power_df.size:
        expected_power_in_mwh = (
            power_forecast_df["event_value"] * power_hour_factor
        ).values
        metrics["expected_power_in_mwh"] = np.nansum(expected_power_in_mwh)
        metrics["mae_power_in_mwh"] = calculations.mean_absolute_error(
            realised_power_in_mwh, expected_power_in_mwh
        )
        metrics["mape_power"] = calculations.mean_absolute_percentage_error(
            realised_power_in_mwh, expected_power_in_mwh
        )
        metrics["wape_power"] = calculations.weighted_absolute_percentage_error(
            realised_power_in_mwh, expected_power_in_mwh
        )
    else:
        metrics["expected_power_in_mwh"] = np.nan
        metrics["mae_power_in_mwh"] = np.nan
        metrics["mape_power"] = np.nan
        metrics["wape_power"] = np.nan
    return power_df, power_forecast_df, power_schedule_df, metrics