Example #1
def dashboard_view():
    """Dashboard view.
    This is the default landing page for the platform user.
    It shows a map with the location and status of all of the user's assets,
    as well as a breakdown of the asset types in the user's portfolio.
    Assets for which the platform has identified upcoming balancing opportunities are highlighted.
    """
    msg = ""
    if "clear-session" in request.values:
        clear_session()
        msg = "Your session was cleared."

    aggregate_groups = ["renewables", "EVSE"]
    asset_groups = get_asset_group_queries(
        custom_additional_groups=aggregate_groups)
    map_asset_groups = {}
    for asset_group_name in asset_groups:
        asset_group = Resource(asset_group_name)
        map_asset_groups[asset_group_name] = asset_group

    # Pack CDN resources (from pandas_bokeh/base.py)
    bokeh_html_embedded = ""
    for css in CDN.css_files:
        bokeh_html_embedded += (
            """<link href="%s" rel="stylesheet" type="text/css">\n""" % css)
    for js in CDN.js_files:
        bokeh_html_embedded += """<script src="%s"></script>\n""" % js

    return render_flexmeasures_template(
        "views/dashboard.html",
        message=msg,
        bokeh_html_embedded=bokeh_html_embedded,
        mapboxAccessToken=current_app.config.get("MAPBOX_ACCESS_TOKEN", ""),
        map_center=get_center_location(user=current_user),
        asset_groups=map_asset_groups,
        aggregate_groups=aggregate_groups,
    )
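
For reference, the CDN-packing loop above simply concatenates Bokeh's stylesheet and script tags into one HTML snippet that the template places in its <head>. A minimal standalone sketch of the same idea, assuming only Bokeh's public bokeh.resources.CDN object (the helper name is illustrative):

# Sketch: build the <head> snippet that dashboard_view embeds.
from bokeh.resources import CDN


def pack_cdn_resources() -> str:
    html = ""
    for css in CDN.css_files:  # stylesheet URLs (may be empty in newer Bokeh)
        html += '<link href="%s" rel="stylesheet" type="text/css">\n' % css
    for js in CDN.js_files:  # BokehJS script URLs
        html += '<script src="%s"></script>\n' % js
    return html


if __name__ == "__main__":
    print(pack_cdn_resources())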
Example #2
def analytics_view():
    """Analytics view. Here, four plots (consumption/generation, weather, prices and a profit/loss calculation)
    and a table of metrics data are prepared. This view allows selecting a resource name, from which a
    `models.Resource` object can be made. The resource name is kept in the session.
    Based on the resource, the plots and table are labelled appropriately.
    """
    set_time_range_for_session()
    markets = get_markets()
    assets = get_assets(order_by_asset_attribute="display_name",
                        order_direction="asc")
    asset_groups = get_asset_group_queries(
        custom_additional_groups=["renewables", "EVSE", "each Charge Point"])
    asset_group_names: List[str] = [
        group for group in asset_groups if asset_groups[group].count() > 0
    ]
    selected_resource = set_session_resource(assets, asset_group_names)
    selected_market = set_session_market(selected_resource)
    sensor_types = get_sensor_types(selected_resource)
    selected_sensor_type = set_session_sensor_type(sensor_types)
    session_asset_types = selected_resource.unique_asset_types
    set_individual_traces_for_session()
    view_shows_individual_traces = (
        session["showing_individual_traces_for"] in ("power", "schedules")
        and selected_resource.is_eligible_for_comparing_individual_traces())

    query_window, resolution = ensure_timing_vars_are_set((None, None), None)

    # This is useful information - we might want to adapt the sign of the data and labels.
    showing_pure_consumption_data = all(
        [a.is_pure_consumer for a in selected_resource.assets])
    showing_pure_production_data = all(
        [a.is_pure_producer for a in selected_resource.assets])
    # Show consumption as positive unless all assets are pure producers
    show_consumption_as_positive = not showing_pure_production_data

    data, metrics, weather_type, selected_weather_sensor = get_data_and_metrics(
        query_window,
        resolution,
        show_consumption_as_positive,
        session["showing_individual_traces_for"]
        if view_shows_individual_traces else "none",
        selected_resource,
        selected_market,
        selected_sensor_type,
        selected_resource.assets,
    )

    # Set shared x range
    shared_x_range = Range1d(start=query_window[0], end=query_window[1])
    shared_x_range2 = Range1d(
        start=query_window[0], end=query_window[1]
    )  # only needed if we draw two legends (if individual traces are on)

    # TODO: get rid of this hack, which we use because we mock the current year's data from 2015 data in demo mode
    # Our demo server uses 2015 data as if it's the current year's data. Here we mask future beliefs.
    if current_app.config.get("FLEXMEASURES_MODE", "") == "demo":

        most_recent_quarter = time_utils.get_most_recent_quarter()

        # Show only past data, pretending we're in the current year
        if not data["power"].empty:
            data["power"] = data["power"].loc[
                data["power"].index.get_level_values(
                    "event_start") < most_recent_quarter]
        if not data["prices"].empty:
            data["prices"] = data["prices"].loc[
                data["prices"].index < most_recent_quarter +
                timedelta(hours=24)]  # keep tomorrow's prices
        if not data["weather"].empty:
            data["weather"] = data["weather"].loc[
                data["weather"].index < most_recent_quarter]
        if not data["rev_cost"].empty:
            data["rev_cost"] = data["rev_cost"].loc[
                data["rev_cost"].index.get_level_values(
                    "event_start") < most_recent_quarter]

        # Show forecasts only up to a limited horizon
        horizon_days = 10  # keep a 10 day forecast
        max_forecast_datetime = most_recent_quarter + timedelta(
            hours=horizon_days * 24)
        if not data["power_forecast"].empty:
            data["power_forecast"] = data["power_forecast"].loc[
                data["power_forecast"].index < max_forecast_datetime]
        if not data["prices_forecast"].empty:
            data["prices_forecast"] = data["prices_forecast"].loc[
                data["prices_forecast"].index < max_forecast_datetime]
        if not data["weather_forecast"].empty:
            data["weather_forecast"] = data["weather_forecast"].loc[
                data["weather_forecast"].index < max_forecast_datetime]
        if not data["rev_cost_forecast"].empty:
            data["rev_cost_forecast"] = data["rev_cost_forecast"].loc[
                data["rev_cost_forecast"].index < max_forecast_datetime]

    # Making figures
    tools = ["box_zoom", "reset", "save"]
    power_fig = make_power_figure(
        selected_resource.display_name,
        data["power"],
        data["power_forecast"],
        data["power_schedule"],
        show_consumption_as_positive,
        shared_x_range,
        tools=tools,
    )
    rev_cost_fig = make_revenues_costs_figure(
        selected_resource.display_name,
        data["rev_cost"],
        data["rev_cost_forecast"],
        show_consumption_as_positive,
        shared_x_range,
        selected_market,
        tools=tools,
    )
    # the bottom plots need a separate x axis if they get their own legend (Bokeh complains otherwise)
    # this means that in that corner case zooming will not work across all four plots
    prices_fig = make_prices_figure(
        data["prices"],
        data["prices_forecast"],
        shared_x_range2 if view_shows_individual_traces else shared_x_range,
        selected_market,
        tools=tools,
    )
    weather_fig = make_weather_figure(
        selected_resource,
        data["weather"],
        data["weather_forecast"],
        shared_x_range2 if view_shows_individual_traces else shared_x_range,
        selected_weather_sensor,
        tools=tools,
    )

    # Separate a single legend and remove the others.
    # In case of individual traces, we need two legends.
    top_legend_fig = separate_legend(power_fig, orientation="horizontal")
    top_legend_script, top_legend_div = components(top_legend_fig)
    rev_cost_fig.renderers.remove(rev_cost_fig.legend[0])
    if view_shows_individual_traces:
        bottom_legend_fig = separate_legend(weather_fig,
                                            orientation="horizontal")
        prices_fig.renderers.remove(prices_fig.legend[0])
        bottom_legend_script, bottom_legend_div = components(bottom_legend_fig)
    else:
        prices_fig.renderers.remove(prices_fig.legend[0])
        weather_fig.renderers.remove(weather_fig.legend[0])
        bottom_legend_fig = bottom_legend_script = bottom_legend_div = None

    analytics_plots_script, analytics_plots_divs = components(
        (power_fig, rev_cost_fig, prices_fig, weather_fig))

    return render_flexmeasures_template(
        "views/analytics.html",
        top_legend_height=top_legend_fig.plot_height,
        top_legend_script=top_legend_script,
        top_legend_div=top_legend_div,
        bottom_legend_height=0
        if bottom_legend_fig is None else bottom_legend_fig.plot_height,
        bottom_legend_script=bottom_legend_script,
        bottom_legend_div=bottom_legend_div,
        analytics_plots_divs=[
            encode_utf8(div) for div in analytics_plots_divs
        ],
        analytics_plots_script=analytics_plots_script,
        metrics=metrics,
        markets=markets,
        sensor_types=sensor_types,
        assets=assets,
        asset_group_names=asset_group_names,
        selected_market=selected_market,
        selected_resource=selected_resource,
        selected_sensor_type=selected_sensor_type,
        selected_sensor=selected_weather_sensor,
        asset_types=session_asset_types,
        showing_pure_consumption_data=showing_pure_consumption_data,
        showing_pure_production_data=showing_pure_production_data,
        show_consumption_as_positive=show_consumption_as_positive,
        showing_individual_traces_for=session["showing_individual_traces_for"],
        offer_showing_individual_traces=selected_resource.is_eligible_for_comparing_individual_traces(),
        forecast_horizons=time_utils.forecast_horizons_for(
            session["resolution"]),
        active_forecast_horizon=session["forecast_horizon"],
    )
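
The four analytics figures are linked by passing the same Range1d instance as each figure's x_range, which is what makes box-zooming on one plot move the others as well; components() then turns the figures into one <script> plus one <div> per figure for the Jinja template. A minimal sketch of that linking, using only public Bokeh APIs (figure titles and data are made up):

# Sketch: two figures sharing an x range, embedded via components().
from bokeh.embed import components
from bokeh.models import Range1d
from bokeh.plotting import figure

shared_x_range = Range1d(start=0, end=10)
tools = ["box_zoom", "reset", "save"]

power_fig = figure(title="Power", x_range=shared_x_range, tools=tools)
prices_fig = figure(title="Prices", x_range=shared_x_range, tools=tools)
power_fig.line([1, 2, 3], [4, 5, 6])
prices_fig.line([1, 2, 3], [30, 20, 40])

# One <script> and one <div> per figure, ready to drop into a template.
script, (power_div, prices_div) = components((power_fig, prices_fig))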
Example #3
def analytics_data_view(content, content_type):
    """Analytics view as above, but here we only download data.
    Content can be either source or metrics.
    Content-type can be either CSV or JSON.
    """
    # if current_app.config.get("FLEXMEASURES_MODE", "") != "play":
    #    raise NotImplementedError("The analytics data download only works in play mode.")
    if content not in ("source", "metrics"):
        if content is None:
            content = "data"
        else:
            raise NotImplementedError(
                "content can either be source or metrics.")
    if content_type not in ("csv", "json"):
        if content_type is None:
            content_type = "csv"
        else:
            raise NotImplementedError(
                "content_type can either be csv or json.")

    time_utils.set_time_range_for_session()

    # Maybe move some of this stuff into get_data_and_metrics
    assets = get_assets(order_by_asset_attribute="display_name",
                        order_direction="asc")
    asset_groups = get_asset_group_queries(
        custom_additional_groups=["renewables", "EVSE", "each Charge Point"])
    asset_group_names: List[str] = [
        group for group in asset_groups if asset_groups[group].count() > 0
    ]
    selected_resource = set_session_resource(assets, asset_group_names)
    selected_market = set_session_market(selected_resource)
    sensor_types = get_sensor_types(selected_resource)
    selected_sensor_type = set_session_sensor_type(sensor_types)

    # This is useful information - we might want to adapt the sign of the data and labels.
    showing_pure_consumption_data = all(
        [a.is_pure_consumer for a in selected_resource.assets])
    showing_pure_production_data = all(
        [a.is_pure_producer for a in selected_resource.assets])
    # Show consumption as positive unless all assets are pure producers
    show_consumption_as_positive = not showing_pure_production_data

    # Getting data and calculating metrics for them
    query_window, resolution = ensure_timing_vars_are_set((None, None), None)
    data, metrics, weather_type, selected_weather_sensor = get_data_and_metrics(
        query_window,
        resolution,
        show_consumption_as_positive,
        "none",
        selected_resource,
        selected_market,
        selected_sensor_type,
        selected_resource.assets,
    )

    hor = session["forecast_horizon"]
    rev_cost_header = ("costs/revenues"
                       if show_consumption_as_positive else "revenues/costs")
    if showing_pure_consumption_data:
        rev_cost_header = "costs"
    elif showing_pure_production_data:
        rev_cost_header = "revenues"
    source_headers = [
        "time",
        "power_data_label",
        "power",
        "power_forecast_label",
        f"power_forecast_{hor}",
        f"{weather_type}_label",
        f"{weather_type}",
        f"{weather_type}_forecast_label",
        f"{weather_type}_forecast_{hor}",
        "price_label",
        f"price_on_{selected_market.name}",
        "price_forecast_label",
        f"price_forecast_{hor}",
        f"{rev_cost_header}_label",
        rev_cost_header,
        f"{rev_cost_header}_forecast_label",
        f"{rev_cost_header}_forecast_{hor}",
    ]
    source_units = [
        "",
        "",
        "MW",
        "",
        "MW",
        "",
        selected_weather_sensor.unit,
        "",
        selected_weather_sensor.unit,
        "",
        selected_market.price_unit,
        "",
        selected_market.price_unit,
        "",
        selected_market.price_unit[:3],
        "",
        selected_market.price_unit[:3],
    ]
    filename_prefix = "%s_analytics" % selected_resource.name
    if content_type == "csv":
        str_io = io.StringIO()
        writer = csv.writer(str_io, dialect="excel")
        if content == "metrics":
            filename = "%s_metrics.csv" % filename_prefix
            writer.writerow(metrics.keys())
            writer.writerow(metrics.values())
        else:
            filename = "%s_source.csv" % filename_prefix
            writer.writerow(source_headers)
            writer.writerow(source_units)
            for dt in data["rev_cost"].index:
                row = [
                    dt,
                    data["power"].loc[dt].label if "label"
                    in data["power"].columns else "Aggregated power data",
                    data["power"].loc[dt]["event_value"],
                    data["power_forecast"].loc[dt].label
                    if "label" in data["power_forecast"].columns else
                    "Aggregated power forecast data",
                    data["power_forecast"].loc[dt]["event_value"],
                    data["weather"].loc[dt].label
                    if "label" in data["weather"].columns else
                    f"Aggregated {weather_type} data",
                    data["weather"].loc[dt]["event_value"],
                    data["weather_forecast"].loc[dt].label
                    if "label" in data["weather_forecast"].columns else
                    f"Aggregated {weather_type} forecast data",
                    data["weather_forecast"].loc[dt]["event_value"],
                    data["prices"].loc[dt].label if "label"
                    in data["prices"].columns else "Aggregated power data",
                    data["prices"].loc[dt]["event_value"],
                    data["prices_forecast"].loc[dt].label
                    if "label" in data["prices_forecast"].columns else
                    "Aggregated power data",
                    data["prices_forecast"].loc[dt]["event_value"],
                    data["rev_cost"].loc[dt].label
                    if "label" in data["rev_cost"].columns else
                    f"Aggregated {rev_cost_header} data",
                    data["rev_cost"].loc[dt]["event_value"],
                    data["rev_cost_forecast"].loc[dt].label
                    if "label" in data["rev_cost_forecast"].columns else
                    f"Aggregated {rev_cost_header} forecast data",
                    data["rev_cost_forecast"].loc[dt]["event_value"],
                ]
                writer.writerow(row)

        response = make_response(str_io.getvalue())
        response.headers[
            "Content-Disposition"] = "attachment; filename=%s" % filename
        response.headers["Content-type"] = "text/csv"
    else:
        if content == "metrics":
            filename = "%s_metrics.json" % filename_prefix
            response = make_response(json.dumps(metrics))
        else:
            # Not quite done yet. I don't like how we treat forecasts in here yet. Not sure how to mention units.
            filename = "%s_source.json" % filename_prefix
            json_strings = []
            for key in data:
                json_strings.append(
                    f"\"{key}\":{data[key].to_json(orient='index', date_format='iso')}"
                )
            response = make_response("{%s}" % ",".join(json_strings))
        response.headers[
            "Content-Disposition"] = "attachment; filename=%s" % filename
        response.headers["Content-type"] = "application/json"
    return response
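
The CSV branch above follows the usual Flask pattern for serving an in-memory file as a download: write rows into an io.StringIO via csv.writer, wrap the resulting string in make_response, and set the Content-Disposition and Content-type headers. A minimal self-contained sketch under those assumptions (route, filename and rows are illustrative):

# Sketch: serve an in-memory CSV as an attachment, as analytics_data_view does.
import csv
import io

from flask import Flask, make_response

app = Flask(__name__)


@app.route("/example.csv")
def download_example():
    str_io = io.StringIO()
    writer = csv.writer(str_io, dialect="excel")
    writer.writerow(["time", "power"])  # header row
    writer.writerow(["2021-01-01T00:00:00", 0.5])
    response = make_response(str_io.getvalue())
    response.headers["Content-Disposition"] = "attachment; filename=example.csv"
    response.headers["Content-type"] = "text/csv"
    return response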