Example #1
def get(args):
    """List all assets, or the ones owned by a certain user.
    Raise if a non-admin tries to see assets owned by someone else.
    """
    if "owner_id" in args:
        # get_assets ignores owner_id if user is not admin. Here we want to raise a proper auth error.
        if not (current_user.has_role("admin")
                or args["owner_id"] == current_user.id):
            return unauthorized_handler(None, [])
        assets = get_assets(owner_id=int(args["owner_id"]))
    else:
        assets = get_assets()

    return assets_schema.dump(assets), 200
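# A minimal sketch of the authorization rule above, for illustration (the helper
# below is hypothetical, not part of the codebase): admins may list anyone's
# assets, other users only their own.
def may_list_assets_of(owner_id, user) -> bool:
    """Mirror the check in `get`: admin role, or a matching owner id."""
    return user.has_role("admin") or owner_id == user.id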
Example #2
def account_view():
    return render_flexmeasures_template(
        "admin/account.html",
        logged_in_user=current_user,
        roles=",".join([role.name for role in current_user.roles]),
        num_assets=len(get_assets()),
    )
Example #3
def logged_in_user_view():
    """TODO:
    - Show account name & roles
    - Count their assets with a query, link to their (new) list
    """
    return render_flexmeasures_template(
        "admin/logged_in_user.html",
        logged_in_user=current_user,
        roles=",".join([role.name for role in current_user.roles]),
        num_assets=len(get_assets()),
    )
Example #4
def get_connection_response():

    # Look up Asset objects
    user_assets = get_assets()

    # Return entity addresses of assets
    message = dict(connections=[asset.entity_address for asset in user_assets])
    if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
        message["names"] = [asset.name for asset in user_assets]
    else:
        message["names"] = [asset.display_name for asset in user_assets]

    return message
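# A toy rendering of the message built above (stand-in assets with made-up
# attributes; real entity address formats are defined elsewhere in FlexMeasures):
from types import SimpleNamespace

toy_assets = [
    SimpleNamespace(entity_address="<address-1>", name="asset-1", display_name="Asset 1"),
    SimpleNamespace(entity_address="<address-2>", name="asset-2", display_name="Asset 2"),
]
toy_message = dict(connections=[a.entity_address for a in toy_assets])
toy_message["names"] = [a.display_name for a in toy_assets]  # non-play mode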
Example #5
def create_connection_and_value_groups(  # noqa: C901
        unit, generic_asset_name_groups, value_groups, horizon, rolling, start,
        duration):
    """
    Code for POSTing Power values to the API.
    Only lets users post to assets they own.
    The sign of the values is validated according to asset specs, but in USEF terms
    (with positive consumption and negative production). We then store the values
    with reversed sign, following FlexMeasures specs (with positive production
    and negative consumption).

    If power values are not forecasts, forecasting jobs are created.
    """
    from flask import current_app

    current_app.logger.info("POSTING POWER DATA")
    data_source = get_or_create_user_data_source(current_user)
    user_assets = get_assets()
    if not user_assets:
        current_app.logger.info("User doesn't seem to have any assets")
    user_asset_ids = [asset.id for asset in user_assets]
    power_measurements = []
    forecasting_jobs = []
    for connection_group, value_group in zip(generic_asset_name_groups,
                                             value_groups):
        for connection in connection_group:

            # TODO: get asset through util function after refactoring
            # Parse the entity address
            try:
                connection = parse_entity_address(connection,
                                                  entity_type="connection")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            asset_id = connection["asset_id"]

            # Look for the Asset object
            if asset_id in user_asset_ids:
                asset = Asset.query.filter(Asset.id == asset_id).one_or_none()
            else:
                current_app.logger.warning("Cannot identify connection %s" %
                                           connection)
                return unrecognized_connection_group()

            # Validate the sign of the values (following USEF specs with positive consumption and negative production)
            if asset.is_pure_consumer and any(v < 0 for v in value_group):
                extra_info = (
                    "Connection %s is registered as a pure consumer and can only receive non-negative values."
                    % asset.entity_address)
                return power_value_too_small(extra_info)
            elif asset.is_pure_producer and any(v > 0 for v in value_group):
                extra_info = (
                    "Connection %s is registered as a pure producer and can only receive non-positive values."
                    % asset.entity_address)
                return power_value_too_big(extra_info)

            # Create new Power objects
            for j, value in enumerate(value_group):
                dt = start + j * duration / len(value_group)
                if rolling:
                    h = horizon
                else:  # Subtract the difference between the end of the whole time series and the end of this timeslot
                    h = horizon - ((start + duration) -
                                   (dt + duration / len(value_group)))
                p = Power(
                    datetime=dt,
                    # Reverse sign for FlexMeasures specs with positive production and negative consumption
                    value=-value,
                    horizon=h,
                    asset_id=asset.id,
                    data_source_id=data_source.id,
                )
                power_measurements.append(p)

            # make forecasts, but only if the sent-in values are not forecasts themselves
            # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this generic asset
            if horizon <= timedelta(hours=0):
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        "Power",
                        asset_id,
                        start,
                        start + duration,
                        resolution=duration / len(value_group),
                        enqueue=False,
                    ))

    current_app.logger.info("SAVING TO DB AND QUEUEING...")
    try:
        save_to_session(power_measurements)
        db.session.flush()
        for job in forecasting_jobs:
            current_app.queues["forecasting"].enqueue_job(job)
        db.session.commit()
        return request_processed()
    except IntegrityError as e:
        current_app.logger.warning(e)
        db.session.rollback()

        # Allow meter data to be replaced only in play mode
        if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
            save_to_session(power_measurements, overwrite=True)
            for job in forecasting_jobs:
                current_app.queues["forecasting"].enqueue_job(job)
            db.session.commit()
            return request_processed()
        else:
            return already_received_and_successfully_processed()
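# A worked example of the belief horizon arithmetic above (all values made up):
# with 4 values spanning 1 hour, each timeslot is 15 minutes. In the non-rolling
# case, the horizon is reduced by the time between the end of each timeslot and
# the end of the whole series.
from datetime import datetime, timedelta

start = datetime(2021, 1, 1, 12)
duration = timedelta(hours=1)
horizon = timedelta(hours=0)
n = 4  # len(value_group)
for j in range(n):
    dt = start + j * duration / n
    h = horizon - ((start + duration) - (dt + duration / n))
    print(dt.time(), h)  # h works out to -45, -30, -15 and 0 minutes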
Example #6
def collect_connection_and_value_groups(
    unit: str,
    resolution: str,
    belief_horizon_window: Tuple[Optional[timedelta], Optional[timedelta]],
    belief_time_window: Tuple[Optional[datetime_type],
                              Optional[datetime_type]],
    start: datetime_type,
    duration: timedelta,
    connection_groups: List[List[str]],
    user_source_ids: Optional[Union[
        int, List[int]]] = None,  # None is interpreted as all sources
    source_types: Optional[List[str]] = None,
) -> Tuple[dict, int]:
    """
    Code for GETting power values from the API.
    Only allows getting values from assets owned by the current user.
    Returns value sign in accordance with USEF specs
    (with negative production and positive consumption).
    """
    from flask import current_app

    current_app.logger.info("GETTING")
    user_assets = get_assets()
    if not user_assets:
        current_app.logger.info("User doesn't seem to have any assets")
    user_asset_ids = [asset.id for asset in user_assets]

    end = start + duration
    value_groups = []
    # Each connection in the old connection groups will be interpreted as a separate group
    new_connection_groups = []
    for connections in connection_groups:

        # Get the asset names
        asset_names: List[str] = []
        for connection in connections:

            # Parse the entity address
            try:
                connection_details = parse_entity_address(
                    connection, entity_type="connection")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            asset_id = connection_details["asset_id"]

            # Look for the Asset object
            if asset_id in user_asset_ids:
                asset = Asset.query.filter(Asset.id == asset_id).one_or_none()
            else:
                current_app.logger.warning("Cannot identify connection %s" %
                                           connection)
                return unrecognized_connection_group()
            asset_names.append(asset.name)

        # Get the power values
        # TODO: fill NaN for non-existing values
        power_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Power.collect(
            generic_asset_names=asset_names,
            query_window=(start, end),
            resolution=resolution,
            belief_horizon_window=belief_horizon_window,
            belief_time_window=belief_time_window,
            user_source_ids=user_source_ids,
            source_types=source_types,
            sum_multiple=False,
        )
        # Todo: parse time window of power_bdf_dict, which will be different for requests that are not of the form:
        # - start is a timestamp on the hour or a multiple of 15 minutes thereafter
        # - duration is a multiple of 15 minutes
        for k, bdf in power_bdf_dict.items():
            value_groups.append(
                [x * -1 for x in bdf["event_value"].tolist()]
            )  # Reverse sign of values (from FlexMeasures specs to USEF specs)
            new_connection_groups.append(k)
    response = groups_to_dict(new_connection_groups,
                              value_groups,
                              generic_asset_type_name="connection")
    response["start"] = isodate.datetime_isoformat(start)
    response["duration"] = isodate.duration_isoformat(duration)
    response["unit"] = unit  # TODO: convert to requested unit

    d, s = request_processed()
    return dict(**response, **d), s
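# The sign flip in one line (illustrative values): FlexMeasures stores production
# as positive, while the API response follows USEF with consumption as positive,
# hence the multiplication by -1 on the way out (and its mirror image on the way in).
stored = [0.5, -0.2]  # FlexMeasures convention: positive production
reported = [x * -1 for x in stored]  # USEF convention: [-0.5, 0.2]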
Example #7
def analytics_view():
    """Analytics view. Here, four plots (consumption/generation, weather, prices and a profit/loss calculation)
    and a table of metrics data are prepared. This view allows selecting a resource name, from which a
    `models.Resource` object can be made. The resource name is kept in the session.
    Based on the resource, plots and table are labelled appropriately.
    """
    set_time_range_for_session()
    markets = get_markets()
    assets = get_assets(order_by_asset_attribute="display_name",
                        order_direction="asc")
    asset_groups = get_asset_group_queries(
        custom_additional_groups=["renewables", "EVSE", "each Charge Point"])
    asset_group_names: List[str] = [
        group for group in asset_groups if asset_groups[group].count() > 0
    ]
    selected_resource = set_session_resource(assets, asset_group_names)
    selected_market = set_session_market(selected_resource)
    sensor_types = get_sensor_types(selected_resource)
    selected_sensor_type = set_session_sensor_type(sensor_types)
    session_asset_types = selected_resource.unique_asset_types
    set_individual_traces_for_session()
    view_shows_individual_traces = (
        session["showing_individual_traces_for"] in ("power", "schedules")
        and selected_resource.is_eligible_for_comparing_individual_traces())

    query_window, resolution = ensure_timing_vars_are_set((None, None), None)

    # This is useful information - we might want to adapt the sign of the data and labels.
    showing_pure_consumption_data = all(
        [a.is_pure_consumer for a in selected_resource.assets])
    showing_pure_production_data = all(
        [a.is_pure_producer for a in selected_resource.assets])
    # Only show production positive if all assets are producers
    show_consumption_as_positive = not showing_pure_production_data

    data, metrics, weather_type, selected_weather_sensor = get_data_and_metrics(
        query_window,
        resolution,
        show_consumption_as_positive,
        session["showing_individual_traces_for"]
        if view_shows_individual_traces else "none",
        selected_resource,
        selected_market,
        selected_sensor_type,
        selected_resource.assets,
    )

    # Set shared x range
    shared_x_range = Range1d(start=query_window[0], end=query_window[1])
    shared_x_range2 = Range1d(
        start=query_window[0], end=query_window[1]
    )  # only needed if we draw two legends (if individual traces are on)

    # TODO: get rid of this hack, which we use because we mock the current year's data from 2015 data in demo mode
    # Our demo server uses 2015 data as if it's the current year's data. Here we mask future beliefs.
    if current_app.config.get("FLEXMEASURES_MODE", "") == "demo":

        most_recent_quarter = time_utils.get_most_recent_quarter()

        # Show only past data, pretending we're in the current year
        if not data["power"].empty:
            data["power"] = data["power"].loc[
                data["power"].index.get_level_values(
                    "event_start") < most_recent_quarter]
        if not data["prices"].empty:
            data["prices"] = data["prices"].loc[
                data["prices"].index < most_recent_quarter +
                timedelta(hours=24)]  # keep tomorrow's prices
        if not data["weather"].empty:
            data["weather"] = data["weather"].loc[
                data["weather"].index < most_recent_quarter]
        if not data["rev_cost"].empty:
            data["rev_cost"] = data["rev_cost"].loc[
                data["rev_cost"].index.get_level_values(
                    "event_start") < most_recent_quarter]

        # Show forecasts only up to a limited horizon
        horizon_days = 10  # keep a 10 day forecast
        max_forecast_datetime = most_recent_quarter + timedelta(
            hours=horizon_days * 24)
        if not data["power_forecast"].empty:
            data["power_forecast"] = data["power_forecast"].loc[
                data["power_forecast"].index < max_forecast_datetime]
        if not data["prices_forecast"].empty:
            data["prices_forecast"] = data["prices_forecast"].loc[
                data["prices_forecast"].index < max_forecast_datetime]
        if not data["weather_forecast"].empty:
            data["weather_forecast"] = data["weather_forecast"].loc[
                data["weather_forecast"].index < max_forecast_datetime]
        if not data["rev_cost_forecast"].empty:
            data["rev_cost_forecast"] = data["rev_cost_forecast"].loc[
                data["rev_cost_forecast"].index < max_forecast_datetime]

    # Making figures
    tools = ["box_zoom", "reset", "save"]
    power_fig = make_power_figure(
        selected_resource.display_name,
        data["power"],
        data["power_forecast"],
        data["power_schedule"],
        show_consumption_as_positive,
        shared_x_range,
        tools=tools,
    )
    rev_cost_fig = make_revenues_costs_figure(
        selected_resource.display_name,
        data["rev_cost"],
        data["rev_cost_forecast"],
        show_consumption_as_positive,
        shared_x_range,
        selected_market,
        tools=tools,
    )
    # the bottom plots need a separate x axis if they get their own legend (Bokeh complains otherwise);
    # this means that in that corner case, zooming will not work across all four plots
    prices_fig = make_prices_figure(
        data["prices"],
        data["prices_forecast"],
        shared_x_range2 if view_shows_individual_traces else shared_x_range,
        selected_market,
        tools=tools,
    )
    weather_fig = make_weather_figure(
        selected_resource,
        data["weather"],
        data["weather_forecast"],
        shared_x_range2 if view_shows_individual_traces else shared_x_range,
        selected_weather_sensor,
        tools=tools,
    )

    # Separate a single legend and remove the others.
    # In case of individual traces, we need two legends.
    top_legend_fig = separate_legend(power_fig, orientation="horizontal")
    top_legend_script, top_legend_div = components(top_legend_fig)
    rev_cost_fig.renderers.remove(rev_cost_fig.legend[0])
    if view_shows_individual_traces:
        bottom_legend_fig = separate_legend(weather_fig,
                                            orientation="horizontal")
        prices_fig.renderers.remove(prices_fig.legend[0])
        bottom_legend_script, bottom_legend_div = components(bottom_legend_fig)
    else:
        prices_fig.renderers.remove(prices_fig.legend[0])
        weather_fig.renderers.remove(weather_fig.legend[0])
        bottom_legend_fig = bottom_legend_script = bottom_legend_div = None

    analytics_plots_script, analytics_plots_divs = components(
        (power_fig, rev_cost_fig, prices_fig, weather_fig))

    return render_flexmeasures_template(
        "views/analytics.html",
        top_legend_height=top_legend_fig.plot_height,
        top_legend_script=top_legend_script,
        top_legend_div=top_legend_div,
        bottom_legend_height=0
        if bottom_legend_fig is None else bottom_legend_fig.plot_height,
        bottom_legend_script=bottom_legend_script,
        bottom_legend_div=bottom_legend_div,
        analytics_plots_divs=[
            encode_utf8(div) for div in analytics_plots_divs
        ],
        analytics_plots_script=analytics_plots_script,
        metrics=metrics,
        markets=markets,
        sensor_types=sensor_types,
        assets=assets,
        asset_group_names=asset_group_names,
        selected_market=selected_market,
        selected_resource=selected_resource,
        selected_sensor_type=selected_sensor_type,
        selected_sensor=selected_weather_sensor,
        asset_types=session_asset_types,
        showing_pure_consumption_data=showing_pure_consumption_data,
        showing_pure_production_data=showing_pure_production_data,
        show_consumption_as_positive=show_consumption_as_positive,
        showing_individual_traces_for=session["showing_individual_traces_for"],
        offer_showing_individual_traces=selected_resource.
        is_eligible_for_comparing_individual_traces(),
        forecast_horizons=time_utils.forecast_horizons_for(
            session["resolution"]),
        active_forecast_horizon=session["forecast_horizon"],
    )
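# A small sketch of the masking pattern used in demo mode above (frame shapes
# assumed): the power and rev_cost frames carry a MultiIndex with an "event_start"
# level, so time filtering goes through get_level_values, while the price and
# weather frames use a plain DatetimeIndex and can be compared directly.
import pandas as pd

idx = pd.MultiIndex.from_product(
    [pd.date_range("2015-06-01", periods=4, freq="15min"), ["source A"]],
    names=["event_start", "source"],
)
df = pd.DataFrame({"event_value": [1.0, 2.0, 3.0, 4.0]}, index=idx)
cutoff = pd.Timestamp("2015-06-01 00:30")
print(df.loc[df.index.get_level_values("event_start") < cutoff])  # first two rows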
Example #8
def analytics_data_view(content, content_type):
    """Analytics view as above, but here we only download data.
    Content can be either source or metrics.
    Content-type can be either CSV or JSON.
    """
    # if current_app.config.get("FLEXMEASURES_MODE", "") != "play":
    #    raise NotImplementedError("The analytics data download only works in play mode.")
    if content not in ("source", "metrics"):
        if content is None:
            content = "data"
        else:
            raise NotImplementedError(
                "content can be either source or metrics.")
    if content_type not in ("csv", "json"):
        if content_type is None:
            content_type = "csv"
        else:
            raise NotImplementedError(
                "content_type can be either csv or json.")

    time_utils.set_time_range_for_session()

    # Maybe move some of this stuff into get_data_and_metrics
    assets = get_assets(order_by_asset_attribute="display_name",
                        order_direction="asc")
    asset_groups = get_asset_group_queries(
        custom_additional_groups=["renewables", "EVSE", "each Charge Point"])
    asset_group_names: List[str] = [
        group for group in asset_groups if asset_groups[group].count() > 0
    ]
    selected_resource = set_session_resource(assets, asset_group_names)
    selected_market = set_session_market(selected_resource)
    sensor_types = get_sensor_types(selected_resource)
    selected_sensor_type = set_session_sensor_type(sensor_types)

    # This is useful information - we might want to adapt the sign of the data and labels.
    showing_pure_consumption_data = all(
        [a.is_pure_consumer for a in selected_resource.assets])
    showing_pure_production_data = all(
        [a.is_pure_producer for a in selected_resource.assets])
    # Only show production positive if all assets are producers
    show_consumption_as_positive = not showing_pure_production_data

    # Getting data and calculating metrics for them
    query_window, resolution = ensure_timing_vars_are_set((None, None), None)
    data, metrics, weather_type, selected_weather_sensor = get_data_and_metrics(
        query_window,
        resolution,
        show_consumption_as_positive,
        "none",
        selected_resource,
        selected_market,
        selected_sensor_type,
        selected_resource.assets,
    )

    hor = session["forecast_horizon"]
    rev_cost_header = ("costs/revenues"
                       if show_consumption_as_positive else "revenues/costs")
    if showing_pure_consumption_data:
        rev_cost_header = "costs"
    elif showing_pure_production_data:
        rev_cost_header = "revenues"
    source_headers = [
        "time",
        "power_data_label",
        "power",
        "power_forecast_label",
        f"power_forecast_{hor}",
        f"{weather_type}_label",
        f"{weather_type}",
        f"{weather_type}_forecast_label",
        f"{weather_type}_forecast_{hor}",
        "price_label",
        f"price_on_{selected_market.name}",
        "price_forecast_label",
        f"price_forecast_{hor}",
        f"{rev_cost_header}_label",
        rev_cost_header,
        f"{rev_cost_header}_forecast_label",
        f"{rev_cost_header}_forecast_{hor}",
    ]
    source_units = [
        "",
        "",
        "MW",
        "",
        "MW",
        "",
        selected_weather_sensor.unit,
        "",
        selected_weather_sensor.unit,
        "",
        selected_market.price_unit,
        "",
        selected_market.price_unit,
        "",
        selected_market.price_unit[:3],
        "",
        selected_market.price_unit[:3],
    ]
    filename_prefix = "%s_analytics" % selected_resource.name
    if content_type == "csv":
        str_io = io.StringIO()
        writer = csv.writer(str_io, dialect="excel")
        if content == "metrics":
            filename = "%s_metrics.csv" % filename_prefix
            writer.writerow(metrics.keys())
            writer.writerow(metrics.values())
        else:
            filename = "%s_source.csv" % filename_prefix
            writer.writerow(source_headers)
            writer.writerow(source_units)
            for dt in data["rev_cost"].index:
                row = [
                    dt,
                    data["power"].loc[dt].label if "label"
                    in data["power"].columns else "Aggregated power data",
                    data["power"].loc[dt]["event_value"],
                    data["power_forecast"].loc[dt].label
                    if "label" in data["power_forecast"].columns else
                    "Aggregated power forecast data",
                    data["power_forecast"].loc[dt]["event_value"],
                    data["weather"].loc[dt].label
                    if "label" in data["weather"].columns else
                    f"Aggregated {weather_type} data",
                    data["weather"].loc[dt]["event_value"],
                    data["weather_forecast"].loc[dt].label
                    if "label" in data["weather_forecast"].columns else
                    f"Aggregated {weather_type} forecast data",
                    data["weather_forecast"].loc[dt]["event_value"],
                    data["prices"].loc[dt].label if "label"
                    in data["prices"].columns else "Aggregated power data",
                    data["prices"].loc[dt]["event_value"],
                    data["prices_forecast"].loc[dt].label
                    if "label" in data["prices_forecast"].columns else
                    "Aggregated power data",
                    data["prices_forecast"].loc[dt]["event_value"],
                    data["rev_cost"].loc[dt].label
                    if "label" in data["rev_cost"].columns else
                    f"Aggregated {rev_cost_header} data",
                    data["rev_cost"].loc[dt]["event_value"],
                    data["rev_cost_forecast"].loc[dt].label
                    if "label" in data["rev_cost_forecast"].columns else
                    f"Aggregated {rev_cost_header} forecast data",
                    data["rev_cost_forecast"].loc[dt]["event_value"],
                ]
                writer.writerow(row)

        response = make_response(str_io.getvalue())
        response.headers[
            "Content-Disposition"] = "attachment; filename=%s" % filename
        response.headers["Content-type"] = "text/csv"
    else:
        if content == "metrics":
            filename = "%s_metrics.json" % filename_prefix
            response = make_response(json.dumps(metrics))
        else:
            # Not quite done yet: the treatment of forecasts here needs work, and units are not mentioned.
            filename = "%s_source.json" % filename_prefix
            json_strings = []
            for key in data:
                json_strings.append(
                    f"\"{key}\":{data[key].to_json(orient='index', date_format='iso')}"
                )
            response = make_response("{%s}" % ",".join(json_strings))
        response.headers[
            "Content-Disposition"] = "attachment; filename=%s" % filename
        response.headers["Content-type"] = "application/json"
    return response
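# Resulting CSV layout, for illustration (values made up): one row of column
# names, one row of units, then one data row per timestamp in the rev_cost index:
#   time,power_data_label,power,power_forecast_label,power_forecast_<hor>,...
#   ,,MW,,MW,...
#   2021-01-01 00:00:00,Aggregated power data,0.5,...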
Example #9
def portfolio_view():  # noqa: C901
    """Portfolio view.
    By default, this page shows live results (production, consumption and market data) from the user's portfolio.
    Time windows for which the platform has identified upcoming balancing opportunities are highlighted.
    The page can also be used to navigate historical results.
    """

    set_time_range_for_session()
    start = session.get("start_time")
    end = session.get("end_time")
    resolution = session.get("resolution")

    # Get plot perspective
    perspectives = ["production", "consumption"]
    default_stack_side = "production"  # todo: move to user config setting
    show_stacked = request.values.get("show_stacked", default_stack_side)
    perspectives.remove(show_stacked)
    show_summed: str = perspectives[0]
    plot_label = f"Stacked {show_stacked} vs aggregated {show_summed}"

    # Get structure and data
    assets: List[Asset] = get_assets(
        order_by_asset_attribute="display_name", order_direction="asc"
    )
    represented_asset_types, markets, resource_dict = get_structure(assets)
    for resource_name, resource in resource_dict.items():
        resource.load_sensor_data(
            [Power, Price],
            start=start,
            end=end,
            resolution=resolution,
            exclude_source_types=["scheduling script"],
        )  # The resource caches the results
    (
        supply_resources_df_dict,
        demand_resources_df_dict,
        production_per_asset_type,
        consumption_per_asset_type,
        production_per_asset,
        consumption_per_asset,
    ) = get_power_data(resource_dict)
    price_bdf_dict, average_price_dict = get_price_data(resource_dict)

    # Pick a perspective for summing and for stacking
    sum_dict = (
        demand_resources_df_dict.values()
        if show_summed == "consumption"
        else supply_resources_df_dict.values()
    )
    power_sum_df = (
        pd.concat(sum_dict, axis=1).sum(axis=1).to_frame(name="event_value")
        if sum_dict
        else pd.DataFrame()
    )
    stack_dict = (
        rename_event_value_column_to_resource_name(supply_resources_df_dict).values()
        if show_summed == "consumption"
        else rename_event_value_column_to_resource_name(
            demand_resources_df_dict
        ).values()
    )
    df_stacked_data = pd.concat(stack_dict, axis=1) if stack_dict else pd.DataFrame()

    # Create summed plot
    power_sum_df = data_or_zeroes(power_sum_df, start, end, resolution)
    x_range = plotting.make_range(
        pd.date_range(start, end, freq=resolution, closed="left")
    )
    fig_profile = plotting.create_graph(
        power_sum_df,
        unit="MW",
        title=plot_label,
        x_range=x_range,
        x_label="Time (resolution of %s)"
        % time_utils.freq_label_to_human_readable_label(resolution),
        y_label="Power (in MW)",
        legend_location="top_right",
        legend_labels=(capitalize(show_summed), None, None),
        show_y_floats=True,
        non_negative_only=True,
    )
    fig_profile.plot_height = 450
    fig_profile.plot_width = 900

    # Create stacked plot
    df_stacked_data = data_or_zeroes(df_stacked_data, start, end, resolution)
    df_stacked_areas = stack_df(df_stacked_data)

    num_areas = df_stacked_areas.shape[1]
    if num_areas <= 2:
        colors = ["#99d594", "#dddd9d"]
    else:
        colors = palettes.brewer["Spectral"][num_areas]

    df_stacked_data = time_utils.tz_index_naively(df_stacked_data)
    x_points = np.hstack((df_stacked_data.index[::-1], df_stacked_data.index))

    fig_profile.grid.minor_grid_line_color = "#eeeeee"

    for a, area in enumerate(df_stacked_areas):
        fig_profile.patch(
            x_points,
            df_stacked_areas[area].values,
            color=colors[a],
            alpha=0.8,
            line_color=None,
            legend=df_stacked_data.columns[a],
            level="underlay",
        )

    # Flexibility numbers are mocked for now
    curtailment_per_asset = {a.name: 0 for a in assets}
    shifting_per_asset = {a.name: 0 for a in assets}
    profit_loss_flexibility_per_asset = {a.name: 0 for a in assets}
    curtailment_per_asset_type = {k: 0 for k in represented_asset_types.keys()}
    shifting_per_asset_type = {k: 0 for k in represented_asset_types.keys()}
    profit_loss_flexibility_per_asset_type = {
        k: 0 for k in represented_asset_types.keys()
    }
    shifting_per_asset["48_r"] = 1.1
    profit_loss_flexibility_per_asset["48_r"] = 76000
    shifting_per_asset_type["one-way EVSE"] = shifting_per_asset["48_r"]
    profit_loss_flexibility_per_asset_type[
        "one-way EVSE"
    ] = profit_loss_flexibility_per_asset["48_r"]
    curtailment_per_asset["hw-onshore"] = 1.3
    profit_loss_flexibility_per_asset["hw-onshore"] = 84000
    curtailment_per_asset_type["wind turbines"] = curtailment_per_asset["hw-onshore"]
    profit_loss_flexibility_per_asset_type[
        "wind turbines"
    ] = profit_loss_flexibility_per_asset["hw-onshore"]

    # Add referral to mocked control action
    this_hour = time_utils.get_most_recent_hour()
    next4am = [
        dt
        for dt in [this_hour + timedelta(hours=i) for i in range(1, 25)]
        if dt.hour == 4
    ][0]

    # TODO: show when user has (possible) actions in order book for a time slot
    if current_user.is_authenticated and (
        current_user.has_role("admin")
        or "wind" in current_user.email
        or "charging" in current_user.email
    ):
        plotting.highlight(
            fig_profile, next4am, next4am + timedelta(hours=1), redirect_to="/control"
        )

    # actions
    df_actions = pd.DataFrame(index=power_sum_df.index, columns=["event_value"]).fillna(
        0
    )
    if next4am in df_actions.index:
        if current_user.is_authenticated:
            if current_user.has_role("admin"):
                df_actions.loc[next4am] = -2.4  # mock two actions
            elif "wind" in current_user.email:
                df_actions.loc[next4am] = -1.3  # mock one action
            elif "charging" in current_user.email:
                df_actions.loc[next4am] = -1.1  # mock one action
    next2am = [
        dt
        for dt in [this_hour + timedelta(hours=i) for i in range(1, 25)]
        if dt.hour == 2
    ][0]
    if next2am in df_actions.index:
        if next2am < next4am and (
            current_user.is_authenticated
            and (
                current_user.has_role("admin")
                or "wind" in current_user.email
                or "charging" in current_user.email
            )
        ):
            # mock the shift "payback" (actually occurs earlier in our mock example)
            df_actions.loc[next2am] = 1.1
    next9am = [
        dt
        for dt in [this_hour + timedelta(hours=i) for i in range(1, 25)]
        if dt.hour == 9
    ][0]
    if next9am in df_actions.index:
        # mock some other ordered actions that are not in an opportunity hour anymore
        df_actions.loc[next9am] = 3.5

    fig_actions = plotting.create_graph(
        df_actions,
        unit="MW",
        title="Ordered balancing actions",
        x_range=x_range,
        y_label="Power (in MW)",
    )
    fig_actions.plot_height = 150
    fig_actions.plot_width = fig_profile.plot_width
    fig_actions.xaxis.visible = False

    if current_user.is_authenticated and (
        current_user.has_role("admin")
        or "wind" in current_user.email
        or "charging" in current_user.email
    ):
        plotting.highlight(
            fig_actions, next4am, next4am + timedelta(hours=1), redirect_to="/control"
        )

    portfolio_plots_script, portfolio_plots_divs = components(
        (fig_profile, fig_actions)
    )
    next24hours = [
        (time_utils.get_most_recent_hour() + timedelta(hours=i)).strftime("%I:00 %p")
        for i in range(1, 26)
    ]

    return render_flexmeasures_template(
        "views/portfolio.html",
        assets=assets,
        average_prices=average_price_dict,
        asset_types=represented_asset_types,
        markets=markets,
        production_per_asset=production_per_asset,
        consumption_per_asset=consumption_per_asset,
        curtailment_per_asset=curtailment_per_asset,
        shifting_per_asset=shifting_per_asset,
        profit_loss_flexibility_per_asset=profit_loss_flexibility_per_asset,
        production_per_asset_type=production_per_asset_type,
        consumption_per_asset_type=consumption_per_asset_type,
        curtailment_per_asset_type=curtailment_per_asset_type,
        shifting_per_asset_type=shifting_per_asset_type,
        profit_loss_flexibility_per_asset_type=profit_loss_flexibility_per_asset_type,
        sum_production=sum(production_per_asset_type.values()),
        sum_consumption=sum(consumption_per_asset_type.values()),
        sum_curtailment=sum(curtailment_per_asset_type.values()),
        sum_shifting=sum(shifting_per_asset_type.values()),
        sum_profit_loss_flexibility=sum(
            profit_loss_flexibility_per_asset_type.values()
        ),
        portfolio_plots_script=portfolio_plots_script,
        portfolio_plots_divs=portfolio_plots_divs,
        next24hours=next24hours,
        alt_stacking=show_summed,
    )
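# Why x_points doubles the index above (a sketch; stack_df internals assumed):
# a Bokeh patch is a closed polygon, so each stacked area is drawn by walking
# the x axis backwards along one boundary and forwards along the other, with
# stack_df assumed to supply the matching concatenated y values per area.
import numpy as np

index = np.array([0, 1, 2])
x_points = np.hstack((index[::-1], index))  # [2, 1, 0, 0, 1, 2]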
Example #10
def portfolio_view():  # noqa: C901
    """Portfolio view.
    By default, this page shows live results (production, consumption and market data) from the user's portfolio.
    Time windows for which the platform has identified upcoming balancing opportunities are highlighted.
    The page can also be used to navigate historical results.
    """

    set_time_range_for_session()
    start = session.get("start_time")
    end = session.get("end_time")
    resolution = session.get("resolution")

    # Get plot perspective
    perspectives = ["production", "consumption"]
    default_stack_side = "production"  # todo: move to user config setting
    show_stacked = request.values.get("show_stacked", default_stack_side)
    perspectives.remove(show_stacked)
    show_summed: str = perspectives[0]
    plot_label = f"Stacked {show_stacked} vs aggregated {show_summed}"

    # Get structure and data
    assets: List[Asset] = get_assets(order_by_asset_attribute="display_name",
                                     order_direction="asc")
    represented_asset_types, markets, resource_dict = get_structure(assets)
    for resource_name, resource in resource_dict.items():
        resource.load_sensor_data(
            [Power, Price],
            start=start,
            end=end,
            resolution=resolution,
            exclude_source_types=["scheduling script"],
        )  # The resource caches the results
    (
        supply_resources_df_dict,
        demand_resources_df_dict,
        production_per_asset_type,
        consumption_per_asset_type,
        production_per_asset,
        consumption_per_asset,
    ) = get_power_data(resource_dict)
    price_bdf_dict, average_price_dict = get_price_data(resource_dict)

    # Pick a perspective for summing and for stacking
    sum_dict = (demand_resources_df_dict.values() if show_summed
                == "consumption" else supply_resources_df_dict.values())
    power_sum_df = (pd.concat(sum_dict, axis=1).sum(axis=1).to_frame(
        name="event_value") if sum_dict else pd.DataFrame())

    # Create summed plot
    power_sum_df = data_or_zeroes(power_sum_df, start, end, resolution)
    x_range = plotting.make_range(
        pd.date_range(start, end, freq=resolution, closed="left"))
    fig_profile = plotting.create_graph(
        power_sum_df,
        unit="MW",
        title=plot_label,
        x_range=x_range,
        x_label="Time (resolution of %s)" %
        time_utils.freq_label_to_human_readable_label(resolution),
        y_label="Power (in MW)",
        legend_location="top_right",
        legend_labels=(capitalize(show_summed), None, None),
        show_y_floats=True,
        non_negative_only=True,
    )
    fig_profile.plot_height = 450
    fig_profile.plot_width = 900

    # Create stacked plot
    stack_dict = (rename_event_value_column_to_resource_name(
        supply_resources_df_dict).values() if show_summed == "consumption" else
                  rename_event_value_column_to_resource_name(
                      demand_resources_df_dict).values())
    df_stacked_data = pd.concat(stack_dict,
                                axis=1) if stack_dict else pd.DataFrame()
    df_stacked_data = data_or_zeroes(df_stacked_data, start, end, resolution)
    df_stacked_areas = stack_df(df_stacked_data)

    num_areas = df_stacked_areas.shape[1]
    if num_areas <= 2:
        colors = ["#99d594", "#dddd9d"]
    else:
        colors = palettes.brewer["Spectral"][num_areas]

    df_stacked_data = time_utils.tz_index_naively(df_stacked_data)
    x_points = np.hstack((df_stacked_data.index[::-1], df_stacked_data.index))

    fig_profile.grid.minor_grid_line_color = "#eeeeee"

    for a, area in enumerate(df_stacked_areas):
        fig_profile.patch(
            x_points,
            df_stacked_areas[area].values,
            color=colors[a],
            alpha=0.8,
            line_color=None,
            legend=df_stacked_data.columns[a],
            level="underlay",
        )

    portfolio_plots_script, portfolio_plots_divs = components(fig_profile)

    # Flexibility numbers and a control action are currently mocked in demo mode
    flex_info = {}
    if current_app.config.get("FLEXMEASURES_MODE") == "demo":
        flex_info = mock_flex_info(assets, represented_asset_types)
        fig_actions = mock_flex_figure(x_range, power_sum_df.index,
                                       fig_profile.plot_width)
        mock_flex_action_in_main_figure(fig_profile)
        portfolio_plots_script, portfolio_plots_divs = components(
            (fig_profile, fig_actions))

    return render_flexmeasures_template(
        "views/portfolio.html",
        assets=assets,
        average_prices=average_price_dict,
        asset_types=represented_asset_types,
        markets=markets,
        production_per_asset=production_per_asset,
        consumption_per_asset=consumption_per_asset,
        production_per_asset_type=production_per_asset_type,
        consumption_per_asset_type=consumption_per_asset_type,
        sum_production=sum(production_per_asset_type.values()),
        sum_consumption=sum(consumption_per_asset_type.values()),
        flex_info=flex_info,
        portfolio_plots_script=portfolio_plots_script,
        portfolio_plots_divs=portfolio_plots_divs,
        alt_stacking=show_summed,
        fm_mode=current_app.config.get("FLEXMEASURES_MODE"),
    )
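# Note on the two components() calls above: Bokeh's components returns a single
# div for a single figure and a tuple of divs for a tuple of figures, so the
# demo branch recomputes both the script and the divs once fig_actions exists.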