Example #1
def get_power_data(
    resource_dict: Dict[str, Resource]
) -> Tuple[
    Dict[str, pd.DataFrame],
    Dict[str, pd.DataFrame],
    Dict[str, float],
    Dict[str, float],
    Dict[str, float],
    Dict[str, float],
]:
    """Get power data, separating demand and supply,
    as time series per resource and as totals (summed over time) per resource and per asset.

    Getting sensor data of a Resource leads to database queries (unless results are already cached).

    :returns: a tuple comprising:
        - a dictionary of resource names (as keys) and a DataFrame with aggregated time series of supply (as values)
        - a dictionary of resource names (as keys) and a DataFrame with aggregated time series of demand (as values)
        - a dictionary of resource names (as keys) and their total supply summed over time (as values)
        - a dictionary of resource names (as keys) and their total demand summed over time (as values)
        - a dictionary of asset names (as keys) and their total supply summed over time (as values)
        - a dictionary of asset names (as keys) and their total demand summed over time (as values)
    """

    # Load power data (separate demand and supply, and group data per resource)
    supply_per_resource: Dict[str, pd.DataFrame] = {}  # raw power >= 0 (production/supply)
    demand_per_resource: Dict[str, pd.DataFrame] = {}  # raw power <= 0, but stored demand values are >= 0
    total_supply_per_asset: Dict[str, float] = {}
    total_demand_per_asset: Dict[str, float] = {}
    for resource_name, resource in resource_dict.items():
        if (resource.aggregate_demand.values != 0).any():
            demand_per_resource[resource_name] = simplify_index(
                resource.aggregate_demand
            )
        if (resource.aggregate_supply.values != 0).any():
            supply_per_resource[resource_name] = simplify_index(
                resource.aggregate_supply
            )
        total_supply_per_asset = {**total_supply_per_asset, **resource.total_supply}
        total_demand_per_asset = {**total_demand_per_asset, **resource.total_demand}
    total_supply_per_resource = {
        k: v.total_aggregate_supply for k, v in resource_dict.items()
    }
    total_demand_per_resource = {
        k: v.total_aggregate_demand for k, v in resource_dict.items()
    }
    return (
        supply_per_resource,
        demand_per_resource,
        total_supply_per_resource,
        total_demand_per_resource,
        total_supply_per_asset,
        total_demand_per_asset,
    )
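A note on simplify_index itself: it is not shown on this page. Its call sites (and the test in Example #5) suggest it collapses a BeliefsDataFrame's multi-index down to event_start, optionally keeping other index levels as plain columns, while retaining metadata such as event_resolution. A minimal sketch of that presumed behavior, for plain pandas frames:

from typing import List, Optional

import pandas as pd

def simplify_index_sketch(
    bdf: pd.DataFrame, index_levels_to_columns: Optional[List[str]] = None
) -> pd.DataFrame:
    """Sketch only: collapse the belief multi-index down to event_start."""
    keep = index_levels_to_columns or []
    other_levels = [lvl for lvl in bdf.index.names if lvl != "event_start"]
    df = bdf.reset_index(level=other_levels)  # turn the other levels into columns
    return df.drop(columns=[lvl for lvl in other_levels if lvl not in keep])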
Example #2
    def cost(self) -> Dict[str, Optional[float]]:
        """Return each asset's total cost from demand (None if the asset has no market)."""
        cost_dict = {}
        for k, v in self.demand.items():
            market_name = self.asset_name_to_market_name_map[k]
            if market_name is not None:
                cost_dict[k] = (
                    simplify_index(v) * simplify_index(self.price_data[market_name])
                ).sum().values[0] * time_utils.resolution_to_hour_factor(v.event_resolution)
            else:
                cost_dict[k] = None
        return cost_dict
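time_utils.resolution_to_hour_factor is not shown here either; multiplying by it presumably converts a per-event power average (MW) into energy (MWh). A hedged sketch with a worked example:

from datetime import timedelta

def resolution_to_hour_factor_sketch(event_resolution: timedelta) -> float:
    """Presumed behavior: express one event's duration in hours."""
    return event_resolution / timedelta(hours=1)

# E.g. 15-minute events: 2 MW sustained for one event is 2 * 0.25 = 0.5 MWh.
assert resolution_to_hour_factor_sketch(timedelta(minutes=15)) == 0.25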
Example #3
def aggregate_values(
        bdf_dict: Dict[str, tb.BeliefsDataFrame]) -> tb.BeliefsDataFrame:

    # todo: test this function rigorously, e.g. with empty bdfs in bdf_dict
    # todo: consider 1 bdf with beliefs from source A, plus 1 bdf with beliefs from source B -> 1 bdf with sources A+B
    # todo: consider 1 bdf with beliefs from sources A and B, plus 1 bdf with beliefs from source C. -> 1 bdf with sources A+B and A+C
    # todo: consider 1 bdf with beliefs from sources A and B, plus 1 bdf with beliefs from source C and D. -> 1 bdf with sources A+B, A+C, B+C and B+D
    # Relevant issue: https://github.com/SeitaBV/timely-beliefs/issues/33
    unique_source_ids: List[int] = []
    for bdf in bdf_dict.values():
        unique_source_ids.extend(bdf.lineage.sources)
        if not bdf.lineage.unique_beliefs_per_event_per_source:
            current_app.logger.warning(
                "Not implemented: only aggregation of deterministic uni-source beliefs (1 per event) is properly supported"
            )
        if bdf.lineage.number_of_sources > 1:
            current_app.logger.warning(
                "Not implemented: aggregating multi-source beliefs about the same sensor."
            )
    if len(set(unique_source_ids)) > 1:
        current_app.logger.warning(
            f"Not implemented: aggregating multi-source beliefs. Sources {unique_source_ids[1:]} will be treated as if they were source {unique_source_ids[0]}."
        )

    data_as_bdf = tb.BeliefsDataFrame()
    for k, v in bdf_dict.items():
        if data_as_bdf.empty:
            data_as_bdf = v.copy()
        elif not v.empty:
            data_as_bdf["event_value"] = data_as_bdf["event_value"].add(
                simplify_index(v.copy())["event_value"],
                fill_value=0,
                level="event_start",
            )  # we only look at the event_start index level and sum up duplicates at that level
    return data_as_bdf
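The core of aggregate_values is pandas' add with fill_value=0, which treats events missing from one frame as zero instead of propagating NaN. A self-contained illustration (using a flat event_start index for simplicity, so no level argument is needed):

import pandas as pd

idx = pd.DatetimeIndex(["2015-01-01 00:00", "2015-01-01 00:15"], name="event_start")
a = pd.Series([1.0, 2.0], index=idx, name="event_value")
b = pd.Series([10.0], index=idx[:1], name="event_value")

print(a.add(b, fill_value=0))  # 11.0 at 00:00, 2.0 (not NaN) at 00:15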
Example #4
    def _load_series(self) -> pd.Series:
        logger.info("Reading %s data from database" % self.time_series_class.__name__)

        bdf: BeliefsDataFrame = getattr(self.time_series_class, self.search_fnc)(
            **self.search_params
        )
        assert isinstance(bdf, BeliefsDataFrame)
        df = simplify_index(bdf)
        self.check_data(df)

        if self.post_load_processing is not None:
            df = self.post_load_processing.transform_dataframe(df)

        return df["event_value"]
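The optional post_load_processing step only needs to expose a transform_dataframe method. A hypothetical example of such a step (the class name and behavior are illustrative, not part of the project):

import pandas as pd

class ClipNegativePower:
    """Hypothetical post-load step matching the interface used above."""

    def transform_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:
        df = df.copy()
        df.loc[df["event_value"] < 0, "event_value"] = 0.0  # clip negatives to zero
        return df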
Example #5
def test_simplify_index(setup_test_data, check_empty_frame):
    """Check whether simplify_index retains the event resolution."""
    wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none()
    bdf: tb.BeliefsDataFrame = TimedBelief.search(
        wind_device_1.name,
        event_starts_after=datetime(2015, 1, 1, tzinfo=pytz.utc),
        event_ends_before=datetime(2015, 1, 2, tzinfo=pytz.utc),
        resolution=timedelta(minutes=15),
    ).convert_index_from_belief_time_to_horizon()
    if check_empty_frame:
        # We empty the BeliefsDataFrame, which retains the metadata such as sensor and resolution
        bdf = bdf.iloc[0:0, :]
    df = simplify_index(bdf)
    assert df.event_resolution == timedelta(minutes=15)
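The pattern bdf.iloc[0:0, :] empties a frame by position while keeping its structure: for a plain DataFrame that means columns and dtypes, and timely-beliefs additionally carries sensor metadata such as event_resolution through the slice, which is what the test asserts. A plain-pandas illustration:

import pandas as pd

df = pd.DataFrame({"event_value": [1.0, 2.0]})
empty = df.iloc[0:0, :]  # zero rows, same columns and dtypes
assert empty.empty and list(empty.columns) == ["event_value"]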
Example #6
def get_power_data(
    resource: Union[str, Resource],  # name or instance
    show_consumption_as_positive: bool,
    showing_individual_traces_for: str,
    metrics: dict,
    query_window: Tuple[datetime, datetime],
    resolution: str,
    forecast_horizon: timedelta,
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, dict]:
    """Get power data and metrics.

    Return power observations, power forecasts and power schedules (each might be an empty DataFrame)
    and a dict with the following metrics:
    - expected value
    - mean absolute error
    - mean absolute percentage error
    - weighted absolute percentage error

    Todo: Power schedules ignore horizon.
    """
    if isinstance(resource, str):
        resource = Resource(resource)

    default_columns = ["event_value", "belief_horizon", "source"]

    # Get power data
    if showing_individual_traces_for != "schedules":
        resource.load_sensor_data(
            sensor_types=[Power],
            start=query_window[0],
            end=query_window[-1],
            resolution=resolution,
            belief_horizon_window=(None, timedelta(hours=0)),
            exclude_source_types=["scheduling script"],
        )
        if showing_individual_traces_for == "power":
            power_bdf = resource.power_data
            # In this case, power_bdf is actually a dict of BeliefsDataFrames.
            # We join the frames into one, recording (per frame) the sensor name as source.
            power_bdf = pd.concat(
                [set_bdf_source(bdf, sensor_name) for sensor_name, bdf in power_bdf.items()]
            )
        else:
            # Here, we aggregate all rows together
            power_bdf = resource.aggregate_power_data
        power_df: pd.DataFrame = simplify_index(
            power_bdf, index_levels_to_columns=["belief_horizon", "source"]
        )
        if showing_individual_traces_for == "power":
            # In this case, we keep on indexing by source (as we have more than one)
            power_df.set_index("source", append=True, inplace=True)
    else:
        power_df = pd.DataFrame(columns=default_columns)

    # Get power forecast
    if showing_individual_traces_for == "none":
        power_forecast_bdf: tb.BeliefsDataFrame = resource.load_sensor_data(
            sensor_types=[Power],
            start=query_window[0],
            end=query_window[-1],
            resolution=resolution,
            belief_horizon_window=(forecast_horizon, None),
            exclude_source_types=["scheduling script"],
        ).aggregate_power_data
        power_forecast_df: pd.DataFrame = simplify_index(
            power_forecast_bdf, index_levels_to_columns=["belief_horizon", "source"]
        )
    else:
        power_forecast_df = pd.DataFrame(columns=default_columns)

    # Get power schedule
    if showing_individual_traces_for != "power":
        resource.load_sensor_data(
            sensor_types=[Power],
            start=query_window[0],
            end=query_window[-1],
            resolution=resolution,
            belief_horizon_window=(None, None),
            source_types=["scheduling script"],
        )
        if showing_individual_traces_for == "schedules":
            power_schedule_bdf = resource.power_data
            power_schedule_bdf = pd.concat(
                [set_bdf_source(bdf, sensor_name) for sensor_name, bdf in power_schedule_bdf.items()]
            )
        else:
            power_schedule_bdf = resource.aggregate_power_data
        power_schedule_df: pd.DataFrame = simplify_index(
            power_schedule_bdf, index_levels_to_columns=["belief_horizon", "source"]
        )
        if showing_individual_traces_for == "schedules":
            power_schedule_df.set_index("source", append=True, inplace=True)
    else:
        power_schedule_df = pd.DataFrame(columns=default_columns)

    if show_consumption_as_positive:
        power_df["event_value"] *= -1
        power_forecast_df["event_value"] *= -1
        power_schedule_df["event_value"] *= -1

    # Calculate the power metrics
    power_hour_factor = time_utils.resolution_to_hour_factor(resolution)
    realised_power_in_mwh = pd.Series(power_df["event_value"] * power_hour_factor).values

    if not power_df.empty:
        metrics["realised_power_in_mwh"] = np.nansum(realised_power_in_mwh)
    else:
        metrics["realised_power_in_mwh"] = np.NaN
    if not power_forecast_df.empty and power_forecast_df.size == power_df.size:
        expected_power_in_mwh = pd.Series(
            power_forecast_df["event_value"] * power_hour_factor
        ).values
        metrics["expected_power_in_mwh"] = np.nansum(expected_power_in_mwh)
        metrics["mae_power_in_mwh"] = calculations.mean_absolute_error(
            realised_power_in_mwh, expected_power_in_mwh
        )
        metrics["mape_power"] = calculations.mean_absolute_percentage_error(
            realised_power_in_mwh, expected_power_in_mwh
        )
        metrics["wape_power"] = calculations.weighted_absolute_percentage_error(
            realised_power_in_mwh, expected_power_in_mwh
        )
    else:
        metrics["expected_power_in_mwh"] = np.NaN
        metrics["mae_power_in_mwh"] = np.NaN
        metrics["mape_power"] = np.NaN
        metrics["wape_power"] = np.NaN
    return power_df, power_forecast_df, power_schedule_df, metrics
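set_bdf_source is not shown on this page. Judging from how the concatenated frames are later re-indexed by source, it presumably relabels each frame's source index level with the given sensor name. A sketch of that assumed behavior (expects a frame with a "source" level in its index):

import pandas as pd

def set_bdf_source_sketch(df: pd.DataFrame, source_name: str) -> pd.DataFrame:
    """Presumed behavior: relabel the 'source' index level with one name."""
    df = df.reset_index("source")
    df["source"] = source_name
    return df.set_index("source", append=True)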
Example #7
def get_weather_data(
    assets: List[Asset],
    metrics: dict,
    sensor_type: WeatherSensorType,
    query_window: Tuple[datetime, datetime],
    resolution: str,
    forecast_horizon: timedelta,
) -> Tuple[pd.DataFrame, pd.DataFrame, str, Sensor, dict]:
    """Get most recent weather data and forecast weather data for the requested forecast horizon.

    Return weather observations, weather forecasts (either might be an empty DataFrame),
    the name of the sensor type, the weather sensor and a dict with the following metrics:
    - expected value
    - mean absolute error
    - mean absolute percentage error
    - weighted absolute percentage error"""

    # Todo: for now we only collect weather data for a single asset
    asset = assets[0]

    weather_data = tb.BeliefsDataFrame(columns=["event_value"])
    weather_forecast_data = tb.BeliefsDataFrame(columns=["event_value"])
    sensor_type_name = ""
    closest_sensor = None
    if sensor_type:
        # Find the 50 closest weather sensors
        sensor_type_name = sensor_type.name
        closest_sensors = Sensor.find_closest(
            generic_asset_type_name=asset.generic_asset.generic_asset_type.name,
            sensor_name=sensor_type_name,
            n=50,
            object=asset,
        )
        if closest_sensors:
            closest_sensor = closest_sensors[0]

            # Collect the weather data for the requested time window
            sensor_names = [sensor.name for sensor in closest_sensors]

            # Get weather data
            weather_bdf_dict: Dict[str, tb.BeliefsDataFrame] = TimedBelief.search(
                sensor_names,
                event_starts_after=query_window[0],
                event_ends_before=query_window[1],
                resolution=resolution,
                horizons_at_least=None,
                horizons_at_most=timedelta(hours=0),
                sum_multiple=False,
            )
            weather_df_dict: Dict[str, pd.DataFrame] = {}
            for sensor_name in weather_bdf_dict:
                weather_df_dict[sensor_name] = simplify_index(
                    weather_bdf_dict[sensor_name],
                    index_levels_to_columns=["belief_horizon", "source"],
                )

            # Get weather forecasts
            weather_forecast_bdf_dict: Dict[str, tb.BeliefsDataFrame] = TimedBelief.search(
                sensor_names,
                event_starts_after=query_window[0],
                event_ends_before=query_window[1],
                resolution=resolution,
                horizons_at_least=forecast_horizon,
                horizons_at_most=None,
                source_types=["user", "forecasting script", "script"],
                sum_multiple=False,
            )
            weather_forecast_df_dict: Dict[str, pd.DataFrame] = {}
            for sensor_name in weather_forecast_bdf_dict:
                weather_forecast_df_dict[sensor_name] = simplify_index(
                    weather_forecast_bdf_dict[sensor_name],
                    index_levels_to_columns=["belief_horizon", "source"],
                )

            # Take the closest weather sensor which contains some data for the selected time window
            for sensor, sensor_name in zip(closest_sensors, sensor_names):
                if (
                    not weather_df_dict[sensor_name]["event_value"].isnull().values.all()
                    or not weather_forecast_df_dict[sensor_name]["event_value"].isnull().values.all()
                ):
                    closest_sensor = sensor
                    break

            weather_data = weather_df_dict[sensor_name]
            weather_forecast_data = weather_forecast_df_dict[sensor_name]

            # Calculate the weather metrics
            if not weather_data.empty:
                metrics["realised_weather"] = weather_data["event_value"].mean()
            else:
                metrics["realised_weather"] = np.NaN
            if (
                not weather_forecast_data.empty
                and weather_forecast_data.size == weather_data.size
            ):
                metrics["expected_weather"] = weather_forecast_data["event_value"].mean()
                metrics["mae_weather"] = calculations.mean_absolute_error(
                    weather_data["event_value"], weather_forecast_data["event_value"]
                )
                metrics["mape_weather"] = calculations.mean_absolute_percentage_error(
                    weather_data["event_value"], weather_forecast_data["event_value"]
                )
                metrics["wape_weather"] = calculations.weighted_absolute_percentage_error(
                    weather_data["event_value"], weather_forecast_data["event_value"]
                )
            else:
                metrics["expected_weather"] = np.NaN
                metrics["mae_weather"] = np.NaN
                metrics["mape_weather"] = np.NaN
                metrics["wape_weather"] = np.NaN
    return (
        weather_data,
        weather_forecast_data,
        sensor_type_name,
        closest_sensor,
        metrics,
    )
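The sensor-selection loop above can be stated in isolation: take the first frame with any non-null event values, relying on the dict preserving proximity order. A hedged restatement:

from typing import Dict, Optional

import pandas as pd

def first_sensor_with_data(df_dict: Dict[str, pd.DataFrame]) -> Optional[str]:
    """Return the first (i.e. closest) sensor whose frame holds any data."""
    for name, df in df_dict.items():
        if df["event_value"].notnull().any():
            return name
    return None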
Example #8
def get_prices_data(
    metrics: dict,
    market_sensor: Sensor,
    query_window: Tuple[datetime, datetime],
    resolution: str,
    forecast_horizon: timedelta,
) -> Tuple[pd.DataFrame, pd.DataFrame, dict]:
    """Get price data and metrics.

    Return price observations, price forecasts (either might be an empty DataFrame)
    and a dict with the following metrics:
    - expected value
    - mean absolute error
    - mean absolute percentage error
    - weighted absolute percentage error
    """

    market_name = "" if market_sensor is None else market_sensor.name

    # Get price data
    price_bdf: tb.BeliefsDataFrame = TimedBelief.search(
        [market_name],
        event_starts_after=query_window[0],
        event_ends_before=query_window[1],
        resolution=resolution,
        horizons_at_least=None,
        horizons_at_most=timedelta(hours=0),
    )
    price_df: pd.DataFrame = simplify_index(
        price_bdf, index_levels_to_columns=["belief_horizon", "source"]
    )

    if not price_bdf.empty:
        metrics["realised_unit_price"] = price_df["event_value"].mean()
    else:
        metrics["realised_unit_price"] = np.NaN

    # Get price forecast
    price_forecast_bdf: tb.BeliefsDataFrame = TimedBelief.search(
        [market_name],
        event_starts_after=query_window[0],
        event_ends_before=query_window[1],
        resolution=resolution,
        horizons_at_least=forecast_horizon,
        horizons_at_most=None,
        source_types=["user", "forecasting script", "script"],
    )
    price_forecast_df: pd.DataFrame = simplify_index(
        price_forecast_bdf, index_levels_to_columns=["belief_horizon", "source"]
    )

    # Calculate the price metrics
    if not price_forecast_df.empty and price_forecast_df.size == price_df.size:
        metrics["expected_unit_price"] = price_forecast_df["event_value"].mean()
        metrics["mae_unit_price"] = calculations.mean_absolute_error(
            price_df["event_value"], price_forecast_df["event_value"]
        )
        metrics["mape_unit_price"] = calculations.mean_absolute_percentage_error(
            price_df["event_value"], price_forecast_df["event_value"]
        )
        metrics["wape_unit_price"] = calculations.weighted_absolute_percentage_error(
            price_df["event_value"], price_forecast_df["event_value"]
        )
    else:
        metrics["expected_unit_price"] = np.NaN
        metrics["mae_unit_price"] = np.NaN
        metrics["mape_unit_price"] = np.NaN
        metrics["wape_unit_price"] = np.NaN
    return price_df, price_forecast_df, metrics
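The calculations module is not shown on this page; the metric names correspond to textbook definitions, sketched below with numpy (the project's own implementations may differ, e.g. in NaN handling):

import numpy as np

def mean_absolute_error(y_true, y_hat) -> float:
    return float(np.nanmean(np.abs(np.asarray(y_true) - np.asarray(y_hat))))

def mean_absolute_percentage_error(y_true, y_hat) -> float:
    y_true, y_hat = np.asarray(y_true, float), np.asarray(y_hat, float)
    return float(np.nanmean(np.abs((y_true - y_hat) / y_true)))

def weighted_absolute_percentage_error(y_true, y_hat) -> float:
    y_true, y_hat = np.asarray(y_true, float), np.asarray(y_hat, float)
    return float(np.nansum(np.abs(y_true - y_hat)) / np.nansum(np.abs(y_true)))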
Example #9
    def get_schedule(self, sensor: Sensor, job_id: str, duration: timedelta, **kwargs):
        """Get a schedule from FlexMeasures.

        .. :quickref: Schedule; Download schedule from the platform

        **Optional fields**

        - "duration" (6 hours by default; can be increased to plan further into the future)

        **Example response**

        This message contains a schedule indicating to consume at various power
        rates from 10am UTC onwards for a duration of 45 minutes.

        .. sourcecode:: json

            {
                "values": [
                    2.15,
                    3,
                    2
                ],
                "start": "2015-06-02T10:00:00+00:00",
                "duration": "PT45M",
                "unit": "MW"
            }

        :reqheader Authorization: The authentication token
        :reqheader Content-Type: application/json
        :resheader Content-Type: application/json
        :status 200: PROCESSED
        :status 400: INVALID_TIMEZONE, INVALID_DOMAIN, INVALID_UNIT, UNKNOWN_SCHEDULE, UNRECOGNIZED_CONNECTION_GROUP
        :status 401: UNAUTHORIZED
        :status 403: INVALID_SENDER
        :status 405: INVALID_METHOD
        :status 422: UNPROCESSABLE_ENTITY
        """

        planning_horizon = min(  # type: ignore
            duration, current_app.config.get("FLEXMEASURES_PLANNING_HORIZON")
        )

        # Look up the scheduling job
        connection = current_app.queues["scheduling"].connection
        try:  # First try the scheduling queue
            job = Job.fetch(job_id, connection=connection)
        except NoSuchJobError:
            return unrecognized_event(job_id, "job")
        if job.is_finished:
            error_message = "A scheduling job has been processed with your job ID, but "
        elif job.is_failed:  # Try to inform the user on why the job failed
            e = job.meta.get(
                "exception",
                Exception(
                    "The job does not state why it failed. "
                    "The worker may be missing an exception handler, "
                    "or its exception handler is not storing the exception as job meta data."
                ),
            )
            return unknown_schedule(
                f"Scheduling job failed with {type(e).__name__}: {e}")
        elif job.is_started:
            return unknown_schedule("Scheduling job in progress.")
        elif job.is_queued:
            return unknown_schedule("Scheduling job waiting to be processed.")
        elif job.is_deferred:
            try:
                preferred_job = job.dependency
            except NoSuchJobError:
                return unknown_schedule(
                    "Scheduling job waiting for unknown job to be processed.")
            return unknown_schedule(
                f'Scheduling job waiting for {preferred_job.status} job "{preferred_job.id}" to be processed.'
            )
        else:
            return unknown_schedule("Scheduling job has an unknown status.")
        schedule_start = job.kwargs["start"]

        schedule_data_source_name = "Seita"
        scheduler_source = DataSource.query.filter_by(
            name="Seita", type="scheduling script"
        ).one_or_none()
        if scheduler_source is None:
            return unknown_schedule(
                error_message + f'no data is known from "{schedule_data_source_name}".'
            )

        power_values = sensor.search_beliefs(
            event_starts_after=schedule_start,
            event_ends_before=schedule_start + planning_horizon,
            source=scheduler_source,
            most_recent_beliefs_only=True,
            one_deterministic_belief_per_event=True,
        )
        # In consumption schedules, positive values denote consumption;
        # in the database, consumption is stored as negative power.
        consumption_schedule = -simplify_index(power_values)["event_value"]
        if consumption_schedule.empty:
            return unknown_schedule(
                error_message + "the schedule was not found in the database."
            )

        # Update the planning window
        resolution = sensor.event_resolution
        start = consumption_schedule.index[0]
        duration = min(duration, consumption_schedule.index[-1] + resolution - start)
        consumption_schedule = consumption_schedule[start : start + duration - resolution]
        response = dict(
            values=consumption_schedule.tolist(),
            start=isodate.datetime_isoformat(start),
            duration=isodate.duration_isoformat(duration),
            unit=sensor.unit,
        )

        d, s = request_processed()
        return dict(**response, **d), s
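The slice consumption_schedule[start : start + duration - resolution] subtracts one resolution because label-based slicing on a DatetimeIndex includes both endpoints; without the subtraction, the schedule would contain one event too many. A self-contained illustration:

import pandas as pd
from datetime import timedelta

idx = pd.date_range("2015-06-02 10:00", periods=4, freq="15min", tz="UTC")
schedule = pd.Series([2.15, 3, 2, 1], index=idx)

resolution = timedelta(minutes=15)
start, duration = idx[0], timedelta(minutes=45)

# Label slicing includes both endpoints, hence the `- resolution`:
print(schedule[start : start + duration - resolution])  # 3 values, i.e. PT45M worth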
Example #10
def get_device_message_response(generic_asset_name_groups, duration):

    unit = "MW"
    planning_horizon = min(
        duration, current_app.config.get("FLEXMEASURES_PLANNING_HORIZON")
    )

    if not has_assets():
        current_app.logger.info("User doesn't seem to have any assets.")

    value_groups = []
    new_event_groups = []
    for event_group in generic_asset_name_groups:
        for event in event_group:

            # Parse the entity address
            try:
                ea = parse_entity_address(event, entity_type="event", fm_scheme="fm0")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            sensor_id = ea["asset_id"]
            event_id = ea["event_id"]
            event_type = ea["event_type"]

            # Look for the Sensor object
            sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none()
            if sensor is None or not can_access_asset(sensor):
                current_app.logger.warning(
                    "Cannot identify sensor given the event %s." % event
                )
                return unrecognized_connection_group()
            if sensor.generic_asset.generic_asset_type.name not in (
                    "battery",
                    "one-way_evse",
                    "two-way_evse",
            ):
                return invalid_domain(
                    f"API version 1.3 only supports device messages for batteries and Electric Vehicle Supply Equipment (EVSE). "
                    f"Sensor ID:{sensor_id} does not belong to a battery or EVSE, but {p.a(sensor.generic_asset.generic_asset_type.description)}."
                )

            # Use the event_id to look up the schedule start
            if event_type not in ("soc", "soc-with-targets"):
                return unrecognized_event_type(event_type)
            connection = current_app.queues["scheduling"].connection
            try:  # First try the scheduling queue
                job = Job.fetch(event, connection=connection)
            except NoSuchJobError:  # Then try the most recent event_id (stored as a generic asset attribute)
                if event_id == sensor.generic_asset.get_attribute("soc_udi_event_id"):
                    schedule_start = datetime.fromisoformat(
                        sensor.generic_asset.get_attribute("soc_datetime")
                    )
                    message = (
                        "Your UDI event is the most recent event for this device, but "
                    )
                else:
                    return unrecognized_event(event_id, event_type)
            else:
                if job.is_finished:
                    message = "A scheduling job has been processed based on your UDI event, but "
                elif job.is_failed:  # Try to inform the user on why the job failed
                    e = job.meta.get(
                        "exception",
                        Exception(
                            "The job does not state why it failed. "
                            "The worker may be missing an exception handler, "
                            "or its exception handler is not storing the exception as job meta data."
                        ),
                    )
                    return unknown_schedule(
                        f"Scheduling job failed with {type(e).__name__}: {e}")
                elif job.is_started:
                    return unknown_schedule("Scheduling job in progress.")
                elif job.is_queued:
                    return unknown_schedule(
                        "Scheduling job waiting to be processed.")
                elif job.is_deferred:
                    try:
                        preferred_job = job.dependency
                    except NoSuchJobError:
                        return unknown_schedule(
                            "Scheduling job waiting for unknown job to be processed."
                        )
                    return unknown_schedule(
                        f'Scheduling job waiting for {preferred_job.status} job "{preferred_job.id}" to be processed.'
                    )
                else:
                    return unknown_schedule(
                        "Scheduling job has an unknown status.")
                schedule_start = job.kwargs["start"]

            schedule_data_source_name = "Seita"
            scheduler_source = DataSource.query.filter_by(
                name="Seita", type="scheduling script"
            ).one_or_none()
            if scheduler_source is None:
                return unknown_schedule(
                    message + f'no data is known from "{schedule_data_source_name}".'
                )

            power_values = sensor.search_beliefs(
                event_starts_after=schedule_start,
                event_ends_before=schedule_start + planning_horizon,
                source=scheduler_source,
                most_recent_beliefs_only=True,
                one_deterministic_belief_per_event=True,
            )
            # In consumption schedules, positive values denote consumption;
            # in the database, consumption is stored as negative power.
            consumption_schedule = -simplify_index(power_values)["event_value"]
            if consumption_schedule.empty:
                return unknown_schedule(
                    message + "the schedule was not found in the database."
                )

            # Update the planning window
            resolution = sensor.event_resolution
            start = consumption_schedule.index[0]
            duration = min(duration, consumption_schedule.index[-1] + resolution - start)
            consumption_schedule = consumption_schedule[start : start + duration - resolution]
            value_groups.append(consumption_schedule.tolist())
            new_event_groups.append(event)

    response = groups_to_dict(
        new_event_groups, value_groups, generic_asset_type_name="event"
    )
    response["start"] = isodate.datetime_isoformat(start)
    response["duration"] = isodate.duration_isoformat(duration)
    response["unit"] = unit

    d, s = request_processed()
    return dict(**response, **d), s
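Both schedule endpoints serialize times and durations with isodate, matching the formats in Example #9's sample response. A quick check (expected outputs in comments):

from datetime import datetime, timedelta

import isodate
import pytz

print(isodate.datetime_isoformat(datetime(2015, 6, 2, 10, tzinfo=pytz.utc)))
# 2015-06-02T10:00:00+00:00
print(isodate.duration_isoformat(timedelta(minutes=45)))
# PT45M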