def post_price_data_response(
    unit,
    generic_asset_name_groups,
    horizon,
    rolling,
    value_groups,
    start,
    duration,
    resolution,
):

    current_app.logger.info("POSTING PRICE DATA")

    data_source = get_or_create_user_data_source(current_user)
    prices = []
    forecasting_jobs = []
    for market_group, value_group in zip(generic_asset_name_groups,
                                         value_groups):
        for market in market_group:

            # Parse the entity address
            try:
                ea = parse_entity_address(market, entity_type="market")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            market_name = ea["market_name"]

            # Look for the Market object
            market = Market.query.filter(
                Market.name == market_name).one_or_none()
            if market is None:
                return unrecognized_market(market_name)
            elif unit != market.unit:
                return invalid_unit("%s prices" % market.display_name,
                                    [market.unit])

            # Create new Price objects
            for j, value in enumerate(value_group):
                dt = start + j * duration / len(value_group)
                if rolling:
                    h = horizon
                else:  # Deduct the difference between this timeslot's end and the end of the whole timeseries
                    h = horizon - ((start + duration) -
                                   (dt + duration / len(value_group)))
                p = Price(
                    datetime=dt,
                    value=value,
                    horizon=h,
                    market_id=market.id,
                    data_source_id=data_source.id,
                )
                prices.append(p)

            # Make forecasts, but not in play mode. Note that price forecasts (horizon>0)
            # can in turn lead to other price forecasts, e.g. due to day-ahead markets.
            if current_app.config.get("FLEXMEASURES_MODE", "") != "play":
                # Forecast 24 and 48 hours ahead for at most the last 24 hours of posted price data
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        "Price",
                        market.id,
                        max(start, start + duration - timedelta(hours=24)),
                        start + duration,
                        resolution=duration / len(value_group),
                        horizons=[timedelta(hours=24), timedelta(hours=48)],
                        enqueue=False,  # will enqueue later, only if we successfully saved prices
                    )
                )

    # Put these into the database
    current_app.logger.info("SAVING TO DB...")
    try:
        save_to_session(prices)
        db.session.flush()
        for job in forecasting_jobs:
            current_app.queues["forecasting"].enqueue_job(job)
        db.session.commit()
        return request_processed()
    except IntegrityError as e:
        current_app.logger.warning(e)
        db.session.rollback()

        # Allow price data to be replaced only in play mode
        if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
            save_to_session(prices, overwrite=True)
            for job in forecasting_jobs:
                current_app.queues["forecasting"].enqueue_job(job)
            db.session.commit()
            return request_processed()
        else:
            return already_received_and_successfully_processed()
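
The horizon arithmetic above is easiest to check with concrete numbers. A minimal sketch of the non-rolling case, using made-up datetimes and durations (not part of the API):

from datetime import datetime, timedelta

# Made-up inputs: four hourly values posted as one series with a 24h horizon
start = datetime(2021, 1, 1)
duration = timedelta(hours=4)
horizon = timedelta(hours=24)
n = 4

for j in range(n):
    dt = start + j * duration / n
    # Deduct the gap between this timeslot's end and the series' end,
    # so each value's horizon is measured from its own end time
    h = horizon - ((start + duration) - (dt + duration / n))
    print(dt, h)  # horizons run from 21:00:00 up to 1 day, 0:00:00
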
def post_weather_data_response(  # noqa: C901
    unit,
    generic_asset_name_groups,
    horizon,
    rolling,
    value_groups,
    start,
    duration,
    resolution,
):
    current_app.logger.info("POSTING WEATHER DATA")
    data_source = get_or_create_user_data_source(current_user)
    weather_measurements = []
    forecasting_jobs = []
    for sensor_group, value_group in zip(generic_asset_name_groups,
                                         value_groups):
        for sensor in sensor_group:

            # Parse the entity address
            try:
                ea = parse_entity_address(sensor, entity_type="sensor")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            weather_sensor_type_name = ea["weather_sensor_type_name"]
            latitude = ea["latitude"]
            longitude = ea["longitude"]

            # Check whether the unit is valid for this sensor type (e.g. no m/s allowed for temperature data)
            accepted_units = valid_sensor_units(weather_sensor_type_name)
            if unit not in accepted_units:
                return invalid_unit(weather_sensor_type_name, accepted_units)

            weather_sensor = get_weather_sensor_by(weather_sensor_type_name,
                                                   latitude, longitude)

            # Create new Weather objects
            for j, value in enumerate(value_group):
                dt = start + j * duration / len(value_group)
                if rolling:
                    h = horizon
                else:  # Deduct the difference between this timeslot's end and the end of the whole timeseries
                    h = horizon - ((start + duration) -
                                   (dt + duration / len(value_group)))
                w = Weather(
                    datetime=dt,
                    value=value,
                    horizon=h,
                    sensor_id=weather_sensor.id,
                    data_source_id=data_source.id,
                )
                weather_measurements.append(w)

            # Make forecasts, but only if the sent-in values are not forecasts themselves (and not in play mode)
            # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this generic asset
            if (
                current_app.config.get("FLEXMEASURES_MODE", "") != "play"
                and horizon <= timedelta(hours=0)
            ):
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        "Weather",
                        weather_sensor.id,
                        start,
                        start + duration,
                        resolution=duration / len(value_group),
                        horizons=[horizon],
                        enqueue=False,  # will enqueue later, only if we successfully saved weather measurements
                    )
                )

    # Put these into the database
    current_app.logger.info("SAVING TO DB...")
    try:
        save_to_session(weather_measurements)
        db.session.flush()
        for job in forecasting_jobs:
            current_app.queues["forecasting"].enqueue_job(job)
        db.session.commit()
        return request_processed()
    except IntegrityError as e:
        current_app.logger.warning(e)
        db.session.rollback()

        # Allow meter data to be replaced only in play mode
        if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
            save_to_session(weather_measurements, overwrite=True)
            for job in forecasting_jobs:
                current_app.queues["forecasting"].enqueue_job(job)
            db.session.commit()
            return request_processed()
        else:
            return already_received_and_successfully_processed()
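
Note the ordering both endpoints above rely on: forecasting jobs are created with enqueue=False and only put on the queue after the data has been flushed successfully, so no forecasts are computed for data that never reached the database. A stripped-down sketch of that ordering, as a hypothetical helper (session, queue and jobs stand in for the real app objects):

def save_then_enqueue(session, measurements, queue, jobs):
    # Sketch only: mirrors the flush-before-enqueue pattern used above
    session.add_all(measurements)
    session.flush()  # raises IntegrityError on duplicates, before any job is queued
    for job in jobs:
        queue.enqueue_job(job)  # enqueue only once saving is known to succeed
    session.commit()
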
def create_connection_and_value_groups(  # noqa: C901
    unit, generic_asset_name_groups, value_groups, horizon, rolling, start, duration
):
    """
    Code for POSTing Power values to the API.
    Only lets users post to assets they own.
    The sign of the values is validated according to the asset's specs, in USEF terms
    (positive consumption, negative production). We then store the values with the
    reverse sign, following FlexMeasures specs (positive production, negative consumption).

    If power values are not forecasts, forecasting jobs are created.
    """
    from flask import current_app

    current_app.logger.info("POSTING POWER DATA")
    data_source = get_or_create_user_data_source(current_user)
    user_assets = get_assets()
    if not user_assets:
        current_app.logger.info("User doesn't seem to have any assets")
    user_asset_ids = [asset.id for asset in user_assets]
    power_measurements = []
    forecasting_jobs = []
    for connection_group, value_group in zip(generic_asset_name_groups,
                                             value_groups):
        for connection in connection_group:

            # TODO: get asset through util function after refactoring
            # Parse the entity address
            try:
                connection = parse_entity_address(connection,
                                                  entity_type="connection")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            asset_id = connection["asset_id"]

            # Look for the Asset object
            if asset_id in user_asset_ids:
                asset = Asset.query.filter(Asset.id == asset_id).one_or_none()
            else:
                current_app.logger.warning("Cannot identify connection %s" %
                                           connection)
                return unrecognized_connection_group()

            # Validate the sign of the values (following USEF specs with positive consumption and negative production)
            if asset.is_pure_consumer and any(v < 0 for v in value_group):
                extra_info = (
                    "Connection %s is registered as a pure consumer and can only receive non-negative values."
                    % asset.entity_address)
                return power_value_too_small(extra_info)
            elif asset.is_pure_producer and any(v > 0 for v in value_group):
                extra_info = (
                    "Connection %s is registered as a pure producer and can only receive non-positive values."
                    % asset.entity_address)
                return power_value_too_big(extra_info)

            # Create new Power objects
            for j, value in enumerate(value_group):
                dt = start + j * duration / len(value_group)
                if rolling:
                    h = horizon
                else:  # Deduct the difference between this timeslot's end and the end of the whole timeseries
                    h = horizon - ((start + duration) -
                                   (dt + duration / len(value_group)))
                p = Power(
                    datetime=dt,
                    value=value * -1,  # Reverse sign for FlexMeasures specs (positive production, negative consumption)
                    horizon=h,
                    asset_id=asset.id,
                    data_source_id=data_source.id,
                )
                power_measurements.append(p)

            # Make forecasts, but only if the sent-in values are not forecasts themselves
            # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this generic asset
            if horizon <= timedelta(hours=0):
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        "Power",
                        asset_id,
                        start,
                        start + duration,
                        resolution=duration / len(value_group),
                        enqueue=False,
                    )
                )

    current_app.logger.info("SAVING TO DB AND QUEUEING...")
    try:
        save_to_session(power_measurements)
        db.session.flush()
        for job in forecasting_jobs:
            current_app.queues["forecasting"].enqueue_job(job)
        db.session.commit()
        return request_processed()
    except IntegrityError as e:
        current_app.logger.warning(e)
        db.session.rollback()

        # Allow meter data to be replaced only in play mode
        if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
            save_to_session(power_measurements, overwrite=True)
            for job in forecasting_jobs:
                current_app.queues["forecasting"].enqueue_job(job)
            db.session.commit()
            return request_processed()
        else:
            return already_received_and_successfully_processed()
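
The sign handling above is worth a worked example: values arrive in USEF terms (positive consumption, negative production) and are stored with the opposite sign. A hypothetical pure consumer posting [0.5, 1.0] thus ends up with [-0.5, -1.0] in the database:

# Hypothetical values in USEF terms (positive = consumption)
value_group = [0.5, 1.0]

# A pure consumer may only post non-negative values
assert not any(v < 0 for v in value_group)

# Stored in FlexMeasures terms (positive = production), so the sign is flipped
stored_values = [v * -1 for v in value_group]  # [-0.5, -1.0]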

Example 4

def make_schedule(
    asset_id: int,
    start: datetime,
    end: datetime,
    belief_time: datetime,
    resolution: timedelta,
    soc_at_start: Optional[float] = None,
    soc_targets: Optional[pd.Series] = None,
) -> bool:
    """Preferably, a starting soc is given.
    Otherwise, we try to retrieve the current state of charge from the asset (if that is the valid one at the start).
    Otherwise, we set the starting soc to 0 (some assets don't use the concept of a state of charge,
    and without soc targets and limits the starting soc doesn't matter).
    """
    # https://docs.sqlalchemy.org/en/13/faq/connections.html#how-do-i-use-engines-connections-sessions-with-python-multiprocessing-or-os-fork
    db.engine.dispose()

    rq_job = get_current_job()

    # find asset
    asset = Asset.query.filter_by(id=asset_id).one_or_none()

    click.echo(
        "Running Scheduling Job %s: %s, from %s to %s" % (rq_job.id, asset, start, end)
    )

    if soc_at_start is None:
        if start == asset.soc_datetime and asset.soc_in_mwh is not None:
            soc_at_start = asset.soc_in_mwh
        else:
            soc_at_start = 0

    if soc_targets is None:
        soc_targets = pd.Series(
            np.nan, index=pd.date_range(start, end, freq=resolution, closed="right")
        )

    if asset.asset_type_name == "battery":
        consumption_schedule = schedule_battery(
            asset, asset.market, start, end, resolution, soc_at_start, soc_targets
        )
    elif asset.asset_type_name in (
        "one-way_evse",
        "two-way_evse",
    ):
        consumption_schedule = schedule_charging_station(
            asset, asset.market, start, end, resolution, soc_at_start, soc_targets
        )
    else:
        raise ValueError(
            "Scheduling is not supported for asset type %s." % asset.asset_type
        )

    data_source = get_data_source(
        data_source_name="Seita",
        data_source_type="scheduling script",
    )
    click.echo("Job %s made schedule." % rq_job.id)

    ts_value_schedule = [
        Power(
            datetime=dt,
            horizon=dt.astimezone(pytz.utc) - belief_time.astimezone(pytz.utc),
            value=-value,
            asset_id=asset_id,
            data_source_id=data_source.id,
        )
        for dt, value in consumption_schedule.items()
    ]  # For consumption schedules, positive values denote consumption. For the db, consumption is negative

    try:
        save_to_session(ts_value_schedule)
    except IntegrityError as e:
        current_app.logger.warning(e)
        click.echo("Rolling back due to IntegrityError")
        db.session.rollback()

        if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
            click.echo("Saving again, with overwrite=True")
            save_to_session(ts_value_schedule, overwrite=True)

    db.session.commit()

    return True
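
Each scheduled Power value above gets a horizon equal to the gap between its own start and the belief time, marking the whole schedule as ex-ante. A quick check of that arithmetic, with illustrative times:

import pytz
from datetime import datetime, timedelta

belief_time = pytz.utc.localize(datetime(2021, 1, 1, 12, 0))
dt = pytz.utc.localize(datetime(2021, 1, 1, 14, 0))  # slot starting two hours later

horizon = dt.astimezone(pytz.utc) - belief_time.astimezone(pytz.utc)
assert horizon == timedelta(hours=2)  # positive horizon: a schedule, not a measurement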

Example 5

def save_to_db(
    timed_values: Union[BeliefsDataFrame, List[Union[Power, Price, Weather]]],
    forecasting_jobs: List[Job] = [],
    save_changed_beliefs_only: bool = True,
) -> ResponseTuple:
    """Put the timed values into the database and enqueue forecasting jobs.

    Data can only be replaced on servers in play mode.

    TODO: remove this legacy function in its entirety (announced v0.8.0)

    :param timed_values: BeliefsDataFrame or a list of Power, Price or Weather values to be saved
    :param forecasting_jobs: list of forecasting Jobs for redis queues.
    :param save_changed_beliefs_only: if True, beliefs that are already stored in the database with an earlier belief time are dropped.
    :returns: ResponseTuple
    """

    import warnings

    warnings.warn(
        "The method api.common.utils.api_utils.save_to_db is deprecated. Check out the following replacements:"
        "\n- [recommended option] to store BeliefsDataFrames only, switch to data.utils.save_to_db"
        "\n- to store BeliefsDataFrames and enqueue jobs, switch to api.common.utils.api_utils.save_and_enqueue"
    )

    if isinstance(timed_values, BeliefsDataFrame):

        if save_changed_beliefs_only:
            # Drop beliefs that haven't changed
            timed_values = (
                timed_values.convert_index_from_belief_horizon_to_time()
                .groupby(level=["belief_time", "source"], as_index=False)
                .apply(drop_unchanged_beliefs)
            )

            # Work around bug in which groupby still introduces an index level, even though we asked it not to
            if None in timed_values.index.names:
                timed_values.index = timed_values.index.droplevel(None)

        if timed_values.empty:
            current_app.logger.debug("Nothing new to save")
            return already_received_and_successfully_processed()

    current_app.logger.info("SAVING TO DB AND QUEUEING...")
    try:
        if isinstance(timed_values, BeliefsDataFrame):
            TimedBelief.add_to_session(
                session=db.session, beliefs_data_frame=timed_values
            )
        else:
            save_to_session(timed_values)
        db.session.flush()
        for job in forecasting_jobs:
            current_app.queues["forecasting"].enqueue_job(job)
        db.session.commit()
        return request_processed()
    except IntegrityError as e:
        current_app.logger.warning(e)
        db.session.rollback()

        # Possibly allow data to be replaced depending on config setting
        if current_app.config.get("FLEXMEASURES_ALLOW_DATA_OVERWRITE", False):
            if isinstance(timed_values, BeliefsDataFrame):
                TimedBelief.add_to_session(
                    session=db.session,
                    beliefs_data_frame=timed_values,
                    allow_overwrite=True,
                )
            else:
                save_to_session(timed_values, overwrite=True)
            for job in forecasting_jobs:
                current_app.queues["forecasting"].enqueue_job(job)
            db.session.commit()
            return request_processed()
        else:
            return already_received_and_successfully_processed()
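
The droplevel(None) workaround in save_to_db addresses a pandas quirk: groupby(..., as_index=False).apply can still prepend an unnamed index level when the applied function returns reshaped frames. A self-contained pandas illustration of the symptom and the fix (toy data, unrelated to the beliefs model):

import pandas as pd

idx = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=["source", "step"])
df = pd.DataFrame({"value": [1, 2, 3, 4]}, index=idx)

# Apply a function that drops some rows per group, as drop_unchanged_beliefs does
out = df.groupby(level="source", as_index=False).apply(lambda g: g[g["value"] > 1])

# On affected pandas versions an unnamed level sneaks in despite as_index=False
if None in out.index.names:
    out.index = out.index.droplevel(None)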

Example 6

def make_forecasts(
    asset_id: int,
    timed_value_type: str,
    horizon: timedelta,
    start: datetime,
    end: datetime,
    custom_model_params: dict = None,
) -> int:
    """
    Build forecasting model specs, make rolling forecasts, save the forecasts made.
    Each individual forecast is a belief about an interval.
    Returns the number of forecasts made.

    Parameters
    ----------
    :param asset_id: int
        To identify which asset to forecast
    :param timed_value_type: str
        This should go away after a refactoring - we now use it to create the DB entry for the forecasts
    :param horizon: timedelta
        duration between the end of each interval and the time at which the belief about that interval is formed
    :param start: datetime
        start of forecast period, i.e. start time of the first interval to be forecast
    :param end: datetime
        end of forecast period, i.e. end time of the last interval to be forecast
    :param custom_model_params: dict
        params to pass through to the model specs configurator,
        e.g. outcome_var_transformation; advisable only for testing purposes.
    """
    # https://docs.sqlalchemy.org/en/13/faq/connections.html#how-do-i-use-engines-connections-sessions-with-python-multiprocessing-or-os-fork
    db.engine.dispose()

    rq_job = get_current_job()

    # find out which model to run, fall back to latest recommended
    model_search_term = rq_job.meta.get("model_search_term", "linear-OLS")

    # find asset
    asset = get_asset(asset_id, timed_value_type)

    click.echo(
        "Running Forecasting Job %s: %s for %s on model '%s', from %s to %s"
        % (rq_job.id, asset, horizon, model_search_term, start, end)
    )

    if hasattr(asset, "market_type"):
        ex_post_horizon = None  # Todo: until we sort out the ex_post_horizon, use all available price data
    else:
        ex_post_horizon = timedelta(hours=0)

    # Make model specs
    model_configurator = lookup_model_specs_configurator(model_search_term)
    model_specs, model_identifier, fallback_model_search_term = model_configurator(
        generic_asset=asset,
        forecast_start=as_server_time(start),
        forecast_end=as_server_time(end),
        forecast_horizon=horizon,
        ex_post_horizon=ex_post_horizon,
        custom_model_params=custom_model_params,
    )
    model_specs.creation_time = server_now()

    rq_job.meta["model_identifier"] = model_identifier
    rq_job.meta["fallback_model_search_term"] = fallback_model_search_term
    rq_job.save()

    # Before we run the model, check whether the horizon is supported and enough data is available
    if horizon not in supported_horizons():
        raise InvalidHorizonException(
            "Invalid horizon on job %s: %s" % (rq_job.id, horizon)
        )

    query_window = get_query_window(
        model_specs.start_of_training,
        end,
        [lag * model_specs.frequency for lag in model_specs.lags],
    )
    check_data_availability(
        asset,
        determine_asset_value_class_by_asset(asset),
        start,
        end,
        query_window,
        horizon,
    )

    data_source = get_data_source(
        data_source_name="Seita (%s)"
        % rq_job.meta.get("model_identifier", "unknown model"),
        data_source_type="forecasting script",
    )

    forecasts, model_state = make_rolling_forecasts(
        start=as_server_time(start),
        end=as_server_time(end),
        model_specs=model_specs,
    )
    click.echo("Job %s made %d forecasts." % (rq_job.id, len(forecasts)))

    ts_value_forecasts = [
        make_timed_value(timed_value_type, asset_id, dt, value, horizon, data_source.id)
        for dt, value in forecasts.items()
    ]

    try:
        save_to_session(ts_value_forecasts)
    except IntegrityError as e:
        current_app.logger.warning(e)
        click.echo("Rolling back due to IntegrityError")
        db.session.rollback()

        if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
            click.echo("Saving again, with overwrite=True")
            save_to_session(ts_value_forecasts, overwrite=True)

    db.session.commit()

    return len(forecasts)
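
The horizon semantics in the docstring above (the duration between the end of each forecast interval and the time the belief about it is formed) imply, for example, that a 48-hour horizon on an interval ending at midnight on Jan 3 corresponds to a belief formed at midnight on Jan 1:

from datetime import datetime, timedelta

horizon = timedelta(hours=48)
interval_end = datetime(2021, 1, 3)

belief_time = interval_end - horizon
assert belief_time == datetime(2021, 1, 1)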