Example #1
def create_power_forecasts(
    asset_id: int,
    from_date: str,
    to_date: str,
    timezone: str = "Asia/Seoul",
    horizon_hours: int = 1,
):
    """Creates a forecasting job.

    Useful for running this locally while creating forecasts on a remote server. In that case, point the redis db in
    your config settings to that of the remote server. To process the job, run a worker on the forecasting queue.

    For example:

        from_date = "2015-02-02"
        to_date = "2015-02-04"
        horizon_hours = 6

        This creates 1 job that forecasts values from 0am on February 2nd to 0am on February 4th,
        based on a 6 hour horizon.
        Note that this time period refers to the period of events we are forecasting, while in create_forecasting_jobs
        the time period refers to the period of belief_times, which is why we subtract the horizon.
    """
    """
    create_forecasting_jobs(
        asset_id=asset_id,
        timed_value_type="Power",
        horizons=[timedelta(hours=horizon_hours)],
        start_of_roll=pd.Timestamp(from_date).tz_localize(timezone)
        - timedelta(hours=horizon_hours),
        end_of_roll=pd.Timestamp(to_date).tz_localize(timezone)
        - timedelta(hours=horizon_hours),
    )
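
To make the docstring's subtraction concrete, here is a minimal sketch (plain pandas and datetime, no FlexMeasures imports) of how the forecasted event period maps to the roll period of belief times, using the docstring's example values:

from datetime import timedelta

import pandas as pd

from_date, to_date = "2015-02-02", "2015-02-04"
horizon = timedelta(hours=6)
timezone = "Asia/Seoul"

# Events we want forecasts for: midnight Feb 2nd up to midnight Feb 4th
event_start = pd.Timestamp(from_date).tz_localize(timezone)
event_end = pd.Timestamp(to_date).tz_localize(timezone)

# create_forecasting_jobs rolls over belief times, so shift back by the horizon
start_of_roll = event_start - horizon  # 2015-02-01 18:00 (Asia/Seoul)
end_of_roll = event_end - horizon  # 2015-02-03 18:00 (Asia/Seoul)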
Example #2
def test_failed_forecasting_invalid_horizon(app, run_as_cli, clean_redis,
                                            setup_test_data):
    """This one (as well as the fallback) should fail as the horizon is invalid."""
    solar_device1: Sensor = Sensor.query.filter_by(
        name="solar-asset-1").one_or_none()
    create_forecasting_jobs(
        start_of_roll=as_server_time(datetime(2015, 1, 1, 21)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 23)),
        horizons=[timedelta(hours=18)],
        sensor_id=solar_device1.id,
        custom_model_params=custom_model_params(),
    )
    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)
    check_failures(app.queues["forecasting"], 2 * ["InvalidHorizonException"])
Example #3
def test_forecasting_an_hour_of_wind(db, run_as_cli, app, setup_test_data):
    """Test one clean run of one job:
    - data source was made,
    - forecasts have been made
    """
    wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none()

    assert get_data_source() is None

    # makes 4 forecasts
    horizon = timedelta(hours=1)
    job = create_forecasting_jobs(
        start_of_roll=as_server_time(datetime(2015, 1, 1, 6)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 7)),
        horizons=[horizon],
        sensor_id=wind_device_1.id,
        custom_model_params=custom_model_params(),
    )

    print("Job: %s" % job[0].id)

    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)

    assert get_data_source() is not None

    forecasts = (
        TimedBelief.query.filter(TimedBelief.sensor_id == wind_device_1.id)
        .filter(TimedBelief.belief_horizon == horizon)
        .filter(
            (TimedBelief.event_start >= as_server_time(datetime(2015, 1, 1, 7)))
            & (TimedBelief.event_start < as_server_time(datetime(2015, 1, 1, 8)))
        )
        .all()
    )
    assert len(forecasts) == 4
    check_aggregate(4, horizon, wind_device_1.id)
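
The asserted count of 4 follows from the length of the roll divided by the sensor's event resolution. A quick sanity check, assuming the test sensor records at 15-minute resolution (implied by the assertion, not shown in the snippet):

from datetime import datetime, timedelta

start_of_roll = datetime(2015, 1, 1, 6)
end_of_roll = datetime(2015, 1, 1, 7)
resolution = timedelta(minutes=15)  # assumed sensor resolution

assert (end_of_roll - start_of_roll) // resolution == 4  # matches len(forecasts)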
Example #4
def test_failed_forecasting_insufficient_data(app, run_as_cli, clean_redis,
                                              setup_test_data):
    """This one (as well as the fallback) should fail as there is no underlying data.
    (Power data is in 2015)"""
    solar_device1: Sensor = Sensor.query.filter_by(
        name="solar-asset-1").one_or_none()
    create_forecasting_jobs(
        start_of_roll=as_server_time(datetime(2016, 1, 1, 20)),
        end_of_roll=as_server_time(datetime(2016, 1, 1, 22)),
        horizons=[timedelta(hours=1)],
        sensor_id=solar_device1.id,
        custom_model_params=custom_model_params(),
    )
    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)
    check_failures(app.queues["forecasting"], 2 * ["NotEnoughDataException"])
Example #5
def test_forecasting_two_hours_of_solar(
    app, run_as_cli, setup_fresh_test_data, clean_redis
):
    solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none()
    wind_device2: Sensor = Sensor.query.filter_by(name="wind-asset-2").one_or_none()
    print(solar_device1)
    print(wind_device2)

    # makes 8 forecasts
    horizon = timedelta(hours=1)
    job = create_forecasting_jobs(
        start_of_roll=as_server_time(datetime(2015, 1, 1, 12)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 14)),
        horizons=[horizon],
        sensor_id=solar_device1.id,
        custom_model_params=custom_model_params(),
    )
    print("Job: %s" % job[0].id)

    work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception)
    forecasts = (
        TimedBelief.query.filter(TimedBelief.sensor_id == solar_device1.id)
        .filter(TimedBelief.belief_horizon == horizon)
        .filter(
            (TimedBelief.event_start >= as_server_time(datetime(2015, 1, 1, 13)))
            & (TimedBelief.event_start < as_server_time(datetime(2015, 1, 1, 15)))
        )
        .all()
    )
    assert len(forecasts) == 8
    check_aggregate(8, horizon, solar_device1.id)
Example #6
def test_forecasting_three_hours_of_wind(db, app):
    wind_device2: Asset = Asset.query.filter_by(
        name="wind-asset-2").one_or_none()

    # makes 12 forecasts
    horizon = timedelta(hours=1)
    job = create_forecasting_jobs(
        timed_value_type="Power",
        start_of_roll=as_server_time(datetime(2015, 1, 1, 10)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 13)),
        horizons=[horizon],
        asset_id=wind_device2.id,
        custom_model_params=custom_model_params(),
    )
    print("Job: %s" % job[0].id)

    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)

    forecasts = (
        Power.query.filter(Power.asset_id == wind_device2.id)
        .filter(Power.horizon == horizon)
        .filter(
            (Power.datetime >= as_server_time(datetime(2015, 1, 1, 11)))
            & (Power.datetime < as_server_time(datetime(2015, 1, 1, 14)))
        )
        .all()
    )
    assert len(forecasts) == 12
    check_aggregate(12, horizon)
Example #7
def test_forecasting_an_hour_of_wind(db, app):
    """Test one clean run of one job:
    - data source was made,
    - forecasts have been made
    """
    wind_device_1 = Asset.query.filter_by(name="wind-asset-1").one_or_none()

    assert get_data_source() is None

    # makes 4 forecasts
    horizon = timedelta(hours=1)
    job = create_forecasting_jobs(
        timed_value_type="Power",
        start_of_roll=as_server_time(datetime(2015, 1, 1, 6)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 7)),
        horizons=[horizon],
        asset_id=wind_device_1.id,
        custom_model_params=custom_model_params(),
    )

    print("Job: %s" % job[0].id)

    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)

    assert get_data_source() is not None

    forecasts = (
        Power.query.filter(Power.asset_id == wind_device_1.id)
        .filter(Power.horizon == horizon)
        .filter(
            (Power.datetime >= as_server_time(datetime(2015, 1, 1, 7)))
            & (Power.datetime < as_server_time(datetime(2015, 1, 1, 8)))
        )
        .all()
    )
    assert len(forecasts) == 4
    check_aggregate(4, horizon)
Example #8
def test_forecasting_two_hours_of_solar_at_edge_of_data_set(db, app):
    solar_device1: Asset = Asset.query.filter_by(
        name="solar-asset-1").one_or_none()

    last_power_datetime = (
        Power.query.filter(Power.asset_id == solar_device1.id)
        .filter(Power.horizon == timedelta(hours=0))
        .order_by(Power.datetime.desc())
        .first()
        .datetime
    )  # datetime index of the last power value, 11.45pm (Jan 1st)

    # makes 4 forecasts, 1 of which is for a new datetime index
    horizon = timedelta(hours=6)
    job = create_forecasting_jobs(
        timed_value_type="Power",
        start_of_roll=last_power_datetime
        - horizon
        - timedelta(minutes=30),  # start of data on which forecast is based (5.15pm)
        end_of_roll=last_power_datetime
        - horizon
        + timedelta(minutes=30),  # end of data on which forecast is based (6.15pm)
        horizons=[horizon],  # so we want forecasts for 11.15pm (Jan 1st) to 0.15am (Jan 2nd)
        asset_id=solar_device1.id,
        custom_model_params=custom_model_params(),
    )
    print("Job: %s" % job[0].id)

    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)

    forecasts = (
        Power.query.filter(Power.asset_id == solar_device1.id)
        .filter(Power.horizon == horizon)
        .filter(Power.datetime > last_power_datetime)
        .all()
    )
    assert len(forecasts) == 1
    check_aggregate(4, horizon)
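
The timing in the comments checks out with plain datetime arithmetic; this sketch reproduces the 5.15pm-6.15pm roll and the resulting 11.15pm-0.15am forecast window from the stated last power value at 11.45pm:

from datetime import datetime, timedelta

last_power_datetime = datetime(2015, 1, 1, 23, 45)  # last power value, 11.45pm Jan 1st
horizon = timedelta(hours=6)

start_of_roll = last_power_datetime - horizon - timedelta(minutes=30)
end_of_roll = last_power_datetime - horizon + timedelta(minutes=30)
assert start_of_roll == datetime(2015, 1, 1, 17, 15)  # 5.15pm
assert end_of_roll == datetime(2015, 1, 1, 18, 15)  # 6.15pm

# Forecasted events lie one horizon past the roll: 11.15pm Jan 1st to 0.15am Jan 2nd
assert start_of_roll + horizon == datetime(2015, 1, 1, 23, 15)
assert end_of_roll + horizon == datetime(2015, 1, 2, 0, 15)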
Example #9
def test_failed_unknown_model(app, clean_redis, setup_test_data):
    """This one should fail because we use a model search term which yields no model configurator."""
    solar_device1: Sensor = Sensor.query.filter_by(
        name="solar-asset-1").one_or_none()
    horizon = timedelta(hours=1)

    cmp = custom_model_params()
    cmp["training_and_testing_period"] = timedelta(days=365)

    create_forecasting_jobs(
        start_of_roll=as_server_time(datetime(2015, 1, 1, 12)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 14)),
        horizons=[horizon],
        sensor_id=solar_device1.id,
        model_search_term="no-one-knows-this",
        custom_model_params=cmp,
    )
    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)

    check_failures(app.queues["forecasting"],
                   ["No model found for search term"])
Example #10
def test_making_forecasts():
    """
    Manual test to enqueue and process a forecasting job via redis queue
    """

    click.echo("Manual forecasting job queuing started ...")

    asset_id = 1
    forecast_filter = (
        Power.query.filter(Power.asset_id == asset_id)
        .filter(Power.horizon == timedelta(hours=6))
        .filter(
            (Power.datetime >= as_server_time(datetime(2015, 4, 1, 6)))
            & (Power.datetime < as_server_time(datetime(2015, 4, 3, 6)))
        )
    )

    click.echo("Delete forecasts ...")
    forecast_filter.delete()
    click.echo("Forecasts found before : %d" % forecast_filter.count())

    create_forecasting_jobs(
        asset_id=asset_id,
        timed_value_type="Power",
        horizons=[timedelta(hours=6)],
        start_of_roll=as_server_time(datetime(2015, 4, 1)),
        end_of_roll=as_server_time(datetime(2015, 4, 3)),
    )

    click.echo("Queue before working: %s" % app.queues["forecasting"].jobs)

    worker = Worker(
        [app.queues["forecasting"]],
        connection=app.queues["forecasting"].connection,
        name="Test CLI Forecaster",
        exception_handlers=[handle_forecasting_exception],
    )
    worker.work()
    click.echo("Queue after working: %s" % app.queues["forecasting"].jobs)

    click.echo("Forecasts found after (should be 24 * 2 * 4 = 192): %d" %
               forecast_filter.count())
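
The expected count in the final echo decomposes as 2 days of events at an assumed 15-minute resolution (4 values per hour), matching the filter window from April 1st 6am to April 3rd 6am:

from datetime import datetime, timedelta

window = datetime(2015, 4, 3, 6) - datetime(2015, 4, 1, 6)  # 2 days of events
resolution = timedelta(minutes=15)  # assumed asset resolution
assert window // resolution == 24 * 2 * 4  # 192 forecasts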
Example #11
def post_weather_data_response(  # noqa: C901
    unit,
    generic_asset_name_groups,
    horizon,
    rolling,
    value_groups,
    start,
    duration,
    resolution,
):

    current_app.logger.info("POSTING WEATHER DATA")

    data_source = get_or_create_source(current_user)
    weather_df_per_sensor = []
    forecasting_jobs = []
    for sensor_group, value_group in zip(generic_asset_name_groups, value_groups):
        for sensor in sensor_group:

            # Parse the entity address
            try:
                ea = parse_entity_address(
                    sensor, entity_type="weather_sensor", fm_scheme="fm0"
                )
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            weather_sensor_type_name = ea["weather_sensor_type_name"]
            latitude = ea["latitude"]
            longitude = ea["longitude"]

            # Check whether the unit is valid for this sensor type (e.g. no m/s allowed for temperature data)
            accepted_units = valid_sensor_units(weather_sensor_type_name)
            if unit not in accepted_units:
                return invalid_unit(weather_sensor_type_name, accepted_units)

            sensor = get_sensor_by_generic_asset_type_and_location(
                weather_sensor_type_name, latitude, longitude
            )
            if is_response_tuple(sensor):
                # Error message telling the user about the nearest weather sensor they can post to
                return sensor

            # Create new Weather objects
            beliefs = []
            for j, value in enumerate(value_group):
                dt = start + j * duration / len(value_group)
                if rolling:
                    h = horizon
                else:  # Deduct the difference between the end of the whole timeseries and the end of this timeslot
                    h = horizon - (
                        (start + duration) - (dt + duration / len(value_group))
                    )
                w = TimedBelief(
                    event_start=dt,
                    event_value=value,
                    belief_horizon=h,
                    sensor=sensor,
                    source=data_source,
                )
                beliefs.append(w)
            weather_df_per_sensor.append(tb.BeliefsDataFrame(beliefs))

            # make forecasts, but only if the sent-in values are not forecasts themselves (and also not in play)
            if current_app.config.get(
                "FLEXMEASURES_MODE", ""
            ) != "play" and horizon <= timedelta(
                hours=0
            ):  # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this sensor
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        sensor.id,
                        start,
                        start + duration,
                        resolution=duration / len(value_group),
                        enqueue=False,  # will enqueue later, after saving data
                    )
                )

    return save_and_enqueue(weather_df_per_sensor, forecasting_jobs)
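
The non-rolling branch above converts one horizon, stated relative to the end of the whole timeseries, into per-timeslot horizons. A worked example with assumed values (4 hourly slots, a 4-hour horizon) shows that every slot ends up with the same implied belief time:

from datetime import datetime, timedelta

start = datetime(2015, 1, 1)
duration = timedelta(hours=4)
value_group = [1.0, 2.0, 3.0, 4.0]  # 4 hourly slots (assumed)
horizon = timedelta(hours=4)  # relative to the end of the whole series

for j, _ in enumerate(value_group):
    dt = start + j * duration / len(value_group)
    slot_end = dt + duration / len(value_group)
    h = horizon - ((start + duration) - slot_end)
    # h grows from 1h (first slot) to 4h (last slot); slot_end - h is the same
    # instant for every slot, i.e. one shared belief time (assuming horizons
    # count back from each slot's end)
    assert slot_end - h == start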
Example #12
def post_power_data(
    unit,
    generic_asset_name_groups,
    value_groups,
    horizon,
    prior,
    start,
    duration,
    resolution,
    create_forecasting_jobs_too,
):

    # additional validation, todo: to be moved into Marshmallow
    if horizon is None and prior is None:
        extra_info = "Missing horizon or prior."
        return invalid_horizon(extra_info)

    current_app.logger.info("POSTING POWER DATA")

    data_source = get_or_create_source(current_user)
    user_sensors = get_sensors()
    if not user_sensors:
        current_app.logger.info("User doesn't seem to have any assets")
    user_sensor_ids = [sensor.id for sensor in user_sensors]
    power_df_per_connection = []
    forecasting_jobs = []
    for connection_group, event_values in zip(generic_asset_name_groups, value_groups):
        for connection in connection_group:

            # TODO: get asset through util function after refactoring
            # Parse the entity address
            try:
                ea = parse_entity_address(connection, entity_type="connection")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            sensor_id = ea["sensor_id"]

            # Look for the Sensor object
            if sensor_id in user_sensor_ids:
                sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none()
            else:
                current_app.logger.warning("Cannot identify connection %s" % connection)
                return unrecognized_connection_group()

            # Validate the sign of the values (following USEF specs with positive consumption and negative production)
            if sensor.get_attribute("is_strictly_non_positive") and any(
                v < 0 for v in event_values
            ):
                extra_info = (
                    "Connection %s is registered as a pure consumer and can only receive non-negative values."
                    % sensor.entity_address
                )
                return power_value_too_small(extra_info)
            elif sensor.get_attribute("is_strictly_non_negative") and any(
                v > 0 for v in event_values
            ):
                extra_info = (
                    "Connection %s is registered as a pure producer and can only receive non-positive values."
                    % sensor.entity_address
                )
                return power_value_too_big(extra_info)

            # Convert to timely-beliefs terminology
            event_starts, belief_horizons = determine_belief_timing(
                event_values, start, resolution, horizon, prior, sensor
            )

            # Create new Power objects
            beliefs = [
                TimedBelief(
                    event_start=event_start,
                    event_value=event_value
                    * -1,  # Reverse sign for FlexMeasures specs with positive production and negative consumption
                    belief_horizon=belief_horizon,
                    sensor=sensor,
                    source=data_source,
                )
                for event_start, event_value, belief_horizon in zip(
                    event_starts, event_values, belief_horizons
                )
            ]
            power_df_per_connection.append(tb.BeliefsDataFrame(beliefs))

            if create_forecasting_jobs_too:
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        sensor_id,
                        start,
                        start + duration,
                        resolution=duration / len(event_values),
                        enqueue=False,  # will enqueue later, after saving data
                    )
                )

    return save_and_enqueue(power_df_per_connection, forecasting_jobs)
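
A minimal sketch of the sign handling above, with hypothetical values: validation happens in USEF terms (consumption positive, production negative), while storage flips the sign to FlexMeasures terms (production positive, consumption negative):

def validate_and_convert(event_values, is_pure_consumer=False, is_pure_producer=False):
    # USEF terms: consumption positive, production negative
    if is_pure_consumer and any(v < 0 for v in event_values):
        raise ValueError("pure consumers can only receive non-negative values")
    if is_pure_producer and any(v > 0 for v in event_values):
        raise ValueError("pure producers can only receive non-positive values")
    # FlexMeasures terms: reverse the sign before storing
    return [v * -1 for v in event_values]

assert validate_and_convert([2.0, 0.5], is_pure_consumer=True) == [-2.0, -0.5]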
Example #13
def post_weather_data_response(  # noqa: C901
    unit,
    generic_asset_name_groups,
    horizon,
    prior,
    value_groups,
    start,
    duration,
    resolution,
) -> ResponseTuple:
    # additional validation, todo: to be moved into Marshmallow
    if horizon is None and prior is None:
        extra_info = "Missing horizon or prior."
        return invalid_horizon(extra_info)

    current_app.logger.info("POSTING WEATHER DATA")

    data_source = get_or_create_source(current_user)
    weather_df_per_sensor = []
    forecasting_jobs = []
    for sensor_group, event_values in zip(generic_asset_name_groups, value_groups):
        for sensor in sensor_group:

            # Parse the entity address
            try:
                ea = parse_entity_address(sensor, entity_type="weather_sensor")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            weather_sensor_type_name = ea["weather_sensor_type_name"]
            latitude = ea["latitude"]
            longitude = ea["longitude"]

            # Check whether the unit is valid for this sensor type (e.g. no m/s allowed for temperature data)
            accepted_units = valid_sensor_units(weather_sensor_type_name)
            if unit not in accepted_units:
                return invalid_unit(weather_sensor_type_name, accepted_units)

            sensor: Sensor = get_sensor_by_generic_asset_type_and_location(
                weather_sensor_type_name, latitude, longitude
            )

            # Convert to timely-beliefs terminology
            event_starts, belief_horizons = determine_belief_timing(
                event_values, start, resolution, horizon, prior, sensor
            )

            # Create new Weather objects
            beliefs = [
                TimedBelief(
                    event_start=event_start,
                    event_value=event_value,
                    belief_horizon=belief_horizon,
                    sensor=sensor,
                    source=data_source,
                )
                for event_start, event_value, belief_horizon in zip(
                    event_starts, event_values, belief_horizons
                )
            ]
            weather_df_per_sensor.append(tb.BeliefsDataFrame(beliefs))

            # make forecasts, but only if the sent-in values are not forecasts themselves (and also not in play)
            if current_app.config.get(
                "FLEXMEASURES_MODE", ""
            ) != "play" and horizon <= timedelta(
                hours=0
            ):  # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this generic asset
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        sensor.id,
                        start,
                        start + duration,
                        resolution=duration / len(event_values),
                        horizons=[horizon],
                        enqueue=False,  # will enqueue later, after saving data
                    )
                )

    return save_and_enqueue(weather_df_per_sensor, forecasting_jobs)
Example #14
def create_connection_and_value_groups(  # noqa: C901
        unit, generic_asset_name_groups, value_groups, horizon, rolling, start,
        duration):
    """
    Code for POSTing Power values to the API.
    Only lets users post to assets they own.
    The sign of values is validated according to asset specs, but in USEF terms.
    Then, we store the reverse sign for FlexMeasures specs (with positive production
    and negative consumption).

    If power values are not forecasts, forecasting jobs are created.
    """
    from flask import current_app

    current_app.logger.info("POSTING POWER DATA")
    data_source = get_or_create_user_data_source(current_user)
    user_assets = get_assets()
    if not user_assets:
        current_app.logger.info("User doesn't seem to have any assets")
    user_asset_ids = [asset.id for asset in user_assets]
    power_measurements = []
    forecasting_jobs = []
    for connection_group, value_group in zip(generic_asset_name_groups,
                                             value_groups):
        for connection in connection_group:

            # TODO: get asset through util function after refactoring
            # Parse the entity address
            try:
                connection = parse_entity_address(connection,
                                                  entity_type="connection")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            asset_id = connection["asset_id"]

            # Look for the Asset object
            if asset_id in user_asset_ids:
                asset = Asset.query.filter(Asset.id == asset_id).one_or_none()
            else:
                current_app.logger.warning("Cannot identify connection %s" %
                                           connection)
                return unrecognized_connection_group()

            # Validate the sign of the values (following USEF specs with positive consumption and negative production)
            if asset.is_pure_consumer and any(v < 0 for v in value_group):
                extra_info = (
                    "Connection %s is registered as a pure consumer and can only receive non-negative values."
                    % asset.entity_address)
                return power_value_too_small(extra_info)
            elif asset.is_pure_producer and any(v > 0 for v in value_group):
                extra_info = (
                    "Connection %s is registered as a pure producer and can only receive non-positive values."
                    % asset.entity_address)
                return power_value_too_big(extra_info)

            # Create new Power objects
            for j, value in enumerate(value_group):
                dt = start + j * duration / len(value_group)
                if rolling:
                    h = horizon
                else:  # Deduct the difference between the end of the whole timeseries and the end of this timeslot
                    h = horizon - ((start + duration) -
                                   (dt + duration / len(value_group)))
                p = Power(
                    datetime=dt,
                    value=value * -1,  # Reverse sign for FlexMeasures specs with positive production and negative consumption
                    horizon=h,
                    asset_id=asset.id,
                    data_source_id=data_source.id,
                )
                power_measurements.append(p)

            # make forecasts, but only if the sent-in values are not forecasts themselves
            if horizon <= timedelta(
                    hours=0
            ):  # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this generic asset
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        "Power",
                        asset_id,
                        start,
                        start + duration,
                        resolution=duration / len(value_group),
                        enqueue=False,
                    ))

    current_app.logger.info("SAVING TO DB AND QUEUEING...")
    try:
        save_to_session(power_measurements)
        db.session.flush()
        [
            current_app.queues["forecasting"].enqueue_job(job)
            for job in forecasting_jobs
        ]
        db.session.commit()
        return request_processed()
    except IntegrityError as e:
        current_app.logger.warning(e)
        db.session.rollback()

        # Allow meter data to be replaced only in play mode
        if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
            save_to_session(power_measurements, overwrite=True)
            [
                current_app.queues["forecasting"].enqueue_job(job)
                for job in forecasting_jobs
            ]
            db.session.commit()
            return request_processed()
        else:
            return already_received_and_successfully_processed()
Example #15
def test_failed_model_with_too_much_training_then_succeed_with_fallback(
        app, model_to_start_with, model_version):
    """
    Here we fail once - because we start with a model that needs too much training.
    So we check for this failure happening as expected.
    But then, we do succeed with the fallback model one level down.
    (fail-test falls back to linear & linear falls back to naive).
    As a result, there should be forecasts in the DB.
    """
    solar_device1: Asset = Asset.query.filter_by(
        name="solar-asset-1").one_or_none()
    horizon_hours = 1
    horizon = timedelta(hours=horizon_hours)

    cmp = custom_model_params()
    hour_start = 5
    if model_to_start_with == "linear-OLS":
        # making the linear model fail and fall back to naive
        hour_start = 3  # Todo: explain this parameter; why would it fail to forecast if data is there for the full day?

    # The failed test model (this failure enqueues a new job)
    create_forecasting_jobs(
        timed_value_type="Power",
        start_of_roll=as_server_time(datetime(2015, 1, 1, hour_start)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, hour_start + 2)),
        horizons=[horizon],
        asset_id=solar_device1.id,
        model_search_term=model_to_start_with,
        custom_model_params=cmp,
    )
    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)

    # Check if the correct model failed in the expected way
    check_failures(
        app.queues["forecasting"],
        ["NotEnoughDataException"],
        ["%s model v%d" % (model_to_start_with, model_version)],
    )

    # this query is useful to check data:
    def make_query(the_horizon_hours: int) -> Query:
        the_horizon = timedelta(hours=the_horizon_hours)
        return (
            Power.query.filter(Power.asset_id == solar_device1.id)
            .filter(Power.horizon == the_horizon)
            .filter(
                (Power.datetime >= as_server_time(datetime(2015, 1, 1, hour_start + the_horizon_hours)))
                & (Power.datetime < as_server_time(datetime(2015, 1, 1, hour_start + the_horizon_hours + 2)))
            )
        )

    # The successful model (linear OLS or naive) leads to these forecasts.
    forecasts = make_query(the_horizon_hours=horizon_hours).all()

    assert len(forecasts) == 8
    check_aggregate(8, horizon)

    if model_to_start_with == "linear-OLS":
        existing_data = make_query(the_horizon_hours=0).all()

        for ed, fd in zip(existing_data, forecasts):
            assert ed.value == fd.value

    # Now to check which models actually got to work.
    # We check which data sources do and do not exist by now:
    assert (get_data_source("failing-test model v1") is None
            )  # the test failure model failed -> no data source
    if model_to_start_with == "linear-OLS":
        assert (
            get_data_source() is None
        )  # the default (linear regression) (was made to) fail, as well
        assert (get_data_source("naive model v1")
                is not None)  # the naive one had to be used
    else:
        assert get_data_source() is not None  # the default (linear regression)
        assert (get_data_source("naive model v1") is None
                )  # the naive one did not have to be used
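
The chain the docstring describes (fail-test falls back to linear, linear falls back to naive) can be pictured with this hypothetical sketch; in the real code each failure enqueues a new rq job for the next model rather than looping in-process:

FALLBACK = {"failing-test": "linear-OLS", "linear-OLS": "naive"}

def run_with_fallback(model_name, run_model):
    """Try models down the chain until one succeeds (illustration only)."""
    while model_name is not None:
        try:
            return model_name, run_model(model_name)
        except Exception:  # e.g. NotEnoughDataException
            model_name = FALLBACK.get(model_name)
    raise RuntimeError("all models in the chain failed")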
Example #16
def post_weather_data_response(  # noqa: C901
    unit,
    generic_asset_name_groups,
    horizon,
    rolling,
    value_groups,
    start,
    duration,
    resolution,
):
    current_app.logger.info("POSTING WEATHER DATA")
    data_source = get_or_create_user_data_source(current_user)
    weather_measurements = []
    forecasting_jobs = []
    for sensor_group, value_group in zip(generic_asset_name_groups,
                                         value_groups):
        for sensor in sensor_group:

            # Parse the entity address
            try:
                ea = parse_entity_address(sensor, entity_type="sensor")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            weather_sensor_type_name = ea["weather_sensor_type_name"]
            latitude = ea["latitude"]
            longitude = ea["longitude"]

            # Check whether the unit is valid for this sensor type (e.g. no m/s allowed for temperature data)
            accepted_units = valid_sensor_units(weather_sensor_type_name)
            if unit not in accepted_units:
                return invalid_unit(weather_sensor_type_name, accepted_units)

            weather_sensor = get_weather_sensor_by(weather_sensor_type_name,
                                                   latitude, longitude)

            # Create new Weather objects
            for j, value in enumerate(value_group):
                dt = start + j * duration / len(value_group)
                if rolling:
                    h = horizon
                else:  # Deduct the difference between the end of the whole timeseries and the end of this timeslot
                    h = horizon - ((start + duration) -
                                   (dt + duration / len(value_group)))
                w = Weather(
                    datetime=dt,
                    value=value,
                    horizon=h,
                    sensor_id=weather_sensor.id,
                    data_source_id=data_source.id,
                )
                weather_measurements.append(w)

            # make forecasts, but only if the sent-in values are not forecasts themselves (and also not in play)
            if current_app.config.get(
                    "FLEXMEASURES_MODE", ""
            ) != "play" and horizon <= timedelta(
                    hours=0
            ):  # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this generic asset
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        "Weather",
                        weather_sensor.id,
                        start,
                        start + duration,
                        resolution=duration / len(value_group),
                        horizons=[horizon],
                        enqueue=False,  # will enqueue later, only if we successfully saved weather measurements
                    ))

    # Put these into the database
    current_app.logger.info("SAVING TO DB...")
    try:
        save_to_session(weather_measurements)
        db.session.flush()
        [
            current_app.queues["forecasting"].enqueue_job(job)
            for job in forecasting_jobs
        ]
        db.session.commit()
        return request_processed()
    except IntegrityError as e:
        current_app.logger.warning(e)
        db.session.rollback()

        # Allow meter data to be replaced only in play mode
        if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
            save_to_session(weather_measurements, overwrite=True)
            [
                current_app.queues["forecasting"].enqueue_job(job)
                for job in forecasting_jobs
            ]
            db.session.commit()
            return request_processed()
        else:
            return already_received_and_successfully_processed()
Example #17
def post_price_data_response(
    unit,
    generic_asset_name_groups,
    horizon,
    rolling,
    value_groups,
    start,
    duration,
    resolution,
):

    current_app.logger.info("POSTING PRICE DATA")

    data_source = get_or_create_user_data_source(current_user)
    prices = []
    forecasting_jobs = []
    for market_group, value_group in zip(generic_asset_name_groups,
                                         value_groups):
        for market in market_group:

            # Parse the entity address
            try:
                ea = parse_entity_address(market, entity_type="market")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            market_name = ea["market_name"]

            # Look for the Market object
            market = Market.query.filter(
                Market.name == market_name).one_or_none()
            if market is None:
                return unrecognized_market(market_name)
            elif unit != market.unit:
                return invalid_unit("%s prices" % market.display_name,
                                    [market.unit])

            # Create new Price objects
            for j, value in enumerate(value_group):
                dt = start + j * duration / len(value_group)
                if rolling:
                    h = horizon
                else:  # Deduct the difference between the end of the whole timeseries and the end of this timeslot
                    h = horizon - ((start + duration) -
                                   (dt + duration / len(value_group)))
                p = Price(
                    datetime=dt,
                    value=value,
                    horizon=h,
                    market_id=market.id,
                    data_source_id=data_source.id,
                )
                prices.append(p)

            # Make forecasts, but not in play mode. Price forecasts (horizon>0) can still lead to other price forecasts,
            # by the way, due to things like day-ahead markets.
            if current_app.config.get("FLEXMEASURES_MODE", "") != "play":
                # Forecast 24 and 48 hours ahead for at most the last 24 hours of posted price data
                forecasting_jobs = create_forecasting_jobs(
                    "Price",
                    market.id,
                    max(start, start + duration - timedelta(hours=24)),
                    start + duration,
                    resolution=duration / len(value_group),
                    horizons=[timedelta(hours=24),
                              timedelta(hours=48)],
                    enqueue=False,  # will enqueue later, only if we successfully saved prices
                )

    # Put these into the database
    current_app.logger.info("SAVING TO DB...")
    try:
        save_to_session(prices)
        db.session.flush()
        [
            current_app.queues["forecasting"].enqueue_job(job)
            for job in forecasting_jobs
        ]
        db.session.commit()
        return request_processed()
    except IntegrityError as e:
        current_app.logger.warning(e)
        db.session.rollback()

        # Allow price data to be replaced only in play mode
        if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
            save_to_session(prices, overwrite=True)
            [
                current_app.queues["forecasting"].enqueue_job(job)
                for job in forecasting_jobs
            ]
            db.session.commit()
            return request_processed()
        else:
            return already_received_and_successfully_processed()
Example #18
def post_price_data_response(  # noqa: C901
    unit,
    generic_asset_name_groups,
    horizon,
    prior,
    value_groups,
    start,
    duration,
    resolution,
) -> ResponseTuple:

    # additional validation, todo: to be moved into Marshmallow
    if horizon is None and prior is None:
        extra_info = "Missing horizon or prior."
        return invalid_horizon(extra_info)

    current_app.logger.info("POSTING PRICE DATA")

    data_source = get_or_create_source(current_user)
    price_df_per_market = []
    forecasting_jobs = []
    for market_group, event_values in zip(generic_asset_name_groups, value_groups):
        for market in market_group:

            # Parse the entity address
            try:
                ea = parse_entity_address(market, entity_type="market")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            sensor_id = ea["sensor_id"]

            # Look for the Sensor object
            sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none()
            if sensor is None:
                return unrecognized_market(sensor_id)
            elif unit != sensor.unit:
                return invalid_unit("%s prices" % sensor.name, [sensor.unit])

            # Convert to timely-beliefs terminology
            event_starts, belief_horizons = determine_belief_timing(
                event_values, start, resolution, horizon, prior, sensor
            )

            # Create new Price objects
            beliefs = [
                TimedBelief(
                    event_start=event_start,
                    event_value=event_value,
                    belief_horizon=belief_horizon,
                    sensor=sensor,
                    source=data_source,
                )
                for event_start, event_value, belief_horizon in zip(
                    event_starts, event_values, belief_horizons
                )
            ]
            price_df_per_market.append(tb.BeliefsDataFrame(beliefs))

            # Make forecasts, but not in play mode. Price forecasts (horizon>0) can still lead to other price forecasts,
            # by the way, due to things like day-ahead markets.
            if current_app.config.get("FLEXMEASURES_MODE", "") != "play":
                # Forecast 24 and 48 hours ahead for at most the last 24 hours of posted price data
                forecasting_jobs = create_forecasting_jobs(
                    sensor.id,
                    max(start, start + duration - timedelta(hours=24)),
                    start + duration,
                    resolution=duration / len(event_values),
                    horizons=[timedelta(hours=24), timedelta(hours=48)],
                    enqueue=False,  # will enqueue later, after saving data
                )

    return save_and_enqueue(price_df_per_market, forecasting_jobs)
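
The max() in the call above caps the forecasted period at the last 24 hours of posted prices, so for a series longer than a day only its tail triggers forecasts; a small check of that window arithmetic:

from datetime import datetime, timedelta

start = datetime(2015, 1, 1)
duration = timedelta(hours=48)  # two days of posted prices (assumed)

forecast_start = max(start, start + duration - timedelta(hours=24))
forecast_end = start + duration
assert forecast_start == datetime(2015, 1, 2)  # only the last 24 hours
assert forecast_end - forecast_start == timedelta(hours=24)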
Example #19
def post_price_data_response(
    unit,
    generic_asset_name_groups,
    horizon,
    rolling,
    value_groups,
    start,
    duration,
    resolution,
):

    current_app.logger.info("POSTING PRICE DATA")

    data_source = get_or_create_source(current_user)
    price_df_per_market = []
    forecasting_jobs = []
    for market_group, value_group in zip(generic_asset_name_groups, value_groups):
        for market in market_group:

            # Parse the entity address
            try:
                ea = parse_entity_address(market, entity_type="market", fm_scheme="fm0")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            market_name = ea["market_name"]

            # Look for the Sensor object
            sensor = get_sensor_by_unique_name(market_name, ["day_ahead", "tou_tariff"])
            if is_response_tuple(sensor):
                # Error message telling the user what to do
                return sensor
            if unit != sensor.unit:
                return invalid_unit("%s prices" % sensor.name, [sensor.unit])

            # Create new Price objects
            beliefs = []
            for j, value in enumerate(value_group):
                dt = start + j * duration / len(value_group)
                if rolling:
                    h = horizon
                else:  # Deduct the difference between the end of the whole timeseries and the end of this timeslot
                    h = horizon - (
                        (start + duration) - (dt + duration / len(value_group))
                    )
                p = TimedBelief(
                    event_start=dt,
                    event_value=value,
                    belief_horizon=h,
                    sensor=sensor,
                    source=data_source,
                )
                beliefs.append(p)
            price_df_per_market.append(tb.BeliefsDataFrame(beliefs))

            # Make forecasts, but not in play mode. Price forecasts (horizon>0) can still lead to other price forecasts,
            # by the way, due to things like day-ahead markets.
            if current_app.config.get("FLEXMEASURES_MODE", "") != "play":
                # Forecast 24 and 48 hours ahead for at most the last 24 hours of posted price data
                forecasting_jobs = create_forecasting_jobs(
                    sensor.id,
                    max(start, start + duration - timedelta(hours=24)),
                    start + duration,
                    resolution=duration / len(value_group),
                    horizons=[timedelta(hours=24), timedelta(hours=48)],
                    enqueue=False,  # will enqueue later, after saving data
                )

    return save_and_enqueue(price_df_per_market, forecasting_jobs)
Example #20
def create_connection_and_value_groups(  # noqa: C901
    unit, generic_asset_name_groups, value_groups, horizon, rolling, start, duration
):
    """
    Code for POSTing Power values to the API.
    Only lets users post to assets they own.
    The sign of values is validated according to asset specs, but in USEF terms.
    Then, we store the reverse sign for FlexMeasures specs (with positive production
    and negative consumption).

    If power values are not forecasts, forecasting jobs are created.
    """

    current_app.logger.info("POSTING POWER DATA")

    data_source = get_or_create_source(current_user)
    user_sensors = get_sensors()
    if not user_sensors:
        current_app.logger.info("User doesn't seem to have any assets")
    user_sensor_ids = [sensor.id for sensor in user_sensors]
    power_df_per_connection = []
    forecasting_jobs = []
    for connection_group, value_group in zip(generic_asset_name_groups, value_groups):
        for connection in connection_group:

            # TODO: get asset through util function after refactoring
            # Parse the entity address
            try:
                connection = parse_entity_address(
                    connection, entity_type="connection", fm_scheme="fm0"
                )
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            sensor_id = connection["asset_id"]

            # Look for the Sensor object
            if sensor_id in user_sensor_ids:
                sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none()
            else:
                current_app.logger.warning("Cannot identify connection %s" % connection)
                return unrecognized_connection_group()

            # Validate the sign of the values (following USEF specs with positive consumption and negative production)
            if sensor.get_attribute("is_strictly_non_positive") and any(
                v < 0 for v in value_group
            ):
                extra_info = (
                    "Connection %s is registered as a pure consumer and can only receive non-negative values."
                    % sensor.entity_address
                )
                return power_value_too_small(extra_info)
            elif sensor.get_attribute("is_strictly_non_negative") and any(
                v > 0 for v in value_group
            ):
                extra_info = (
                    "Connection %s is registered as a pure producer and can only receive non-positive values."
                    % sensor.entity_address
                )
                return power_value_too_big(extra_info)

            # Create a new BeliefsDataFrame
            beliefs = []
            for j, value in enumerate(value_group):
                dt = start + j * duration / len(value_group)
                if rolling:
                    h = horizon
                else:  # Deduct the difference between the end of the whole timeseries and the end of this timeslot
                    h = horizon - (
                        (start + duration) - (dt + duration / len(value_group))
                    )
                p = TimedBelief(
                    event_start=dt,
                    event_value=value
                    * -1,  # Reverse sign for FlexMeasures specs with positive production and negative consumption
                    belief_horizon=h,
                    sensor=sensor,
                    source=data_source,
                )

                assert p not in db.session
                beliefs.append(p)
            power_df_per_connection.append(tb.BeliefsDataFrame(beliefs))

            # make forecasts, but only if the sent-in values are not forecasts themselves
            if horizon <= timedelta(
                hours=0
            ):  # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this sensor
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        sensor_id,
                        start,
                        start + duration,
                        resolution=duration / len(value_group),
                        enqueue=False,  # will enqueue later, after saving data
                    )
                )

    return save_and_enqueue(power_df_per_connection, forecasting_jobs)
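
Throughout these examples the event resolution passed to create_forecasting_jobs is derived from the posted duration and the number of values. A one-line sanity check of that derivation, with assumed numbers:

from datetime import timedelta

duration = timedelta(hours=2)
value_group = 8 * [0.0]  # 8 posted values

assert duration / len(value_group) == timedelta(minutes=15)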