Example #1
def test_scheduling_a_battery(db, app, add_battery_assets, setup_test_data):
    """Test one clean run of one scheduling job:
    - data source was made,
    - schedule has been made
    """

    battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none()
    start = as_server_time(datetime(2015, 1, 2))
    end = as_server_time(datetime(2015, 1, 3))
    resolution = timedelta(minutes=15)

    assert (DataSource.query.filter_by(
        name="Seita", type="scheduling script").one_or_none() is None
            )  # Make sure the scheduler data source isn't there

    job = create_scheduling_job(battery.id,
                                start,
                                end,
                                belief_time=start,
                                resolution=resolution)

    print("Job: %s" % job.id)

    work_on_rq(app.queues["scheduling"], exc_handler=exception_reporter)

    scheduler_source = DataSource.query.filter_by(
        name="Seita", type="scheduling script").one_or_none()
    assert (scheduler_source
            is not None)  # Make sure the scheduler data source is now there

    power_values = (TimedBelief.query.filter(
        TimedBelief.sensor_id == battery.id).filter(
            TimedBelief.source_id == scheduler_source.id).all())
    print([v.event_value for v in power_values])
    assert len(power_values) == 96
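
The examples in this listing are shown without their imports. As a minimal sketch, assuming the usual FlexMeasures module layout (exact paths may differ between versions), the Sensor/TimedBelief-based examples would need roughly the following:

from datetime import datetime, timedelta

from flexmeasures.data.models.data_sources import DataSource
from flexmeasures.data.models.time_series import Sensor, TimedBelief
from flexmeasures.data.services.scheduling import create_scheduling_job
from flexmeasures.utils.time_utils import as_server_time

# work_on_rq and exception_reporter are test helpers; their location here is an assumption
from flexmeasures.data.tests.utils import work_on_rq, exception_reporter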
Example #2
def test_forecasting_an_hour_of_wind(db, run_as_cli, app, setup_test_data):
    """Test one clean run of one job:
    - data source was made,
    - forecasts have been made
    """
    wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none()

    assert get_data_source() is None

    # makes 4 forecasts
    horizon = timedelta(hours=1)
    job = create_forecasting_jobs(
        start_of_roll=as_server_time(datetime(2015, 1, 1, 6)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 7)),
        horizons=[horizon],
        sensor_id=wind_device_1.id,
        custom_model_params=custom_model_params(),
    )

    print("Job: %s" % job[0].id)

    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)

    assert get_data_source() is not None

    forecasts = (
        TimedBelief.query.filter(TimedBelief.sensor_id == wind_device_1.id)
        .filter(TimedBelief.belief_horizon == horizon)
        .filter(
            (TimedBelief.event_start >= as_server_time(datetime(2015, 1, 1, 7)))
            & (TimedBelief.event_start < as_server_time(datetime(2015, 1, 1, 8)))
        )
        .all()
    )
    assert len(forecasts) == 4
    check_aggregate(4, horizon, wind_device_1.id)
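
Several forecasting examples pass custom_model_params(), a helper that is not shown in this listing. A hypothetical stand-in (the only requirement visible here, from Example #9, is that it returns a dict with a "training_and_testing_period" key) could look like this:

from datetime import timedelta


def custom_model_params() -> dict:
    # Hypothetical stand-in: keep the training window short so the small test dataset suffices.
    return dict(training_and_testing_period=timedelta(hours=2))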
Example #3
def test_forecasting_three_hours_of_wind(db, app):
    wind_device2: Asset = Asset.query.filter_by(
        name="wind-asset-2").one_or_none()

    # makes 12 forecasts
    horizon = timedelta(hours=1)
    job = create_forecasting_jobs(
        timed_value_type="Power",
        start_of_roll=as_server_time(datetime(2015, 1, 1, 10)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 13)),
        horizons=[horizon],
        asset_id=wind_device2.id,
        custom_model_params=custom_model_params(),
    )
    print("Job: %s" % job[0].id)

    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)

    forecasts = (
        Power.query.filter(Power.asset_id == wind_device2.id)
        .filter(Power.horizon == horizon)
        .filter(
            (Power.datetime >= as_server_time(datetime(2015, 1, 1, 11)))
            & (Power.datetime < as_server_time(datetime(2015, 1, 1, 14)))
        )
        .all()
    )
    assert len(forecasts) == 12
    check_aggregate(12, horizon)
Example #4
def test_forecasting_an_hour_of_wind(db, app):
    """Test one clean run of one job:
    - data source was made,
    - forecasts have been made
    """
    wind_device_1 = Asset.query.filter_by(name="wind-asset-1").one_or_none()

    assert get_data_source() is None

    # makes 4 forecasts
    horizon = timedelta(hours=1)
    job = create_forecasting_jobs(
        timed_value_type="Power",
        start_of_roll=as_server_time(datetime(2015, 1, 1, 6)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 7)),
        horizons=[horizon],
        asset_id=wind_device_1.id,
        custom_model_params=custom_model_params(),
    )

    print("Job: %s" % job[0].id)

    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)

    assert get_data_source() is not None

    forecasts = (
        Power.query.filter(Power.asset_id == wind_device_1.id)
        .filter(Power.horizon == horizon)
        .filter(
            (Power.datetime >= as_server_time(datetime(2015, 1, 1, 7)))
            & (Power.datetime < as_server_time(datetime(2015, 1, 1, 8)))
        )
        .all()
    )
    assert len(forecasts) == 4
    check_aggregate(4, horizon)
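
Examples #3, #4, #5, #13 and #14 are written against the older Asset/Power data model rather than Sensor/TimedBelief, so their imports would presumably come from the older module paths (again an assumption about the FlexMeasures version in use):

from flexmeasures.data.models.assets import Asset, Power
from flexmeasures.data.services.forecasting import (
    create_forecasting_jobs,
    handle_forecasting_exception,
)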
Example #5
def test_forecasting_two_hours_of_solar_at_edge_of_data_set(db, app):
    solar_device1: Asset = Asset.query.filter_by(
        name="solar-asset-1").one_or_none()

    last_power_datetime = (
        Power.query.filter(Power.asset_id == solar_device1.id)
        .filter(Power.horizon == timedelta(hours=0))
        .order_by(Power.datetime.desc())
        .first()
        .datetime
    )  # datetime index of the last power value 11.45pm (Jan 1st)

    # makes 4 forecasts, 1 of which is for a new datetime index
    horizon = timedelta(hours=6)
    job = create_forecasting_jobs(
        timed_value_type="Power",
        # start of data on which forecast is based (5.15pm)
        start_of_roll=last_power_datetime - horizon - timedelta(minutes=30),
        # end of data on which forecast is based (6.15pm)
        end_of_roll=last_power_datetime - horizon + timedelta(minutes=30),
        # so we want forecasts for 11.15pm (Jan 1st) to 0.15am (Jan 2nd)
        horizons=[timedelta(hours=6)],
        asset_id=solar_device1.id,
        custom_model_params=custom_model_params(),
    )
    print("Job: %s" % job[0].id)

    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)

    forecasts = (
        Power.query.filter(Power.asset_id == solar_device1.id)
        .filter(Power.horizon == horizon)
        .filter(Power.datetime > last_power_datetime)
        .all()
    )
    assert len(forecasts) == 1
    check_aggregate(4, horizon)
Example #6
def test_forecasting_two_hours_of_solar(
    app, run_as_cli, setup_fresh_test_data, clean_redis
):
    solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none()
    wind_device2: Sensor = Sensor.query.filter_by(name="wind-asset-2").one_or_none()
    print(solar_device1)
    print(wind_device2)

    # makes 8 forecasts
    horizon = timedelta(hours=1)
    job = create_forecasting_jobs(
        start_of_roll=as_server_time(datetime(2015, 1, 1, 12)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 14)),
        horizons=[horizon],
        sensor_id=solar_device1.id,
        custom_model_params=custom_model_params(),
    )
    print("Job: %s" % job[0].id)

    work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception)
    forecasts = (
        TimedBelief.query.filter(TimedBelief.sensor_id == solar_device1.id)
        .filter(TimedBelief.belief_horizon == horizon)
        .filter(
            (TimedBelief.event_start >= as_server_time(datetime(2015, 1, 1, 13)))
            & (TimedBelief.event_start < as_server_time(datetime(2015, 1, 1, 15)))
        )
        .all()
    )
    assert len(forecasts) == 8
    check_aggregate(8, horizon, solar_device1.id)
Example #7
def test_failed_forecasting_invalid_horizon(app, run_as_cli, clean_redis,
                                            setup_test_data):
    """This one (as well as the fallback) should fail as the horizon is invalid."""
    solar_device1: Sensor = Sensor.query.filter_by(
        name="solar-asset-1").one_or_none()
    create_forecasting_jobs(
        start_of_roll=as_server_time(datetime(2015, 1, 1, 21)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 23)),
        horizons=[timedelta(hours=18)],
        sensor_id=solar_device1.id,
        custom_model_params=custom_model_params(),
    )
    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)
    check_failures(app.queues["forecasting"], 2 * ["InvalidHorizonException"])
Example #8
def test_failed_forecasting_insufficient_data(app, run_as_cli, clean_redis,
                                              setup_test_data):
    """This one (as well as the fallback) should fail as there is no underlying data.
    (Power data is in 2015)"""
    solar_device1: Sensor = Sensor.query.filter_by(
        name="solar-asset-1").one_or_none()
    create_forecasting_jobs(
        start_of_roll=as_server_time(datetime(2016, 1, 1, 20)),
        end_of_roll=as_server_time(datetime(2016, 1, 1, 22)),
        horizons=[timedelta(hours=1)],
        sensor_id=solar_device1.id,
        custom_model_params=custom_model_params(),
    )
    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)
    check_failures(app.queues["forecasting"], 2 * ["NotEnoughDataException"])
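
check_failures is another test helper not shown here. A minimal sketch of the behaviour it is assumed to have: inspect the queue's failed-job registry and verify that each failed job's traceback mentions the expected exception name (the real helper also accepts a further argument with expected model descriptions, as used in Example #13):

from rq import Queue
from rq.job import Job


def check_failures(queue: Queue, expected_exception_names: list):
    # Assumption: one failed job per expected exception name, in enqueueing order.
    failed_job_ids = queue.failed_job_registry.get_job_ids()
    assert len(failed_job_ids) == len(expected_exception_names)
    for job_id, exception_name in zip(failed_job_ids, expected_exception_names):
        failed_job = Job.fetch(job_id, connection=queue.connection)
        assert exception_name in failed_job.exc_info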
Example #9
def test_failed_unknown_model(app, clean_redis, setup_test_data):
    """This one should fail because we use a model search term which yields no model configurator."""
    solar_device1: Sensor = Sensor.query.filter_by(
        name="solar-asset-1").one_or_none()
    horizon = timedelta(hours=1)

    cmp = custom_model_params()
    cmp["training_and_testing_period"] = timedelta(days=365)

    create_forecasting_jobs(
        start_of_roll=as_server_time(datetime(2015, 1, 1, 12)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 14)),
        horizons=[horizon],
        sensor_id=solar_device1.id,
        model_search_term="no-one-knows-this",
        custom_model_params=cmp,
    )
    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)

    check_failures(app.queues["forecasting"],
                   ["No model found for search term"])
Example #10
def test_scheduling_a_charging_station(db, app):
    """Test one clean run of one scheduling job:
    - data source was made,
    - schedule has been made

    Starting with a state of charge of 1 kWh, within 2 hours we should be able to reach 5 kWh.
    """
    soc_at_start = 1
    target_soc = 5
    duration_until_target = timedelta(hours=2)

    charging_station = Asset.query.filter(
        Asset.name == "Test charging station"
    ).one_or_none()
    start = as_server_time(datetime(2015, 1, 2))
    end = as_server_time(datetime(2015, 1, 3))
    resolution = timedelta(minutes=15)
    target_soc_datetime = start + duration_until_target
    soc_targets = pd.Series(
        np.nan, index=pd.date_range(start, end, freq=resolution, closed="right")
    )
    soc_targets.loc[target_soc_datetime] = target_soc

    assert (
        DataSource.query.filter_by(name="Seita", type="scheduling script").one_or_none()
        is None
    )  # Make sure the scheduler data source isn't there

    job = create_scheduling_job(
        charging_station.id,
        start,
        end,
        belief_time=start,
        resolution=resolution,
        soc_at_start=soc_at_start,
        soc_targets=soc_targets,
    )

    print("Job: %s" % job.id)

    work_on_rq(app.queues["scheduling"], exc_handler=exception_reporter)

    scheduler_source = DataSource.query.filter_by(
        name="Seita", type="scheduling script"
    ).one_or_none()
    assert (
        scheduler_source is not None
    )  # Make sure the scheduler data source is now there

    power_values = (
        Power.query.filter(Power.asset_id == charging_station.id)
        .filter(Power.data_source_id == scheduler_source.id)
        .all()
    )
    consumption_schedule = pd.Series(
        [-v.value for v in power_values],
        index=pd.DatetimeIndex([v.datetime for v in power_values]),
    )  # For consumption schedules, positive values denote consumption. For the db, consumption is negative
    assert len(consumption_schedule) == 96
    print(consumption_schedule.head(12))
    assert (
        consumption_schedule.head(8).sum() * (resolution / timedelta(hours=1)) == 4.0
    )  # The first 2 hours should consume 4 kWh to charge from 1 to 5 kWh
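
The final assertion in Example #10 rests on a simple energy balance: reaching 5 kWh from 1 kWh within 2 hours means the first 8 quarter-hourly power values must add up to 4 kWh of consumed energy, i.e. an average charging power of 2 kW. A standalone check of that arithmetic:

from datetime import timedelta

resolution = timedelta(minutes=15)
hours_per_slot = resolution / timedelta(hours=1)  # 0.25 h per scheduling slot
n_slots = int(timedelta(hours=2) / resolution)  # 8 slots in the 2-hour window
energy_needed_kwh = 5 - 1  # from soc_at_start (1 kWh) to target_soc (5 kWh)
average_power_kw = energy_needed_kwh / (n_slots * hours_per_slot)  # 2 kW
assert n_slots * average_power_kw * hours_per_slot == 4.0  # matches the test's assertion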
Example #11
def test_post_udi_event_and_get_device_message_with_unknown_prices(
        setup_fresh_api_test_data, clean_redis, app, message):
    auth_token = None
    with app.test_client() as client:
        sensor = Sensor.query.filter(
            Sensor.name == "Test battery").one_or_none()
        message["event"] = message["event"] % sensor.id
        auth_token = get_auth_token(client, "*****@*****.**",
                                    "testtest")
        post_udi_event_response = client.post(
            url_for("flexmeasures_api_v1_3.post_udi_event"),
            json=message,
            headers={"Authorization": auth_token},
        )
        print("Server responded with:\n%s" % post_udi_event_response.json)
        assert post_udi_event_response.status_code == 200
        assert post_udi_event_response.json["type"] == "PostUdiEventResponse"

        # look for scheduling jobs in queue
        assert (len(app.queues["scheduling"]) == 1
                )  # only 1 schedule should be made for 1 asset
        job = app.queues["scheduling"].jobs[0]
        assert job.kwargs["sensor_id"] == sensor.id
        assert job.kwargs["start"] == parse_datetime(message["datetime"])
        assert job.id == message["event"]
        assert (Job.fetch(
            message["event"],
            connection=app.queues["scheduling"].connection) == job)

        # process the scheduling queue
        work_on_rq(app.queues["scheduling"],
                   exc_handler=handle_scheduling_exception)
        processed_job = Job.fetch(
            message["event"], connection=app.queues["scheduling"].connection)
        assert processed_job.is_failed is True

        # check results are not in the database
        scheduler_source = DataSource.query.filter_by(
            name="Seita", type="scheduling script").one_or_none()
        assert (scheduler_source is None
                )  # Make sure the scheduler data source is still not there

        # try to retrieve the schedule through the getDeviceMessage api endpoint
        message = message_for_get_device_message()
        message["event"] = message["event"] % sensor.id
        auth_token = get_auth_token(client, "*****@*****.**",
                                    "testtest")
        get_device_message_response = client.get(
            url_for("flexmeasures_api_v1_3.get_device_message"),
            query_string=message,
            headers={
                "content-type": "application/json",
                "Authorization": auth_token
            },
        )
        print("Server responded with:\n%s" % get_device_message_response.json)
        assert get_device_message_response.status_code == 400
        assert get_device_message_response.json["type"] == "GetDeviceMessageResponse"
        assert (
            get_device_message_response.json["status"]
            == unknown_schedule()[0]["status"]
        )
        assert "prices unknown" in get_device_message_response.json["message"].lower()
Example #12
def test_trigger_and_get_schedule(
    app,
    add_market_prices,
    add_battery_assets,
    battery_soc_sensor,
    add_charging_station_assets,
    message,
    asset_name,
):
    # trigger a schedule through the /sensors/<id>/schedules/trigger [POST] api endpoint
    message["roundtrip-efficiency"] = 0.98
    message["soc-min"] = 0
    message["soc-max"] = 25
    with app.test_client() as client:
        sensor = Sensor.query.filter(Sensor.name == asset_name).one_or_none()
        message["soc-sensor"] = f"ea1.2018-06.localhost:fm1.{battery_soc_sensor.id}"
        auth_token = get_auth_token(client, "*****@*****.**",
                                    "testtest")
        trigger_schedule_response = client.post(
            url_for("SensorAPI:trigger_schedule", id=sensor.id),
            json=message,
            headers={"Authorization": auth_token},
        )
        print("Server responded with:\n%s" % trigger_schedule_response.json)
        assert trigger_schedule_response.status_code == 200
        job_id = trigger_schedule_response.json["schedule"]

    # look for scheduling jobs in queue
    assert (len(app.queues["scheduling"]) == 1
            )  # only 1 schedule should be made for 1 asset
    job = app.queues["scheduling"].jobs[0]
    assert job.kwargs["sensor_id"] == sensor.id
    assert job.kwargs["start"] == parse_datetime(message["start"])
    assert job.id == job_id

    # process the scheduling queue
    work_on_rq(app.queues["scheduling"],
               exc_handler=handle_scheduling_exception)
    assert (
        Job.fetch(job_id, connection=app.queues["scheduling"].connection).is_finished
        is True
    )

    # check results are in the database
    resolution = timedelta(minutes=15)
    scheduler_source = DataSource.query.filter_by(
        name="Seita", type="scheduling script").one_or_none()
    assert (scheduler_source
            is not None)  # Make sure the scheduler data source is now there
    power_values = (TimedBelief.query.filter(
        TimedBelief.sensor_id == sensor.id).filter(
            TimedBelief.source_id == scheduler_source.id).all())
    consumption_schedule = pd.Series(
        [-v.event_value for v in power_values],
        index=pd.DatetimeIndex([v.event_start for v in power_values],
                               freq=resolution),
    )  # For consumption schedules, positive values denote consumption. For the db, consumption is negative
    assert (len(consumption_schedule) ==
            app.config.get("FLEXMEASURES_PLANNING_HORIZON") / resolution)

    # check targets, if applicable
    if "targets" in message:
        start_soc = message["soc-at-start"] / 1000  # in MWh
        soc_schedule = integrate_time_series(consumption_schedule, start_soc, 6)
        print(consumption_schedule)
        print(soc_schedule)
        for target in message["targets"]:
            assert soc_schedule[target["datetime"]] == target["soc-target"] / 1000

    # try to retrieve the schedule through the /sensors/<id>/schedules/<job_id> [GET] api endpoint
    get_schedule_message = message_for_get_device_message(
        targets="soc-targets" in message)
    del get_schedule_message["type"]
    auth_token = get_auth_token(client, "*****@*****.**",
                                "testtest")
    get_schedule_response = client.get(
        url_for("SensorAPI:get_schedule", id=sensor.id, uuid=job_id),
        query_string=get_schedule_message,
        headers={
            "content-type": "application/json",
            "Authorization": auth_token
        },
    )
    print("Server responded with:\n%s" % get_schedule_response.json)
    assert get_schedule_response.status_code == 200
    # assert get_schedule_response.json["type"] == "GetDeviceMessageResponse"
    assert len(get_schedule_response.json["values"]) == 192

    # Test that a shorter planning horizon yields the same result for the shorter planning horizon
    get_schedule_message["duration"] = "PT6H"
    get_schedule_response_short = client.get(
        url_for("SensorAPI:get_schedule", id=sensor.id, uuid=job_id),
        query_string=get_schedule_message,
        headers={
            "content-type": "application/json",
            "Authorization": auth_token
        },
    )
    assert (get_schedule_response_short.json["values"] ==
            get_schedule_response.json["values"][0:24])

    # Test that a much longer planning horizon yields the same result (when there are only 2 days of prices)
    get_schedule_message["duration"] = "PT1000H"
    get_schedule_response_long = client.get(
        url_for("SensorAPI:get_schedule", id=sensor.id, uuid=job_id),
        query_string=get_schedule_message,
        headers={
            "content-type": "application/json",
            "Authorization": auth_token
        },
    )
    assert (get_schedule_response_long.json["values"][0:192] ==
            get_schedule_response.json["values"])
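
integrate_time_series (used here and in Example #14) converts the consumption schedule back into a state-of-charge series so that SoC targets can be checked. Its definition is not part of this listing; a rough sketch of the assumed behaviour, cumulative power-to-energy integration from a starting stock, with the third argument taken to be the rounding precision (the real helper may align timestamps differently):

import pandas as pd


def integrate_time_series(
    series: pd.Series, initial_stock: float, decimal_precision: int
) -> pd.Series:
    # Assumption: each value is a power (MW) held constant for one resolution step,
    # so energy per step is value * step length in hours, accumulated onto the initial stock.
    step_hours = pd.Timedelta(series.index.freq) / pd.Timedelta(hours=1)
    stock = initial_stock + (series * step_hours).cumsum()
    return stock.round(decimal_precision)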
Example #13
def test_failed_model_with_too_much_training_then_succeed_with_fallback(
        app, model_to_start_with, model_version):
    """
    Here we fail once - because we start with a model that needs too much training.
    So we check for this failure happening as expected.
    But then, we do succeed with the fallback model one level down.
    (fail-test falls back to linear & linear falls back to naive).
    As a result, there should be forecasts in the DB.
    """
    solar_device1: Asset = Asset.query.filter_by(
        name="solar-asset-1").one_or_none()
    horizon_hours = 1
    horizon = timedelta(hours=horizon_hours)

    cmp = custom_model_params()
    hour_start = 5
    if model_to_start_with == "linear-OLS":
        # making the linear model fail and fall back to naive
        hour_start = 3  # Todo: explain this parameter; why would it fail to forecast if data is there for the full day?

    # The failed test model (this failure enqueues a new job)
    create_forecasting_jobs(
        timed_value_type="Power",
        start_of_roll=as_server_time(datetime(2015, 1, 1, hour_start)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, hour_start + 2)),
        horizons=[horizon],
        asset_id=solar_device1.id,
        model_search_term=model_to_start_with,
        custom_model_params=cmp,
    )
    work_on_rq(app.queues["forecasting"],
               exc_handler=handle_forecasting_exception)

    # Check if the correct model failed in the expected way
    check_failures(
        app.queues["forecasting"],
        ["NotEnoughDataException"],
        ["%s model v%d" % (model_to_start_with, model_version)],
    )

    # this query is useful to check data:
    def make_query(the_horizon_hours: int) -> Query:
        the_horizon = timedelta(hours=the_horizon_hours)
        return (
            Power.query.filter(Power.asset_id == solar_device1.id)
            .filter(Power.horizon == the_horizon)
            .filter(
                (Power.datetime >= as_server_time(datetime(2015, 1, 1, hour_start + the_horizon_hours)))
                & (Power.datetime < as_server_time(datetime(2015, 1, 1, hour_start + the_horizon_hours + 2)))
            )
        )

    # The successful (linear or naive) OLS leads to these.
    forecasts = make_query(the_horizon_hours=horizon_hours).all()

    assert len(forecasts) == 8
    check_aggregate(8, horizon)

    if model_to_start_with == "linear-OLS":
        existing_data = make_query(the_horizon_hours=0).all()

        for ed, fd in zip(existing_data, forecasts):
            assert ed.value == fd.value

    # Now to check which models actually got to work.
    # We check which data sources do and do not exist by now:
    assert (get_data_source("failing-test model v1") is None
            )  # the test failure model failed -> no data source
    if model_to_start_with == "linear-OLS":
        assert (
            get_data_source() is None
        )  # the default (linear regression) (was made to) fail, as well
        assert (get_data_source("naive model v1")
                is not None)  # the naive one had to be used
    else:
        assert get_data_source() is not None  # the default (linear regression)
        assert (get_data_source("naive model v1") is None
                )  # the naive one did not have to be used
Example #14
def test_post_udi_event_and_get_device_message(app, message, asset_name):
    auth_token = None
    with app.test_client() as client:
        asset = Asset.query.filter(Asset.name == asset_name).one_or_none()
        asset_id = asset.id
        asset_owner_id = asset.owner_id
        message["event"] = message["event"] % (asset.owner_id, asset.id)
        auth_token = get_auth_token(client, "*****@*****.**", "testtest")
        post_udi_event_response = client.post(
            url_for("flexmeasures_api_v1_3.post_udi_event"),
            json=message,
            headers={"Authorization": auth_token},
        )
        print("Server responded with:\n%s" % post_udi_event_response.json)
        assert post_udi_event_response.status_code == 200
        assert post_udi_event_response.json["type"] == "PostUdiEventResponse"

    # test asset state in database
    msg_dt = parse_datetime(message["datetime"])
    asset = Asset.query.filter(Asset.name == asset_name).one_or_none()
    assert asset.soc_datetime == msg_dt
    assert asset.soc_in_mwh == message["value"] / 1000
    assert asset.soc_udi_event_id == 204

    # look for scheduling jobs in queue
    assert (
        len(app.queues["scheduling"]) == 1
    )  # only 1 schedule should be made for 1 asset
    job = app.queues["scheduling"].jobs[0]
    assert job.kwargs["asset_id"] == asset_id
    assert job.kwargs["start"] == parse_datetime(message["datetime"])
    assert job.id == message["event"]

    # process the scheduling queue
    work_on_rq(app.queues["scheduling"], exc_handler=handle_scheduling_exception)
    assert (
        Job.fetch(
            message["event"], connection=app.queues["scheduling"].connection
        ).is_finished
        is True
    )

    # check results are in the database
    resolution = timedelta(minutes=15)
    scheduler_source = DataSource.query.filter_by(
        name="Seita", type="scheduling script"
    ).one_or_none()
    assert (
        scheduler_source is not None
    )  # Make sure the scheduler data source is now there
    power_values = (
        Power.query.filter(Power.asset_id == asset_id)
        .filter(Power.data_source_id == scheduler_source.id)
        .all()
    )
    consumption_schedule = pd.Series(
        [-v.value for v in power_values],
        index=pd.DatetimeIndex([v.datetime for v in power_values], freq=resolution),
    )  # For consumption schedules, positive values denote consumption. For the db, consumption is negative
    assert (
        len(consumption_schedule)
        == app.config.get("FLEXMEASURES_PLANNING_HORIZON") / resolution
    )

    # check targets, if applicable
    if "targets" in message:
        start_soc = message["value"] / 1000  # in MWh
        soc_schedule = integrate_time_series(consumption_schedule, start_soc, 6)
        print(consumption_schedule)
        print(soc_schedule)
        for target in message["targets"]:
            assert soc_schedule[target["datetime"]] == target["value"] / 1000

    # try to retrieve the schedule through the getDeviceMessage api endpoint
    get_device_message = message_for_get_device_message()
    get_device_message["event"] = get_device_message["event"] % (
        asset_owner_id,
        asset_id,
    )
    auth_token = get_auth_token(client, "*****@*****.**", "testtest")
    get_device_message_response = client.get(
        url_for("flexmeasures_api_v1_3.get_device_message"),
        query_string=get_device_message,
        headers={"content-type": "application/json", "Authorization": auth_token},
    )
    print("Server responded with:\n%s" % get_device_message_response.json)
    assert get_device_message_response.status_code == 200
    assert get_device_message_response.json["type"] == "GetDeviceMessageResponse"
    assert len(get_device_message_response.json["values"]) == 192

    # Test that a shorter planning horizon yields the same result for the shorter planning horizon
    get_device_message["duration"] = "PT6H"
    get_device_message_response_short = client.get(
        url_for("flexmeasures_api_v1_3.get_device_message"),
        query_string=get_device_message,
        headers={"content-type": "application/json", "Authorization": auth_token},
    )
    assert (
        get_device_message_response_short.json["values"]
        == get_device_message_response.json["values"][0:24]
    )

    # Test that a much longer planning horizon yields the same result (when there are only 2 days of prices)
    get_device_message["duration"] = "PT1000H"
    get_device_message_response_long = client.get(
        url_for("flexmeasures_api_v1_3.get_device_message"),
        query_string=get_device_message,
        headers={"content-type": "application/json", "Authorization": auth_token},
    )
    assert (
        get_device_message_response_long.json["values"][0:192]
        == get_device_message_response.json["values"]
    )

    # sending again results in an error, unless we increase the event ID
    with app.test_client() as client:
        next_msg_dt = msg_dt + timedelta(minutes=5)
        message["datetime"] = next_msg_dt.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        post_udi_event_response = client.post(
            url_for("flexmeasures_api_v1_3.post_udi_event"),
            json=message,
            headers={"Authorization": auth_token},
        )
        print("Server responded with:\n%s" % post_udi_event_response.json)
        assert post_udi_event_response.status_code == 400
        assert post_udi_event_response.json["type"] == "PostUdiEventResponse"
        assert post_udi_event_response.json["status"] == "OUTDATED_UDI_EVENT"

        message["event"] = message["event"].replace("204", "205")
        post_udi_event_response = client.post(
            url_for("flexmeasures_api_v1_3.post_udi_event"),
            json=message,
            headers={"Authorization": auth_token},
        )
        print("Server responded with:\n%s" % post_udi_event_response.json)
        assert post_udi_event_response.status_code == 200
        assert post_udi_event_response.json["type"] == "PostUdiEventResponse"

    # test database state
    asset = Asset.query.filter(Asset.name == asset_name).one_or_none()
    assert asset.soc_datetime == next_msg_dt
    assert asset.soc_in_mwh == message["value"] / 1000
    assert asset.soc_udi_event_id == 205

    # process the scheduling queue
    work_on_rq(app.queues["scheduling"], exc_handler=handle_scheduling_exception)
    # the job still fails due to missing prices for the last time slot, but we did test that the api and worker now processed the UDI event and attempted to create a schedule
    assert (
        Job.fetch(
            message["event"], connection=app.queues["scheduling"].connection
        ).is_failed
        is True
    )
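
Examples #11, #12 and #14 receive message (and asset_name) as test parameters rather than defining them inline. They would typically be supplied through pytest parametrization; a hypothetical illustration for Example #14 (the event address template and values below are made up for this sketch, not taken from the listing):

import pytest


@pytest.mark.parametrize(
    "message, asset_name",
    [
        (
            {
                "type": "PostUdiEventRequest",
                "event": "ea1.2018-06.localhost:%s:%s:204:soc",  # filled in with owner id and asset id
                "datetime": "2015-01-01T00:00:00+00:00",
                "value": 12.1,  # kWh, hence the /1000 conversions to MWh in the test
                "unit": "kWh",
            },
            "Test battery",
        ),
    ],
)
def test_post_udi_event_and_get_device_message(app, message, asset_name):
    ...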