def create_test_battery_assets(
    db: SQLAlchemy, setup_roles_users, setup_markets
) -> Dict[str, Asset]:
    """Add two battery assets, set their capacity values and their initial SOC."""
    db.session.add(
        AssetType(
            name="battery",
            is_consumer=True,
            is_producer=True,
            can_curtail=True,
            can_shift=True,
            daily_seasonality=True,
            weekly_seasonality=True,
            yearly_seasonality=True,
        )
    )
    # The two batteries are identical except for their name and SOC datetime
    # (the second has its SOC set in 2040, a year without known prices).
    batteries: Dict[str, Asset] = {}
    for battery_name, soc_year in (
        ("Test battery", 2015),
        ("Test battery with no known prices", 2040),
    ):
        battery = Asset(
            name=battery_name,
            owner_id=setup_roles_users["Test Prosumer User"].id,
            asset_type_name="battery",
            event_resolution=timedelta(minutes=15),
            capacity_in_mw=2,
            max_soc_in_mwh=5,
            min_soc_in_mwh=0,
            soc_in_mwh=2.5,
            soc_datetime=as_server_time(datetime(soc_year, 1, 1)),
            soc_udi_event_id=203,
            latitude=10,
            longitude=100,
            market_id=setup_markets["epex_da"].id,
            unit="MW",
        )
        db.session.add(battery)
        batteries[battery_name] = battery
    return batteries
def make_query(the_horizon_hours: int) -> Query:
    """Query power values for solar_device1 at the given belief horizon,
    within a two-hour event window that shifts along with the horizon."""
    the_horizon = timedelta(hours=the_horizon_hours)
    window_start = as_server_time(
        datetime(2015, 1, 1, hour_start + the_horizon_hours)
    )
    window_end = as_server_time(
        datetime(2015, 1, 1, hour_start + the_horizon_hours + 2)
    )
    return (
        Power.query.filter(Power.asset_id == solar_device1.id)
        .filter(Power.horizon == the_horizon)
        .filter((Power.datetime >= window_start) & (Power.datetime < window_end))
    )
def add_battery_assets(db: SQLAlchemy, setup_roles_users, setup_markets):
    """Add two battery assets, set their capacity values and their initial SOC."""
    db.session.add(
        AssetType(
            name="battery",
            is_consumer=True,
            is_producer=True,
            can_curtail=True,
            can_shift=True,
            daily_seasonality=True,
            weekly_seasonality=True,
            yearly_seasonality=True,
        )
    )
    from flexmeasures.data.models.user import User, Role

    user_datastore = SQLAlchemySessionUserDatastore(db.session, User, Role)
    test_prosumer = user_datastore.find_user(email="*****@*****.**")
    epex_da = Market.query.filter(Market.name == "epex_da").one_or_none()
    # Two identical batteries, except for their name and SOC datetime
    # (the second has its SOC set in 2040, a year without known prices).
    for battery_name, soc_year in (
        ("Test battery", 2015),
        ("Test battery with no known prices", 2040),
    ):
        battery = Asset(
            name=battery_name,
            asset_type_name="battery",
            event_resolution=timedelta(minutes=15),
            capacity_in_mw=2,
            max_soc_in_mwh=5,
            min_soc_in_mwh=0,
            soc_in_mwh=2.5,
            soc_datetime=as_server_time(datetime(soc_year, 1, 1)),
            soc_udi_event_id=203,
            latitude=10,
            longitude=100,
            market_id=epex_da.id,
            unit="MW",
        )
        battery.owner = test_prosumer
        db.session.add(battery)
def set_time_range_for_session():
    """Set period (start_date, end_date and resolution) on session if they are not yet set.
    The datepicker sends times as tz-aware UTC strings.
    We re-interpret them as being in the server's timezone.
    Also set the forecast horizon, if given."""
    # Start time: a request value wins; otherwise fall back to any session
    # value; otherwise use the default.
    if "start_time" in request.values:
        session["start_time"] = time_utils.localized_datetime(
            iso8601.parse_date(request.values.get("start_time"))
        )
    elif "start_time" not in session:
        session["start_time"] = time_utils.get_default_start_time()
    else:
        if (
            session["start_time"].tzinfo is None
        ):  # session storage seems to lose tz info and becomes UTC
            session["start_time"] = time_utils.as_server_time(session["start_time"])
    # End time: same resolution order as the start time.
    if "end_time" in request.values:
        session["end_time"] = time_utils.localized_datetime(
            iso8601.parse_date(request.values.get("end_time"))
        )
    elif "end_time" not in session:
        session["end_time"] = time_utils.get_default_end_time()
    else:
        if session["end_time"].tzinfo is None:
            session["end_time"] = time_utils.as_server_time(session["end_time"])
    # Our demo server's UI should only work with the current year's data
    if current_app.config.get("FLEXMEASURES_MODE", "") == "demo":
        session["start_time"] = session["start_time"].replace(year=datetime.now().year)
        session["end_time"] = session["end_time"].replace(year=datetime.now().year)
    # If the period is reversed, swap its two ends ...
    if session["start_time"] >= session["end_time"]:
        session["start_time"], session["end_time"] = (
            session["end_time"],
            session["start_time"],
        )
    # ... after the swap this can only still hold if both times are equal.
    if session["start_time"] >= session["end_time"]:
        raise BadRequest(
            "Start time %s is not after end time %s."
            % (session["start_time"], session["end_time"])
        )
    session["resolution"] = time_utils.decide_resolution(
        session["start_time"], session["end_time"]
    )
    # Forecast horizon: take it from the request if given, then make sure the
    # session value is one that is allowed for the chosen resolution.
    if "forecast_horizon" in request.values:
        session["forecast_horizon"] = request.values.get("forecast_horizon")
    allowed_horizons = time_utils.forecast_horizons_for(session["resolution"])
    if (
        session.get("forecast_horizon") not in allowed_horizons
        and len(allowed_horizons) > 0
    ):
        session["forecast_horizon"] = allowed_horizons[0]
def test_failed_forecasting_invalid_horizon(
    app, run_as_cli, clean_redis, setup_test_data
):
    """This one (as well as the fallback) should fail as the horizon is invalid."""
    solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none()
    invalid_horizon = timedelta(hours=18)
    create_forecasting_jobs(
        sensor_id=solar_device1.id,
        start_of_roll=as_server_time(datetime(2015, 1, 1, 21)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 23)),
        horizons=[invalid_horizon],
        custom_model_params=custom_model_params(),
    )
    work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception)
    # Both the main job and the fallback job should have failed.
    check_failures(app.queues["forecasting"], 2 * ["InvalidHorizonException"])
def add_test_weather_sensor_and_forecasts(db: SQLAlchemy):
    """one day of test data (one complete sine curve) for two sensors"""
    data_source = DataSource.query.filter_by(
        name="Seita", type="demo script"
    ).one_or_none()
    for sensor_name in ("radiation", "wind_speed"):
        sensor_type = WeatherSensorType(name=sensor_name)
        sensor = WeatherSensor(
            name=sensor_name, sensor_type=sensor_type, latitude=100, longitude=100
        )
        db.session.add(sensor)
        # 15-minute slots spanning two days (2015-01-01 .. 2015-01-02 23:45)
        time_slots = pd.date_range(
            datetime(2015, 1, 1), datetime(2015, 1, 2, 23, 45), freq="15T"
        )
        values = [random() * (1 + np.sin(x / 15)) for x in range(len(time_slots))]
        # Scale the sine curve to a plausible magnitude per sensor type.
        # NOTE(review): the "temperature" branch can never trigger here, as only
        # "radiation" and "wind_speed" sensors are created above — confirm
        # whether it is a leftover or a missing sensor.
        if sensor_name == "temperature":
            values = [value * 17 for value in values]
        if sensor_name == "wind_speed":
            values = [value * 45 for value in values]
        if sensor_name == "radiation":
            values = [value * 600 for value in values]
        for dt, val in zip(time_slots, values):
            db.session.add(
                Weather(
                    sensor=sensor,
                    datetime=as_server_time(dt),
                    value=val,
                    horizon=timedelta(hours=6),
                    data_source_id=data_source.id,
                )
            )
def add_test_weather_sensor_and_forecasts(db: SQLAlchemy, setup_generic_asset_types):
    """one day of test data (one complete sine curve) for two sensors"""
    data_source = DataSource.query.filter_by(
        name="Seita", type="demo script"
    ).one_or_none()
    weather_station = GenericAsset(
        name="Test weather station farther away",
        generic_asset_type=setup_generic_asset_types["weather_station"],
        latitude=100,
        longitude=100,
    )
    for sensor_name, unit in (("irradiance", "kW/m²"), ("wind speed", "m/s")):
        sensor = Sensor(name=sensor_name, generic_asset=weather_station, unit=unit)
        db.session.add(sensor)
        # 15-minute slots spanning two days (2015-01-01 .. 2015-01-02 23:45)
        time_slots = pd.date_range(
            datetime(2015, 1, 1), datetime(2015, 1, 2, 23, 45), freq="15T"
        )
        values = [random() * (1 + np.sin(x / 15)) for x in range(len(time_slots))]
        # Scale the sine curve to a plausible magnitude per sensor.
        # NOTE(review): the "temperature" branch can never trigger here, as only
        # "irradiance" and "wind speed" sensors are created above — confirm
        # whether it is a leftover or a missing sensor.
        if sensor_name == "temperature":
            values = [value * 17 for value in values]
        if sensor_name == "wind speed":
            values = [value * 45 for value in values]
        if sensor_name == "irradiance":
            values = [value * 600 for value in values]
        for dt, val in zip(time_slots, values):
            db.session.add(
                TimedBelief(
                    sensor=sensor,
                    event_start=as_server_time(dt),
                    event_value=val,
                    belief_horizon=timedelta(hours=6),
                    source=data_source,
                )
            )
def test_failed_forecasting_insufficient_data(
    app, run_as_cli, clean_redis, setup_test_data
):
    """This one (as well as the fallback) should fail as there is no underlying data.
    (Power data is in 2015)"""
    solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none()
    create_forecasting_jobs(
        sensor_id=solar_device1.id,
        start_of_roll=as_server_time(datetime(2016, 1, 1, 20)),
        end_of_roll=as_server_time(datetime(2016, 1, 1, 22)),
        horizons=[timedelta(hours=1)],
        custom_model_params=custom_model_params(),
    )
    work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception)
    # Both the main job and the fallback job should have failed.
    check_failures(app.queues["forecasting"], 2 * ["NotEnoughDataException"])
def setup_assets(db, setup_roles_users, setup_markets):
    """Make some asset types and add assets to known test users."""
    data_source = DataSource(name="Seita", type="demo script")
    db.session.add(data_source)
    db.session.add(
        AssetType(
            name="solar",
            is_producer=True,
            can_curtail=True,
            daily_seasonality=True,
            yearly_seasonality=True,
        )
    )
    db.session.add(
        AssetType(
            name="wind",
            is_producer=True,
            can_curtail=True,
            daily_seasonality=True,
            yearly_seasonality=True,
        )
    )
    test_prosumer = find_user_by_email("*****@*****.**")
    test_market = Market.query.filter_by(name="epex_da").one_or_none()
    for asset_name in ["wind-asset-1", "wind-asset-2", "solar-asset-1"]:
        asset = Asset(
            name=asset_name,
            asset_type_name="wind" if "wind" in asset_name else "solar",
            event_resolution=timedelta(minutes=15),
            capacity_in_mw=1,
            latitude=10,
            longitude=100,
            min_soc_in_mwh=0,
            max_soc_in_mwh=0,
            soc_in_mwh=0,
            unit="MW",
            market_id=test_market.id,
        )
        asset.owner = test_prosumer
        db.session.add(asset)
        # one day of test data (one complete sine curve)
        time_slots = pd.date_range(
            datetime(2015, 1, 1), datetime(2015, 1, 1, 23, 45), freq="15T"
        )
        values = [random() * (1 + np.sin(x / 15)) for x in range(len(time_slots))]
        for dt, val in zip(time_slots, values):
            p = Power(
                datetime=as_server_time(dt),
                horizon=parse_duration("PT0M"),
                value=val,
                data_source_id=data_source.id,
            )
            p.asset = asset
            db.session.add(p)
def test_fallback_to_unsolvable_problem(target_soc, charging_station_name):
    """Starting with a state of charge 10 kWh, within 2 hours we should be able to
    reach any state of charge in the range [10, 14] kWh for a unidirectional station,
    or [6, 14] for a bidirectional station, given a charging capacity of 2 kW.
    Here we test target states of charge outside that range, ones that we should
    be able to get as close to as 1 kWh difference.
    We want our scheduler to handle unsolvable problems like these with a sensible
    fallback policy.
    """
    soc_at_start = 10
    duration_until_target = timedelta(hours=2)
    expected_gap = 1
    epex_da = Sensor.query.filter(Sensor.name == "epex_da").one_or_none()
    charging_station = Sensor.query.filter(
        Sensor.name == charging_station_name
    ).one_or_none()
    assert charging_station.get_attribute("capacity_in_mw") == 2
    assert Sensor.query.get(charging_station.get_attribute("market_id")) == epex_da
    start = as_server_time(datetime(2015, 1, 2))
    end = as_server_time(datetime(2015, 1, 3))
    resolution = timedelta(minutes=15)
    target_soc_datetime = start + duration_until_target
    # A single target SOC, 2 hours in; all other slots stay unconstrained (NaN).
    soc_targets = pd.Series(
        np.nan, index=pd.date_range(start, end, freq=resolution, closed="right")
    )
    soc_targets.loc[target_soc_datetime] = target_soc
    consumption_schedule = schedule_charging_station(
        charging_station, start, end, resolution, soc_at_start, soc_targets
    )
    soc_schedule = integrate_time_series(
        consumption_schedule, soc_at_start, decimal_precision=6
    )
    # Check if constraints were met
    assert (
        min(consumption_schedule.values)
        >= charging_station.get_attribute("capacity_in_mw") * -1
    )
    assert max(consumption_schedule.values) <= charging_station.get_attribute(
        "capacity_in_mw"
    )
    print(consumption_schedule.head(12))
    print(soc_schedule.head(12))
    # The fallback should get as close to the infeasible target as possible:
    # exactly expected_gap (1 kWh) away, within tolerance.
    assert (
        abs(abs(soc_schedule.loc[target_soc_datetime] - target_soc) - expected_gap)
        < TOLERANCE
    )
def check_data_availability(
    old_sensor_model,
    old_time_series_data_model,
    forecast_start: datetime,
    forecast_end: datetime,
    query_window: Tuple[datetime, datetime],
    horizon: timedelta,
):
    """Check if enough data is available in the database in the first place,
    for training window and lagged variables. Otherwise, suggest new forecast period.
    TODO: we could also check regressor data, if we get regressor specs passed in here.
    """
    # Find the earliest and latest events recorded for this sensor (by name).
    q = old_time_series_data_model.query.join(old_sensor_model.__class__).filter(
        old_sensor_model.__class__.name == old_sensor_model.name
    )
    first_value = q.order_by(old_time_series_data_model.event_start.asc()).first()
    last_value = q.order_by(old_time_series_data_model.event_start.desc()).first()
    if first_value is None:
        raise NotEnoughDataException(
            "No data available at all. Forecasting impossible."
        )
    first = as_server_time(first_value.event_start)
    last = as_server_time(last_value.event_start)
    # The query window must not start before the first available event ...
    if query_window[0] < first:
        # Shift the forecast start forward by the amount of missing history.
        suggested_start = forecast_start + (first - query_window[0])
        raise NotEnoughDataException(
            f"Not enough data to forecast {old_sensor_model.name} "
            f"for the forecast window {as_server_time(forecast_start)} to {as_server_time(forecast_end)}. "
            f"I needed to query from {as_server_time(query_window[0])}, "
            f"but the first value available is from {first} to {first + old_sensor_model.event_resolution}. "
            f"Consider setting the start date to {as_server_time(suggested_start)}."
        )
    # ... nor require data beyond the last available event (accounting for the
    # forecast horizon and the sensor's event resolution).
    if query_window[1] - horizon > last + old_sensor_model.event_resolution:
        # Pull the forecast end back by the amount of missing future data.
        suggested_end = forecast_end + (last - (query_window[1] - horizon))
        raise NotEnoughDataException(
            f"Not enough data to forecast {old_sensor_model.name} "
            f"for the forecast window {as_server_time(forecast_start)} to {as_server_time(forecast_end)}. "
            f"I needed to query until {as_server_time(query_window[1] - horizon)}, "
            f"but the last value available is from {last} to {last + old_sensor_model.event_resolution}. "
            f"Consider setting the end date to {as_server_time(suggested_end)}."
        )
def make_query(the_horizon_hours: int) -> Query:
    """Query beliefs for solar_device1 at the given belief horizon, within
    a two-hour event window that shifts along with the horizon."""
    the_horizon = timedelta(hours=the_horizon_hours)
    window_start = as_server_time(
        datetime(2015, 1, 1, hour_start + the_horizon_hours)
    )
    window_end = as_server_time(
        datetime(2015, 1, 1, hour_start + the_horizon_hours + 2)
    )
    return (
        TimedBelief.query.filter(TimedBelief.sensor_id == solar_device1.id)
        .filter(TimedBelief.belief_horizon == the_horizon)
        .filter(
            (TimedBelief.event_start >= window_start)
            & (TimedBelief.event_start < window_end)
        )
    )
def test_charging_station_solver_day_2(target_soc, charging_station_name):
    """Starting with a state of charge 1 kWh, within 2 hours we should be able to
    reach any state of charge in the range [1, 5] kWh for a unidirectional station,
    or [0, 5] for a bidirectional station, given a charging capacity of 2 kW.
    """
    soc_at_start = 1
    duration_until_target = timedelta(hours=2)
    epex_da = Sensor.query.filter(Sensor.name == "epex_da").one_or_none()
    charging_station = Sensor.query.filter(
        Sensor.name == charging_station_name
    ).one_or_none()
    assert charging_station.get_attribute("capacity_in_mw") == 2
    assert Sensor.query.get(charging_station.get_attribute("market_id")) == epex_da
    start = as_server_time(datetime(2015, 1, 2))
    end = as_server_time(datetime(2015, 1, 3))
    resolution = timedelta(minutes=15)
    target_soc_datetime = start + duration_until_target
    # A single target SOC, 2 hours in; all other slots stay unconstrained (NaN).
    soc_targets = pd.Series(
        np.nan, index=pd.date_range(start, end, freq=resolution, closed="right")
    )
    soc_targets.loc[target_soc_datetime] = target_soc
    consumption_schedule = schedule_charging_station(
        charging_station, start, end, resolution, soc_at_start, soc_targets
    )
    soc_schedule = integrate_time_series(
        consumption_schedule, soc_at_start, decimal_precision=6
    )
    # Check if constraints were met
    assert (
        min(consumption_schedule.values)
        >= charging_station.get_attribute("capacity_in_mw") * -1
    )
    assert (
        max(consumption_schedule.values)
        <= charging_station.get_attribute("capacity_in_mw") + TOLERANCE
    )
    print(consumption_schedule.head(12))
    print(soc_schedule.head(12))
    # The schedule should hit the (solvable) target SOC, within tolerance.
    assert abs(soc_schedule.loc[target_soc_datetime] - target_soc) < TOLERANCE
def test_battery_solver_day_1():
    """Schedule the test battery for day 1 and check that capacity and
    state-of-charge constraints are respected."""
    epex_da = Market.query.filter(Market.name == "epex_da").one_or_none()
    battery = Asset.query.filter(Asset.name == "Test battery").one_or_none()
    resolution = timedelta(minutes=15)
    start = as_server_time(datetime(2015, 1, 1))
    end = as_server_time(datetime(2015, 1, 2))
    soc_at_start = battery.soc_in_mwh
    schedule = schedule_battery(battery, epex_da, start, end, resolution, soc_at_start)
    soc_schedule = integrate_time_series(schedule, soc_at_start, decimal_precision=6)
    with pd.option_context("display.max_rows", None, "display.max_columns", 3):
        print(soc_schedule)
    # Check if constraints were met
    assert min(schedule.values) >= -battery.capacity_in_mw
    assert max(schedule.values) <= battery.capacity_in_mw
    for soc in soc_schedule.values:
        assert battery.min_soc_in_mwh <= soc <= battery.max_soc_in_mwh
def test_failed_unknown_model(app, clean_redis, setup_test_data):
    """This one should fail because we use a model search term which yields no model configurator."""
    solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none()
    cmp = custom_model_params()
    cmp["training_and_testing_period"] = timedelta(days=365)
    horizon = timedelta(hours=1)
    create_forecasting_jobs(
        sensor_id=solar_device1.id,
        start_of_roll=as_server_time(datetime(2015, 1, 1, 12)),
        end_of_roll=as_server_time(datetime(2015, 1, 1, 14)),
        horizons=[horizon],
        model_search_term="no-one-knows-this",
        custom_model_params=cmp,
    )
    work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception)
    check_failures(app.queues["forecasting"], ["No model found for search term"])
def test_generic_model(
    generic_asset_type_names: List[str],
    sensor_name: Optional[str] = None,
    from_date: str = "2015-03-10",
    period: int = 3,
    horizon_hours: int = 1,
    training: int = 30,
):
    """Manually test integration of timetomodel for our generic model."""
    start = as_server_time(datetime.strptime(from_date, "%Y-%m-%d"))
    end = start + timedelta(days=period)
    training_and_testing_period = timedelta(days=training)
    horizon = timedelta(hours=horizon_hours)
    with app.app_context():
        # We need exactly one matching sensor to model; bail out otherwise.
        sensors = query_sensor_by_name_and_generic_asset_type_name(
            sensor_name=sensor_name,
            generic_asset_type_names=generic_asset_type_names,
        ).all()
        if len(sensors) == 0:
            click.echo("No such sensor in db, so I will not add any forecasts.")
            return
        elif len(sensors) > 1:
            click.echo(
                "No unique sensor found in db, so I will not add any forecasts."
            )
            return
        linear_model_configurator = lookup_model_specs_configurator("linear")
        (
            model_specs,
            model_identifier,
            fallback_model_identifier,
        ) = linear_model_configurator(
            sensor=sensors[0],
            forecast_start=start,
            forecast_end=end,
            forecast_horizon=horizon,
            custom_model_params=dict(
                training_and_testing_period=training_and_testing_period
            ),
        )
        # Create and train the model
        model = create_fitted_model(model_specs, model_identifier)
        print("\n\nparams:\n%s\n\n" % model.params)
        evaluate_models(m1=ModelState(model, model_specs), plot_path=None)
        return ModelState(model, model_specs)
def test_scheduling_a_battery(db, app):
    """Test one clean run of one scheduling job:
    - data source was made,
    - schedule has been made
    """
    battery = Asset.query.filter(Asset.name == "Test battery").one_or_none()
    start = as_server_time(datetime(2015, 1, 2))
    end = as_server_time(datetime(2015, 1, 3))
    resolution = timedelta(minutes=15)
    # The scheduler data source should not exist yet
    assert (
        DataSource.query.filter_by(name="Seita", type="scheduling script").one_or_none()
        is None
    )
    job = create_scheduling_job(
        battery.id, start, end, belief_time=start, resolution=resolution
    )
    print("Job: %s" % job.id)
    work_on_rq(app.queues["scheduling"], exc_handler=exception_reporter)
    # Running the job should have created the scheduler data source
    scheduler_source = DataSource.query.filter_by(
        name="Seita", type="scheduling script"
    ).one_or_none()
    assert scheduler_source is not None
    # A full day of 15-minute power values should have been scheduled
    power_values = (
        Power.query.filter(Power.asset_id == battery.id)
        .filter(Power.data_source_id == scheduler_source.id)
        .all()
    )
    print([v.value for v in power_values])
    assert len(power_values) == 96
def test_making_forecasts():
    """
    Manual test to enqueue and process a forecasting job via redis queue
    """
    click.echo("Manual forecasting job queuing started ...")
    asset_id = 1
    # Filter existing 6-hour-horizon forecasts over the two-day roll window.
    forecast_filter = (
        Power.query.filter(Power.asset_id == asset_id)
        .filter(Power.horizon == timedelta(hours=6))
        .filter(
            (Power.datetime >= as_server_time(datetime(2015, 4, 1, 6)))
            & (Power.datetime < as_server_time(datetime(2015, 4, 3, 6)))
        )
    )
    click.echo("Delete forecasts ...")
    forecast_filter.delete()
    click.echo("Forecasts found before : %d" % forecast_filter.count())
    create_forecasting_jobs(
        asset_id=asset_id,
        timed_value_type="Power",
        horizons=[timedelta(hours=6)],
        start_of_roll=as_server_time(datetime(2015, 4, 1)),
        end_of_roll=as_server_time(datetime(2015, 4, 3)),
    )
    click.echo("Queue before working: %s" % app.queues["forecasting"].jobs)
    # Work the forecasting queue in this process until it is empty.
    worker = Worker(
        [app.queues["forecasting"]],
        connection=app.queues["forecasting"].connection,
        name="Test CLI Forecaster",
        exception_handlers=[handle_forecasting_exception],
    )
    worker.work()
    click.echo("Queue after working: %s" % app.queues["forecasting"].jobs)
    click.echo(
        "Forecasts found after (should be 24 * 2 * 4 = 192): %d"
        % forecast_filter.count()
    )
def add_market_prices(db: SQLAlchemy, setup_assets, setup_markets, setup_sources):
    """Add two days of market prices for the EPEX day-ahead market."""
    epex_sensor = setup_markets["epex_da"].corresponding_sensor
    seita = setup_sources["Seita"]

    def _beliefs_for(day_start: datetime, day_end: datetime, values) -> list:
        # One belief per hourly slot, known right away (belief horizon 0).
        slots = pd.date_range(day_start, day_end, freq="1H", closed="left")
        return [
            TimedBelief(
                event_start=as_server_time(dt),
                belief_horizon=timedelta(hours=0),
                event_value=val,
                source=seita,
                sensor=epex_sensor,
            )
            for dt, val in zip(slots, values)
        ]

    # one day of test data (one complete sine curve)
    day1_values = [
        random() * (1 + np.sin(x * 2 * np.pi / 24)) for x in range(24)
    ]
    db.session.add_all(
        _beliefs_for(datetime(2015, 1, 1), datetime(2015, 1, 2), day1_values)
    )
    # another day of test data (8 expensive hours, 8 cheap hours, and again 8 expensive hours)
    day2_values = [100] * 8 + [90] * 8 + [100] * 8
    db.session.add_all(
        _beliefs_for(datetime(2015, 1, 2), datetime(2015, 1, 3), day2_values)
    )
def test_battery_solver_day_1(add_battery_assets):
    """Schedule the test battery for day 1 and check that capacity and
    state-of-charge constraints are respected."""
    epex_da = Sensor.query.filter(Sensor.name == "epex_da").one_or_none()
    battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none()
    assert Sensor.query.get(battery.get_attribute("market_id")) == epex_da
    resolution = timedelta(minutes=15)
    start = as_server_time(datetime(2015, 1, 1))
    end = as_server_time(datetime(2015, 1, 2))
    soc_at_start = battery.get_attribute("soc_in_mwh")
    schedule = schedule_battery(battery, start, end, resolution, soc_at_start)
    soc_schedule = integrate_time_series(schedule, soc_at_start, decimal_precision=6)
    with pd.option_context("display.max_rows", None, "display.max_columns", 3):
        print(soc_schedule)
    # Check if constraints were met
    capacity = battery.get_attribute("capacity_in_mw")
    assert min(schedule.values) >= -capacity - TOLERANCE
    assert max(schedule.values) <= capacity
    soc_min = battery.get_attribute("min_soc_in_mwh")
    soc_max = battery.get_attribute("max_soc_in_mwh")
    for soc in soc_schedule.values:
        assert soc_min <= soc <= soc_max
def setup_fresh_test_data(
    fresh_db,
    setup_markets_fresh_db,
    setup_roles_users_fresh_db,
    setup_generic_asset_types_fresh_db,
    app,
    fresh_remove_seasonality_for_power_forecasts,
):
    """Set up power assets with one day of power data, plus weather sensors
    and forecasts, on a fresh database."""
    db = fresh_db
    setup_roles_users = setup_roles_users_fresh_db
    setup_markets = setup_markets_fresh_db
    data_source = DataSource(name="Seita", type="demo script")
    db.session.add(data_source)
    db.session.flush()  # make the data source known to the db before it is referenced below
    for asset_name in ["wind-asset-2", "solar-asset-1"]:
        asset = Asset(
            name=asset_name,
            asset_type_name="wind" if "wind" in asset_name else "solar",
            event_resolution=timedelta(minutes=15),
            capacity_in_mw=1,
            latitude=10,
            longitude=100,
            min_soc_in_mwh=0,
            max_soc_in_mwh=0,
            soc_in_mwh=0,
            unit="MW",
            market_id=setup_markets["epex_da"].id,
        )
        asset.owner = setup_roles_users["Test Prosumer User"]
        db.session.add(asset)
        # one day of test data (one complete sine curve)
        time_slots = pd.date_range(
            datetime(2015, 1, 1), datetime(2015, 1, 1, 23, 45), freq="15T"
        )
        values = [random() * (1 + np.sin(x / 15)) for x in range(len(time_slots))]
        beliefs = [
            TimedBelief(
                event_start=as_server_time(dt),
                belief_horizon=parse_duration("PT0M"),
                event_value=val,
                sensor=asset.corresponding_sensor,
                source=data_source,
            )
            for dt, val in zip(time_slots, values)
        ]
        db.session.add_all(beliefs)
    add_test_weather_sensor_and_forecasts(fresh_db, setup_generic_asset_types_fresh_db)
def add_market_prices(db: SQLAlchemy, setup_assets, setup_markets):
    """Add two days of 15-minute market prices for the EPEX day-ahead market."""
    epex_da = Market.query.filter(Market.name == "epex_da").one_or_none()
    data_source = DataSource.query.filter_by(
        name="Seita", type="demo script"
    ).one_or_none()

    def _add_prices(day_start: datetime, day_end: datetime, values):
        # One Price per 15-minute slot, known right away (horizon 0).
        slots = pd.date_range(day_start, day_end, freq="15T", closed="left")
        for dt, val in zip(slots, values):
            price = Price(
                datetime=as_server_time(dt),
                horizon=timedelta(hours=0),
                value=val,
                data_source_id=data_source.id,
            )
            price.market = epex_da
            db.session.add(price)

    # one day of test data (one complete sine curve)
    _add_prices(
        datetime(2015, 1, 1),
        datetime(2015, 1, 2),
        [random() * (1 + np.sin(x / 15)) for x in range(96)],
    )
    # another day of test data (8 expensive hours, 8 cheap hours, and again 8 expensive hours)
    _add_prices(
        datetime(2015, 1, 2),
        datetime(2015, 1, 3),
        [100] * 8 * 4 + [90] * 8 * 4 + [100] * 8 * 4,
    )
def setup_assets(
    db, setup_roles_users, setup_markets, setup_sources, setup_asset_types
) -> Dict[str, Asset]:
    """Add assets to known test users.
    Deprecated. Remove with Asset model."""
    assets = []
    for asset_name in ["wind-asset-1", "wind-asset-2", "solar-asset-1"]:
        asset = Asset(
            name=asset_name,
            owner_id=setup_roles_users["Test Prosumer User"].id,
            asset_type_name="wind" if "wind" in asset_name else "solar",
            event_resolution=timedelta(minutes=15),
            capacity_in_mw=1,
            latitude=10,
            longitude=100,
            min_soc_in_mwh=0,
            max_soc_in_mwh=0,
            soc_in_mwh=0,
            unit="MW",
            market_id=setup_markets["epex_da"].id,
        )
        db.session.add(asset)
        assets.append(asset)
        # one day of test data (one complete sine curve)
        time_slots = pd.date_range(
            datetime(2015, 1, 1), datetime(2015, 1, 1, 23, 45), freq="15T"
        )
        values = [
            random() * (1 + np.sin(x * 2 * np.pi / (4 * 24)))
            for x in range(len(time_slots))
        ]
        beliefs = [
            TimedBelief(
                event_start=as_server_time(dt),
                belief_horizon=parse_duration("PT0M"),
                event_value=val,
                sensor=asset.corresponding_sensor,
                source=setup_sources["Seita"],
            )
            for dt, val in zip(time_slots, values)
        ]
        db.session.add_all(beliefs)
    return {asset.name: asset for asset in assets}
def make_rolling_viewpoint_forecasts(
    sensor_id: int,
    horizon: timedelta,
    start: datetime,
    end: datetime,
    custom_model_params: dict = None,
) -> int:
    """Build forecasting model specs, make rolling-viewpoint forecasts,
    and save the forecasts made.

    Each individual forecast is a belief about a time interval.
    Rolling-viewpoint forecasts share the same belief horizon (the duration
    between belief time and knowledge time). Model specs are also retrained in
    a rolling fashion, but with its own frequency set in custom_model_params.
    See the timely-beliefs lib for relevant terminology.

    Parameters
    ----------
    :param sensor_id: int
        To identify which sensor to forecast
    :param horizon: timedelta
        duration between the end of each interval and the time at which the belief about that interval is formed
    :param start: datetime
        start of forecast period, i.e. start time of the first interval to be forecast
    :param end: datetime
        end of forecast period, i.e end time of the last interval to be forecast
    :param custom_model_params: dict
        pass in params which will be passed to the model specs configurator,
        e.g. outcome_var_transformation, only advisable to be used for testing.
    :returns: int
        the number of forecasts made
    """
    # https://docs.sqlalchemy.org/en/13/faq/connections.html#how-do-i-use-engines-connections-sessions-with-python-multiprocessing-or-os-fork
    db.engine.dispose()

    rq_job = get_current_job()

    # find out which model to run, fall back to latest recommended
    model_search_term = rq_job.meta.get("model_search_term", "linear-OLS")

    # find sensor
    sensor = Sensor.query.filter_by(id=sensor_id).one_or_none()

    click.echo(
        "Running Forecasting Job %s: %s for %s on model '%s', from %s to %s"
        % (rq_job.id, sensor, horizon, model_search_term, start, end)
    )

    if hasattr(sensor, "market_type"):
        ex_post_horizon = None  # Todo: until we sorted out the ex_post_horizon, use all available price data
    else:
        ex_post_horizon = timedelta(hours=0)

    # Make model specs
    model_configurator = lookup_model_specs_configurator(model_search_term)
    model_specs, model_identifier, fallback_model_search_term = model_configurator(
        sensor=sensor,
        forecast_start=as_server_time(start),
        forecast_end=as_server_time(end),
        forecast_horizon=horizon,
        ex_post_horizon=ex_post_horizon,
        custom_model_params=custom_model_params,
    )
    model_specs.creation_time = server_now()

    # Remember the chosen model (and its fallback search term) on the job,
    # so failure handling elsewhere can requeue with the fallback model.
    rq_job.meta["model_identifier"] = model_identifier
    rq_job.meta["fallback_model_search_term"] = fallback_model_search_term
    rq_job.save()

    # before we run the model, check if horizon is okay and enough data is available
    if horizon not in supported_horizons():
        raise InvalidHorizonException(
            "Invalid horizon on job %s: %s" % (rq_job.id, horizon)
        )

    query_window = get_query_window(
        model_specs.start_of_training,
        end,
        [lag * model_specs.frequency for lag in model_specs.lags],
    )
    check_data_availability(
        sensor,
        TimedBelief,
        start,
        end,
        query_window,
        horizon,
    )

    data_source = get_data_source(
        data_source_name="Seita (%s)"
        % rq_job.meta.get("model_identifier", "unknown model"),
        data_source_type="forecasting script",
    )

    forecasts, model_state = make_rolling_forecasts(
        start=as_server_time(start),
        end=as_server_time(end),
        model_specs=model_specs,
    )
    click.echo("Job %s made %d forecasts." % (rq_job.id, len(forecasts)))

    # Save the forecasts as timed beliefs, all sharing the same belief horizon.
    ts_value_forecasts = [
        TimedBelief(
            event_start=dt,
            belief_horizon=horizon,
            event_value=value,
            sensor=sensor,
            source=data_source,
        )
        for dt, value in forecasts.items()
    ]
    bdf = tb.BeliefsDataFrame(ts_value_forecasts)
    save_to_db(bdf)
    db.session.commit()

    return len(forecasts)
def test_scheduling_a_charging_station(db, app):
    """Test one clean run of one scheduling job:
    - data source was made,
    - schedule has been made

    Starting with a state of charge 1 kWh, within 2 hours we should be able to reach 5 kWh.
    """
    soc_at_start = 1
    target_soc = 5
    duration_until_target = timedelta(hours=2)
    charging_station = Asset.query.filter(
        Asset.name == "Test charging station"
    ).one_or_none()
    start = as_server_time(datetime(2015, 1, 2))
    end = as_server_time(datetime(2015, 1, 3))
    resolution = timedelta(minutes=15)
    target_soc_datetime = start + duration_until_target
    # A single target SOC, 2 hours in; all other slots stay unconstrained (NaN).
    soc_targets = pd.Series(
        np.nan, index=pd.date_range(start, end, freq=resolution, closed="right")
    )
    soc_targets.loc[target_soc_datetime] = target_soc
    assert (
        DataSource.query.filter_by(name="Seita", type="scheduling script").one_or_none()
        is None
    )  # Make sure the scheduler data source isn't there
    job = create_scheduling_job(
        charging_station.id,
        start,
        end,
        belief_time=start,
        resolution=resolution,
        soc_at_start=soc_at_start,
        soc_targets=soc_targets,
    )
    print("Job: %s" % job.id)
    work_on_rq(app.queues["scheduling"], exc_handler=exception_reporter)
    scheduler_source = DataSource.query.filter_by(
        name="Seita", type="scheduling script"
    ).one_or_none()
    assert (
        scheduler_source is not None
    )  # Make sure the scheduler data source is now there
    power_values = (
        Power.query.filter(Power.asset_id == charging_station.id)
        .filter(Power.data_source_id == scheduler_source.id)
        .all()
    )
    consumption_schedule = pd.Series(
        [-v.value for v in power_values],
        index=pd.DatetimeIndex([v.datetime for v in power_values]),
    )  # For consumption schedules, positive values denote consumption. For the db, consumption is negative
    assert len(consumption_schedule) == 96
    print(consumption_schedule.head(12))
    assert (
        consumption_schedule.head(8).sum() * (resolution / timedelta(hours=1)) == 4.0
    )  # The first 2 hours should consume 4 kWh to charge from 1 to 5 kWh
def add_charging_station_assets(db: SQLAlchemy, setup_roles_users, setup_markets):
    """Add uni- and bi-directional charging station assets, set their capacity value and their initial SOC.

    Creates the "one-way_evse" and "two-way_evse" asset types, then one charging
    station of each type, owned by the test prosumer and priced on the epex_da market.
    Fails fast with a clear message if the prosumer user or the market is missing,
    instead of an obscure AttributeError on a None result.
    """
    db.session.add(
        AssetType(
            name="one-way_evse",
            is_consumer=True,
            is_producer=False,
            can_curtail=True,
            can_shift=True,
            daily_seasonality=True,
            weekly_seasonality=True,
            yearly_seasonality=True,
        )
    )
    db.session.add(
        AssetType(
            name="two-way_evse",
            is_consumer=True,
            is_producer=True,
            can_curtail=True,
            can_shift=True,
            daily_seasonality=True,
            weekly_seasonality=True,
            yearly_seasonality=True,
        )
    )
    from flexmeasures.data.models.user import User, Role

    user_datastore = SQLAlchemySessionUserDatastore(db.session, User, Role)
    test_prosumer = user_datastore.find_user(email="*****@*****.**")
    # Guard against missing prerequisites: the setup_roles_users / setup_markets
    # fixtures must have run first, otherwise fail with a clear message.
    assert test_prosumer is not None, "Test prosumer user not found; run user setup first"
    epex_da = Market.query.filter(Market.name == "epex_da").one_or_none()
    assert epex_da is not None, "epex_da market not found; run market setup first"

    # Both stations share every attribute except name and asset type.
    for station_name, evse_type in (
        ("Test charging station", "one-way_evse"),
        ("Test charging station (bidirectional)", "two-way_evse"),
    ):
        charging_station = Asset(
            name=station_name,
            asset_type_name=evse_type,
            event_resolution=timedelta(minutes=15),
            capacity_in_mw=2,
            max_soc_in_mwh=5,
            min_soc_in_mwh=0,
            soc_in_mwh=2.5,
            soc_datetime=as_server_time(datetime(2015, 1, 1)),
            soc_udi_event_id=203,
            latitude=10,
            longitude=100,
            market_id=epex_da.id,
            unit="MW",
        )
        charging_station.owner = test_prosumer
        db.session.add(charging_station)
def test_battery_solver_day_2(add_battery_assets, roundtrip_efficiency: float):
    """Check battery scheduling results for day 2, which is set up with
    8 expensive, then 8 cheap, then again 8 expensive hours.

    If efficiency losses aren't too bad, we expect the scheduler to:
    - completely discharge within the first 8 hours
    - completely charge within the next 8 hours
    - completely discharge within the last 8 hours
    If efficiency losses are bad, the price difference is not worth cycling
    the battery, and so we expect the scheduler to only:
    - completely discharge within the last 8 hours
    """
    epex_da = Sensor.query.filter(Sensor.name == "epex_da").one_or_none()
    battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none()
    assert Sensor.query.get(battery.get_attribute("market_id")) == epex_da

    start = as_server_time(datetime(2015, 1, 2))
    end = as_server_time(datetime(2015, 1, 3))
    resolution = timedelta(minutes=15)

    # Hoist the battery attributes we repeatedly check against
    capacity = battery.get_attribute("capacity_in_mw")
    hard_soc_min = battery.get_attribute("min_soc_in_mwh")
    hard_soc_max = battery.get_attribute("max_soc_in_mwh")
    soc_at_start = battery.get_attribute("soc_in_mwh")
    soc_min = 0.5
    soc_max = 4.5

    schedule = schedule_battery(
        battery,
        start,
        end,
        resolution,
        soc_at_start,
        soc_min=soc_min,
        soc_max=soc_max,
        roundtrip_efficiency=roundtrip_efficiency,
    )
    soc_schedule = integrate_time_series(schedule, soc_at_start, decimal_precision=6)

    with pd.option_context("display.max_rows", None, "display.max_columns", 3):
        print(soc_schedule)

    # Check if constraints were met
    assert min(schedule.values) >= capacity * -1
    assert max(schedule.values) <= capacity + TOLERANCE
    for soc in soc_schedule.values:
        assert soc >= max(soc_min, hard_soc_min)
        assert soc <= hard_soc_max

    # Check whether the resulting soc schedule follows our expectations
    # for 8 expensive, 8 cheap and 8 expensive hours.

    # Battery sold out at the end of its planning horizon
    assert soc_schedule.iloc[-1] == max(soc_min, hard_soc_min)

    if roundtrip_efficiency > 0.9:
        # As long as the roundtrip efficiency isn't too bad
        # (I haven't computed the actual switch point)
        assert soc_schedule.loc[start + timedelta(hours=8)] == max(
            soc_min, hard_soc_min
        )  # Sell what you begin with
        assert soc_schedule.loc[start + timedelta(hours=16)] == min(
            soc_max, hard_soc_max
        )  # Buy what you can to sell later
    else:
        # If the roundtrip efficiency is poor, best to stand idle
        assert (
            soc_schedule.loc[start + timedelta(hours=8)]
            == battery.get_attribute("soc_in_mwh")
        )
        assert (
            soc_schedule.loc[start + timedelta(hours=16)]
            == battery.get_attribute("soc_in_mwh")
        )
def save_forecasts_in_db(
    api_key: str, locations: List[Tuple[float, float]], data_source: DataSource
):
    """Process the response from DarkSky into Weather timed values.

    Collects all forecasts for all locations and all sensors at all locations,
    then bulk-saves them.
    """
    click.echo("[FLEXMEASURES] Getting weather forecasts:")
    click.echo("[FLEXMEASURES] Latitude, Longitude")
    click.echo("[FLEXMEASURES] -----------------------")

    db_forecasts = []
    weather_sensors: dict = {}  # keep track of the sensors to save lookups
    # map sensor name in our db to sensor name/label in dark sky response
    # (loop-invariant, so defined once up front)
    sensor_name_mapping = dict(
        temperature="temperature", wind_speed="windSpeed", radiation="cloudCover"
    )

    for location in locations:
        click.echo("[FLEXMEASURES] %s, %s" % location)

        forecasts = call_darksky(api_key, location)
        time_of_api_call = as_server_time(
            datetime.fromtimestamp(forecasts["currently"]["time"], get_timezone())
        ).replace(second=0, microsecond=0)
        click.echo(
            "[FLEXMEASURES] Called Dark Sky API successfully at %s." % time_of_api_call
        )

        for fc in forecasts["hourly"]["data"]:
            fc_datetime = as_server_time(
                datetime.fromtimestamp(fc["time"], get_timezone())
            ).replace(second=0, microsecond=0)
            fc_horizon = fc_datetime - time_of_api_call
            click.echo(
                "[FLEXMEASURES] Processing forecast for %s (horizon: %s) ..."
                % (fc_datetime, fc_horizon)
            )

            for flexmeasures_sensor_type, needed_response_label in sensor_name_mapping.items():
                if needed_response_label not in fc:
                    # we will not fail here, but issue a warning
                    msg = "No label '%s' in response data for time %s" % (
                        needed_response_label,
                        fc_datetime,
                    )
                    click.echo("[FLEXMEASURES] %s" % msg)
                    current_app.logger.warning(msg)
                    continue

                # Resolve (and cache) the weather sensor for this sensor type
                weather_sensor = weather_sensors.get(flexmeasures_sensor_type, None)
                if weather_sensor is None:
                    weather_sensor = find_closest_weather_sensor(
                        flexmeasures_sensor_type, lat=location[0], lng=location[1]
                    )
                    if weather_sensor is None:
                        raise Exception(
                            "No weather sensor set up for this sensor type (%s)"
                            % flexmeasures_sensor_type
                        )
                    weather_sensors[flexmeasures_sensor_type] = weather_sensor

                fc_value = fc[needed_response_label]
                # the radiation is not available in dark sky -> we compute it ourselves
                if flexmeasures_sensor_type == "radiation":
                    fc_value = compute_irradiance(
                        location[0],
                        location[1],
                        fc_datetime,
                        fc[needed_response_label],
                    )

                db_forecasts.append(
                    Weather(
                        datetime=fc_datetime,
                        horizon=fc_horizon,
                        value=fc_value,
                        sensor_id=weather_sensor.id,
                        data_source_id=data_source.id,
                    )
                )

    if len(db_forecasts) == 0:
        # This is probably a serious problem
        raise Exception(
            "Nothing to put in the database was produced. That does not seem right..."
        )
    db.session.bulk_save_objects(db_forecasts)
def test_generic_model(
    asset_type: str,
    asset: Optional[str] = None,
    from_date: str = "2015-03-10",
    period: int = 3,
    horizon: int = 1,
    training: int = 30,
):
    """Manually test integration of timetomodel for our generic model.

    :param asset_type: type name to look the asset/market/sensor up by
    :param asset: optional name of the asset; defaults to the first asset of the type
    :param from_date: forecast start date (YYYY-MM-DD)
    :param period: number of days to forecast
    :param horizon: forecast horizon in hours
    :param training: training-and-testing period in days
    """
    asset_type_name = asset_type
    if asset is None:
        # Guard against there being no asset of this type at all, instead of
        # crashing with an AttributeError on None.
        first_asset = Asset.query.filter_by(asset_type_name=asset_type_name).first()
        if first_asset is None:
            click.echo("No such assets in db, so I will not add any forecasts.")
            return
        asset_name = first_asset.name
    else:
        asset_name = asset

    start = as_server_time(datetime.strptime(from_date, "%Y-%m-%d"))
    end = start + timedelta(days=period)
    training_and_testing_period = timedelta(days=training)
    # Use a distinct name instead of rebinding the int parameter to a timedelta
    forecast_horizon = timedelta(hours=horizon)

    with app.app_context():
        # Distinct locals so the str parameter `asset` is not shadowed by an Asset object
        found_asset = (
            Asset.query.filter_by(asset_type_name=asset_type_name)
            .filter_by(name=asset_name)
            .first()
        )
        found_market = (
            Market.query.filter_by(market_type_name=asset_type_name)
            .filter_by(name=asset_name)
            .first()
        )
        found_sensor = (
            WeatherSensor.query.filter_by(weather_sensor_type_name=asset_type_name)
            .filter_by(name=asset_name)
            .first()
        )
        if found_asset:
            generic_asset = found_asset
        elif found_market:
            generic_asset = found_market
        elif found_sensor:
            generic_asset = found_sensor
        else:
            click.echo("No such assets in db, so I will not add any forecasts.")
            return

        linear_model_configurator = lookup_model_specs_configurator("linear")
        (
            model_specs,
            model_identifier,
            fallback_model_identifier,
        ) = linear_model_configurator(
            generic_asset=generic_asset,
            forecast_start=start,
            forecast_end=end,
            forecast_horizon=forecast_horizon,
            custom_model_params=dict(
                training_and_testing_period=training_and_testing_period
            ),
        )

        # Create and train the model
        model = create_fitted_model(model_specs, model_identifier)
        print("\n\nparams:\n%s\n\n" % model.params)

        evaluate_models(m1=ModelState(model, model_specs), plot_path=None)

        return ModelState(model, model_specs)
def add_charging_station_assets(
    db: SQLAlchemy, setup_roles_users, setup_markets
) -> Dict[str, Asset]:
    """Add uni- and bi-directional charging station assets, set their capacity value and their initial SOC."""
    # The two EVSE asset types differ only in whether they can produce
    shared_type_specs = dict(
        can_curtail=True,
        can_shift=True,
        daily_seasonality=True,
        weekly_seasonality=True,
        yearly_seasonality=True,
    )
    db.session.add(
        AssetType(
            name="one-way_evse", is_consumer=True, is_producer=False, **shared_type_specs
        )
    )
    db.session.add(
        AssetType(
            name="two-way_evse", is_consumer=True, is_producer=True, **shared_type_specs
        )
    )

    # Both stations share every attribute except name and asset type
    shared_asset_specs = dict(
        owner_id=setup_roles_users["Test Prosumer User"].id,
        event_resolution=timedelta(minutes=15),
        capacity_in_mw=2,
        max_soc_in_mwh=5,
        min_soc_in_mwh=0,
        soc_in_mwh=2.5,
        soc_datetime=as_server_time(datetime(2015, 1, 1)),
        soc_udi_event_id=203,
        latitude=10,
        longitude=100,
        market_id=setup_markets["epex_da"].id,
        unit="MW",
    )
    stations: Dict[str, Asset] = {}
    for station_name, evse_type in (
        ("Test charging station", "one-way_evse"),
        ("Test charging station (bidirectional)", "two-way_evse"),
    ):
        station = Asset(
            name=station_name, asset_type_name=evse_type, **shared_asset_specs
        )
        db.session.add(station)
        stations[station_name] = station
    return stations