def decorated_service(*args, **kwargs):
    """Unpack the 'horizon' field from the request form and pass it on to the decorated service.

    Uses closure variables `fn` (the wrapped service), `ex_post` and `infer_missing`.
    Validation performed here:
    - reject request methods from which no form can be unpacked;
    - reject unparseable horizons;
    - if `ex_post`, reject positive horizons (observations must be after the fact);
    - if the horizon is missing and `infer_missing` is set, infer it from the
      'start' and 'duration' fields (or default to zero in play mode);
    - otherwise a missing horizon is passed on as None.
    Sets kwargs["horizon"] (and kwargs["rolling"] when `infer_missing`) before calling `fn`.
    """
    form = get_form_from_request(request)
    if form is None:
        current_app.logger.warning(
            "Unsupported request method for unpacking 'horizon' from request."
        )
        return invalid_method(request.method)

    # parse_horizon also reports whether the horizon was given as a repeating
    # interval (rolling) — default True for the inference paths below.
    rolling = True
    if "horizon" in form:
        horizon, rolling = parse_horizon(form["horizon"])
        if horizon is None:
            current_app.logger.warning("Cannot parse 'horizon' value")
            return invalid_horizon()
        elif ex_post is True:
            if horizon > timedelta(hours=0):
                extra_info = "Meter data must have a zero or negative horizon to indicate observations after the fact."
                return invalid_horizon(extra_info)
    elif infer_missing is True:
        # A missing horizon is only accepted if the server can infer it
        if "start" in form and "duration" in form:
            start = parse_isodate_str(form["start"])
            duration = parse_duration(form["duration"], start)
            if not start:
                extra_info = "Cannot parse 'start' value."
                current_app.logger.warning(extra_info)
                return invalid_period(extra_info)
            if start.tzinfo is None:
                current_app.logger.warning(
                    "Cannot parse timezone of 'start' value"
                )
                return invalid_timezone(
                    "Start time should explicitly state a timezone."
                )
            if not duration:
                extra_info = "Cannot parse 'duration' value."
                current_app.logger.warning(extra_info)
                return invalid_period(extra_info)
            if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
                # In play mode, data is treated as if it was known right away
                horizon = timedelta(hours=0)
            else:
                # Infer the horizon as the time between now and the end of the period
                horizon = start + duration - server_now()
                # NOTE(review): the inferred horizon appears to fix the belief
                # time (not rolling) — confirm against the original layout.
                rolling = False
        else:
            current_app.logger.warning(
                "Request missing both 'horizon', 'start' and 'duration'."
            )
            extra_info = "Specify a 'horizon' value, or 'start' and 'duration' values so that the horizon can be inferred."
            return invalid_horizon(extra_info)
    else:
        # Otherwise, a missing horizon is fine
        horizon = None

    kwargs["horizon"] = horizon
    if infer_missing is True:
        kwargs["rolling"] = rolling
    return fn(*args, **kwargs)
def decorated_service(*args, **kwargs):
    """Unpack the 'prior' field (a belief time) from the request form and pass it on to the decorated service.

    Uses closure variables `fn` (the wrapped service), `ex_post`, `infer_missing`
    and `infer_missing_play`. If `ex_post`, the prior must lie at or after the
    end of the event period (meter data is observed after the fact). A missing
    prior is inferred as the current server time when `infer_missing` is set
    (or when `infer_missing_play` is set and the server runs in play mode);
    otherwise it is passed on as None.
    """
    form = get_form_from_request(request)
    if form is None:
        current_app.logger.warning(
            "Unsupported request method for unpacking 'prior' from request."
        )
        return invalid_method(request.method)

    if "prior" in form:
        prior = parse_isodate_str(form["prior"])
        if ex_post is True:
            start = parse_isodate_str(form["start"])
            duration = parse_duration(form["duration"], start)
            # todo: validate start and duration (refactor already duplicate code from period_required and optional_horizon_accepted)
            knowledge_time = (
                start + duration
            )  # todo: take into account knowledge horizon function
            if prior < knowledge_time:
                extra_info = "Meter data can only be observed after the fact."
                return invalid_horizon(extra_info)
    elif infer_missing is True or (
        infer_missing_play is True
        and current_app.config.get("FLEXMEASURES_MODE", "") == "play"
    ):
        # A missing prior is inferred by the server
        prior = server_now()
    else:
        # Otherwise, a missing prior is fine (a horizon may still be inferred by the server)
        prior = None

    kwargs["prior"] = prior
    return fn(*args, **kwargs)
def decorated_service(*args, **kwargs):
    """Unpack the 'prior' field (a belief time) from the request form and pass it on to the decorated service.

    Uses closure variables `fn` (the wrapped service) and `ex_post`. If `ex_post`,
    the prior must lie at or after the end of the event period; a missing prior
    is passed on as None.
    """
    form = get_form_from_request(request)
    if form is None:
        current_app.logger.warning(
            "Unsupported request method for unpacking 'prior' from request."
        )
        return invalid_method(request.method)

    prior = None
    if "prior" in form:
        prior = parse_isodate_str(form["prior"])
        if ex_post is True:
            # todo: validate start and duration (refactor already duplicate code from period_required and optional_horizon_accepted)
            event_start = parse_isodate_str(form["start"])
            event_duration = parse_duration(form["duration"], event_start)
            # todo: take into account knowledge horizon function
            end_of_event = event_start + event_duration
            if prior < end_of_event:
                return invalid_horizon(
                    "Meter data can only be observed after the fact."
                )

    kwargs["prior"] = prior
    return fn(*args, **kwargs)
def test_invalid_horizon(client, message):
    """A getPrognosis request carrying an invalid horizon should be rejected with a 400."""
    token = get_auth_token(client, "*****@*****.**", "testtest")
    response = client.get(
        url_for("flexmeasures_api_v1_1.get_prognosis"),
        query_string=message,
        headers={
            "content-type": "application/json",
            "Authorization": token,
        },
    )
    print("Server responded with:\n%s" % response.json)
    payload = response.json
    assert response.status_code == 400
    assert payload["type"] == "GetPrognosisResponse"
    # The status field must match the canonical invalid-horizon error response
    expected_status = invalid_horizon()[0]["status"]
    assert payload["status"] == expected_status
def decorated_service(*args, **kwargs):
    """Unpack the 'horizon' field from the request form and pass it on to the decorated service.

    Uses closure variables `fn` (the wrapped service), `ex_post`, `infer_missing`,
    `infer_missing_play` and `accept_repeating_interval`. Validation performed here:
    - reject request methods from which no form can be unpacked;
    - reject unparseable horizons;
    - if `ex_post`, reject positive horizons (observations must be after the fact);
    - reject repeating-interval (rolling) horizons when the API version no longer
      accepts them (note: this check is skipped when `ex_post` is True, since it
      sits on the same elif chain);
    - a missing horizon defaults to zero when `infer_missing` is set (or when
      `infer_missing_play` is set and the server runs in play mode), else None.
    Sets kwargs["horizon"] (and kwargs["rolling"] where applicable) before calling `fn`.
    """
    form = get_form_from_request(request)
    if form is None:
        current_app.logger.warning(
            "Unsupported request method for unpacking 'horizon' from request."
        )
        return invalid_method(request.method)

    # parse_horizon also reports whether the horizon was given as a repeating interval
    rolling = True
    if "horizon" in form:
        horizon, rolling = parse_horizon(form["horizon"])
        if horizon is None:
            current_app.logger.warning("Cannot parse 'horizon' value")
            return invalid_horizon()
        elif ex_post is True:
            if horizon > timedelta(hours=0):
                extra_info = "Meter data must have a zero or negative horizon to indicate observations after the fact."
                return invalid_horizon(extra_info)
        elif rolling is True and accept_repeating_interval is False:
            extra_info = (
                "API versions 2.0 and higher use regular ISO 8601 durations instead of repeating time intervals. "
                "For example: R/P1D should be replaced by P1D."
            )
            return invalid_horizon(extra_info)
    elif infer_missing is True or (
        infer_missing_play is True
        and current_app.config.get("FLEXMEASURES_MODE", "") == "play"
    ):
        # A missing horizon is set to zero
        horizon = timedelta(hours=0)
    else:
        # Otherwise, a missing horizon is fine (a prior may still be inferred by the server)
        horizon = None

    kwargs["horizon"] = horizon
    if infer_missing is True and accept_repeating_interval is True:
        kwargs["rolling"] = rolling
    return fn(*args, **kwargs)
def post_prognosis_response(
    unit,
    generic_asset_name_groups,
    value_groups,
    horizon,
    rolling,
    start,
    duration,
    resolution,
) -> Union[dict, Tuple[dict, int]]:
    """Store the new power values for each asset.

    A horizon is mandatory: API versions before v2.0 have no 'prior' field,
    so the belief timing cannot be inferred when the horizon is missing.
    """
    if horizon is not None:
        # Note that `resolution` is not passed on; it was only needed upstream.
        return create_connection_and_value_groups(
            unit,
            generic_asset_name_groups,
            value_groups,
            horizon,
            rolling,
            start,
            duration,
        )
    return invalid_horizon(
        "Please specify the horizon field using an ISO 8601 duration (such as 'PT24H')."
    )
def post_price_data_response(  # noqa C901
    unit,
    generic_asset_name_groups,
    horizon,
    prior,
    value_groups,
    start,
    duration,
    resolution,
) -> ResponseTuple:
    """Store the posted price values as beliefs per market and schedule price forecasting jobs.

    Either a horizon or a prior must be given so that belief timing can be
    determined. Data and jobs are persisted/enqueued together at the end via
    save_and_enqueue.
    """
    # additional validation, todo: to be moved into Marshmallow
    if horizon is None and prior is None:
        extra_info = "Missing horizon or prior."
        return invalid_horizon(extra_info)

    current_app.logger.info("POSTING PRICE DATA")

    data_source = get_or_create_source(current_user)
    price_df_per_market = []
    forecasting_jobs = []
    for market_group, event_values in zip(generic_asset_name_groups, value_groups):
        for market in market_group:

            # Parse the entity address
            try:
                ea = parse_entity_address(market, entity_type="market")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            sensor_id = ea["sensor_id"]

            # Look for the Sensor object
            sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none()
            if sensor is None:
                return unrecognized_market(sensor_id)
            elif unit != sensor.unit:
                return invalid_unit("%s prices" % sensor.name, [sensor.unit])

            # Convert to timely-beliefs terminology
            event_starts, belief_horizons = determine_belief_timing(
                event_values, start, resolution, horizon, prior, sensor
            )

            # Create new Price objects
            beliefs = [
                TimedBelief(
                    event_start=event_start,
                    event_value=event_value,
                    belief_horizon=belief_horizon,
                    sensor=sensor,
                    source=data_source,
                )
                for event_start, event_value, belief_horizon in zip(
                    event_starts, event_values, belief_horizons
                )
            ]
            price_df_per_market.append(tb.BeliefsDataFrame(beliefs))

            # Make forecasts, but not in play mode. Price forecasts (horizon>0) can still lead to other price forecasts,
            # by the way, due to things like day-ahead markets.
            if current_app.config.get("FLEXMEASURES_MODE", "") != "play":
                # Forecast 24 and 48 hours ahead for at most the last 24 hours of posted price data.
                # Bug fix: accumulate jobs across markets — the previous plain
                # assignment discarded jobs created for all but the last market
                # in the request (cf. the extend pattern in post_power_data).
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        sensor.id,
                        max(start, start + duration - timedelta(hours=24)),
                        start + duration,
                        resolution=duration / len(event_values),
                        horizons=[timedelta(hours=24), timedelta(hours=48)],
                        enqueue=False,  # will enqueue later, after saving data
                    )
                )

    return save_and_enqueue(price_df_per_market, forecasting_jobs)
def post_power_data(
    unit,
    generic_asset_name_groups,
    value_groups,
    horizon,
    prior,
    start,
    duration,
    resolution,
    create_forecasting_jobs_too,
):
    """Store the posted power values as beliefs per connection, optionally scheduling forecasting jobs.

    Either a horizon or a prior must be given so that belief timing can be
    determined. Incoming values follow USEF sign conventions (positive
    consumption, negative production) and are sign-reversed on storage to
    follow FlexMeasures conventions (positive production, negative consumption).
    Returns the response from save_and_enqueue, or an error response.
    """
    # additional validation, todo: to be moved into Marshmallow
    if horizon is None and prior is None:
        extra_info = "Missing horizon or prior."
        return invalid_horizon(extra_info)

    current_app.logger.info("POSTING POWER DATA")

    data_source = get_or_create_source(current_user)
    user_sensors = get_sensors()
    if not user_sensors:
        current_app.logger.info("User doesn't seem to have any assets")
    user_sensor_ids = [sensor.id for sensor in user_sensors]

    power_df_per_connection = []
    forecasting_jobs = []
    for connection_group, event_values in zip(generic_asset_name_groups, value_groups):
        for connection in connection_group:

            # TODO: get asset through util function after refactoring
            # Parse the entity address
            try:
                ea = parse_entity_address(connection, entity_type="connection")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            sensor_id = ea["sensor_id"]

            # Look for the Sensor object (only the user's own sensors are accepted)
            if sensor_id in user_sensor_ids:
                sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none()
            else:
                current_app.logger.warning("Cannot identify connection %s" % connection)
                return unrecognized_connection_group()

            # Validate the sign of the values (following USEF specs with positive consumption and negative production)
            if sensor.get_attribute("is_strictly_non_positive") and any(
                v < 0 for v in event_values
            ):
                extra_info = (
                    "Connection %s is registered as a pure consumer and can only receive non-negative values."
                    % sensor.entity_address
                )
                return power_value_too_small(extra_info)
            elif sensor.get_attribute("is_strictly_non_negative") and any(
                v > 0 for v in event_values
            ):
                extra_info = (
                    "Connection %s is registered as a pure producer and can only receive non-positive values."
                    % sensor.entity_address
                )
                return power_value_too_big(extra_info)

            # Convert to timely-beliefs terminology
            event_starts, belief_horizons = determine_belief_timing(
                event_values, start, resolution, horizon, prior, sensor
            )

            # Create new Power objects
            beliefs = [
                TimedBelief(
                    event_start=event_start,
                    event_value=event_value
                    * -1,  # Reverse sign for FlexMeasures specs with positive production and negative consumption
                    belief_horizon=belief_horizon,
                    sensor=sensor,
                    source=data_source,
                )
                for event_start, event_value, belief_horizon in zip(
                    event_starts, event_values, belief_horizons
                )
            ]
            power_df_per_connection.append(tb.BeliefsDataFrame(beliefs))

            if create_forecasting_jobs_too:
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        sensor_id,
                        start,
                        start + duration,
                        resolution=duration / len(event_values),
                        enqueue=False,  # will enqueue later, after saving data
                    )
                )

    return save_and_enqueue(power_df_per_connection, forecasting_jobs)
def post_weather_data_response(  # noqa: C901
    unit,
    generic_asset_name_groups,
    horizon,
    prior,
    value_groups,
    start,
    duration,
    resolution,
) -> ResponseTuple:
    """Store the posted weather values as beliefs per weather sensor and schedule forecasting jobs.

    Either a horizon or a prior must be given so that belief timing can be
    determined. Forecasting jobs are only created for observations
    (non-positive horizon) and not in play mode.
    """
    # additional validation, todo: to be moved into Marshmallow
    if horizon is None and prior is None:
        extra_info = "Missing horizon or prior."
        return invalid_horizon(extra_info)

    current_app.logger.info("POSTING WEATHER DATA")

    data_source = get_or_create_source(current_user)
    weather_df_per_sensor = []
    forecasting_jobs = []
    for sensor_group, event_values in zip(generic_asset_name_groups, value_groups):
        # Renamed loop variable to avoid shadowing the Sensor object looked up below
        for sensor_entity_address in sensor_group:

            # Parse the entity address
            try:
                ea = parse_entity_address(
                    sensor_entity_address, entity_type="weather_sensor"
                )
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            weather_sensor_type_name = ea["weather_sensor_type_name"]
            latitude = ea["latitude"]
            longitude = ea["longitude"]

            # Check whether the unit is valid for this sensor type (e.g. no m/s allowed for temperature data)
            accepted_units = valid_sensor_units(weather_sensor_type_name)
            if unit not in accepted_units:
                return invalid_unit(weather_sensor_type_name, accepted_units)

            sensor: Sensor = get_sensor_by_generic_asset_type_and_location(
                weather_sensor_type_name, latitude, longitude
            )

            # Convert to timely-beliefs terminology
            event_starts, belief_horizons = determine_belief_timing(
                event_values, start, resolution, horizon, prior, sensor
            )

            # Create new Weather objects
            beliefs = [
                TimedBelief(
                    event_start=event_start,
                    event_value=event_value,
                    belief_horizon=belief_horizon,
                    sensor=sensor,
                    source=data_source,
                )
                for event_start, event_value, belief_horizon in zip(
                    event_starts, event_values, belief_horizons
                )
            ]
            weather_df_per_sensor.append(tb.BeliefsDataFrame(beliefs))

            # make forecasts, but only if the sent-in values are not forecasts themselves (and also not in play)
            # Bug fix: horizon may be None here (the earlier validation accepts a
            # missing horizon when a prior is given), and comparing None with a
            # timedelta raises TypeError — so guard explicitly and skip
            # forecasting in that case.
            if (
                current_app.config.get("FLEXMEASURES_MODE", "") != "play"
                and horizon is not None
                and horizon <= timedelta(hours=0)
            ):  # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this generic asset
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        sensor.id,
                        start,
                        start + duration,
                        resolution=duration / len(event_values),
                        horizons=[horizon],
                        enqueue=False,  # will enqueue later, after saving data
                    )
                )

    return save_and_enqueue(weather_df_per_sensor, forecasting_jobs)