def test_collect_power_resampled(
    db, app, query_start, query_end, resolution, num_values
):
    """Collecting power at an explicit resolution yields the expected row count.

    The expected count (``num_values``) comes from the parametrized fixture,
    as do the query window and resolution.
    """
    # The fixtures seed a known test asset; look it up by name.
    device = Asset.query.filter_by(name="wind-asset-1").one_or_none()

    # Collect beliefs over the query window, resampled to the requested resolution.
    resampled: tb.BeliefsDataFrame = Power.collect(
        device.name,
        (query_start, query_end),
        resolution=resolution,
    )
    print(resampled)

    assert len(resampled) == num_values
def test_collect_power(db, app, query_start, query_end, num_values):
    """Collecting power without resampling returns the raw stored values.

    Checks index layout, belief-horizon dtype, row count, and that the
    collected values match the raw Power rows for the asset.
    """
    # The fixtures seed a known test asset; fetch it and its raw power rows.
    device = Asset.query.filter_by(name="wind-asset-1").one_or_none()
    raw_rows = Power.query.filter(Power.asset_id == device.id).all()
    print(raw_rows)

    collected: tb.BeliefsDataFrame = Power.collect(
        device.name, (query_start, query_end)
    )
    print(collected)

    # first index level of collect function should be event_start, so that df.loc[] refers to event_start
    assert collected.index.names[0] == "event_start"

    # dtype of belief_horizon is timedelta64[ns], so the minimum horizon on an
    # empty BeliefsDataFrame is NaT instead of NaN
    horizons = collected.index.get_level_values("belief_horizon")
    assert pd.api.types.is_timedelta64_dtype(horizons)

    assert len(collected) == num_values

    # Collected values should match the stored rows to within float tolerance.
    for collected_row, stored_row in zip(collected.values, raw_rows):
        assert abs(collected_row[0] - stored_row.value) < 10**-6
def collect_connection_and_value_groups(
    unit: str,
    resolution: str,
    belief_horizon_window: Tuple[Union[None, timedelta], Union[None, timedelta]],
    belief_time_window: Tuple[Optional[datetime_type], Optional[datetime_type]],
    start: datetime_type,
    duration: timedelta,
    connection_groups: List[List[str]],
    user_source_ids: Union[int, List[int]] = None,  # None is interpreted as all sources
    source_types: List[str] = None,
) -> Tuple[dict, int]:
    """
    Code for GETting power values from the API. Only allows to get values from assets owned by current user.
    Returns value sign in accordance with USEF specs (with negative production and positive consumption).
    """
    from flask import current_app

    current_app.logger.info("GETTING")

    # Authorization scope: only assets owned by the current user may be queried.
    user_assets = get_assets()
    if not user_assets:
        current_app.logger.info("User doesn't seem to have any assets")
    user_asset_ids = [asset.id for asset in user_assets]

    end = start + duration
    value_groups = []
    # Each connection in the old connection groups will be interpreted as a separate group
    flattened_connection_groups = []
    for connections in connection_groups:

        # Resolve each entity address in this group to an asset name.
        names_in_group: List[str] = []
        for connection in connections:

            # Parse the entity address
            try:
                address = parse_entity_address(connection, entity_type="connection")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            asset_id = address["asset_id"]

            # Look for the Asset object; refuse connections the user does not own.
            if asset_id not in user_asset_ids:
                current_app.logger.warning("Cannot identify connection %s" % connection)
                return unrecognized_connection_group()
            asset = Asset.query.filter(Asset.id == asset_id).one_or_none()
            names_in_group.append(asset.name)

        # Get the power values
        # TODO: fill NaN for non-existing values
        power_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Power.collect(
            generic_asset_names=names_in_group,
            query_window=(start, end),
            resolution=resolution,
            belief_horizon_window=belief_horizon_window,
            belief_time_window=belief_time_window,
            user_source_ids=user_source_ids,
            source_types=source_types,
            sum_multiple=False,
        )
        # Todo: parse time window of power_bdf_dict, which will be different for requests that are not of the form:
        # - start is a timestamp on the hour or a multiple of 15 minutes thereafter
        # - duration is a multiple of 15 minutes
        for key, bdf in power_bdf_dict.items():
            # Reverse sign of values (from FlexMeasures specs to USEF specs)
            value_groups.append([x * -1 for x in bdf["event_value"].tolist()])
            flattened_connection_groups.append(key)

    response = groups_to_dict(
        flattened_connection_groups, value_groups, generic_asset_type_name="connection"
    )
    response["start"] = isodate.datetime_isoformat(start)
    response["duration"] = isodate.duration_isoformat(duration)
    response["unit"] = unit  # TODO: convert to requested unit

    d, s = request_processed()
    return dict(**response, **d), s