Code example #1
def add_test_weather_sensor_and_forecasts(db: SQLAlchemy):
    """one day of test data (one complete sine curve) for two sensors"""
    data_source = DataSource.query.filter_by(name="Seita",
                                             type="demo script").one_or_none()
    for sensor_name in ("radiation", "wind_speed"):
        sensor_type = WeatherSensorType(name=sensor_name)
        sensor = WeatherSensor(name=sensor_name,
                               sensor_type=sensor_type,
                               latitude=100,
                               longitude=100)
        db.session.add(sensor)
        time_slots = pd.date_range(datetime(2015, 1, 1),
                                   datetime(2015, 1, 2, 23, 45),
                                   freq="15T")
        values = [
            random() * (1 + np.sin(x / 15)) for x in range(len(time_slots))
        ]
        if sensor_name == "temperature":
            values = [value * 17 for value in values]
        if sensor_name == "wind_speed":
            values = [value * 45 for value in values]
        if sensor_name == "radiation":
            values = [value * 600 for value in values]
        for dt, val in zip(time_slots, values):
            db.session.add(
                Weather(
                    sensor=sensor,
                    datetime=as_server_time(dt),
                    value=val,
                    horizon=timedelta(hours=6),
                    data_source_id=data_source.id,
                ))
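A minimal invocation sketch (not part of the excerpt; it assumes a Flask application context, the initialized Flask-SQLAlchemy `db` instance, and that the "Seita" / "demo script" DataSource already exists, since the function looks it up but does not create it):

# Sketch only: `app` is assumed to be the FlexMeasures Flask app.
with app.app_context():
    add_test_weather_sensor_and_forecasts(db)
    db.session.commit()  # the function only adds objects to the session; commit separately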
Code example #2
def configure_regressors_for_nearest_weather_sensor(
        generic_asset,
        generic_asset_type,
        query_window,
        horizon,
        regressor_transformation,  # the regressor transformation can be passed in
        transform_to_normal,  # if none is passed, a normalization can be applied instead
) -> List[DBSeriesSpecs]:
    """For Assets, we use weather data as regressors. Here, we configure them."""
    regressor_specs = []
    if isinstance(generic_asset, Asset):
        sensor_types = generic_asset_type.weather_correlations
        current_app.logger.info("For %s, I need sensors: %s" %
                                (generic_asset, sensor_types))
        for sensor_type in sensor_types:

            # Find nearest weather sensor
            closest_sensor = find_closest_weather_sensor(sensor_type,
                                                         object=generic_asset)
            if closest_sensor is None:
                current_app.logger.warning(
                    "No sensor found of sensor type %s to use as regressor for %s."
                    % (sensor_type, generic_asset))
            else:
                current_app.logger.info(
                    "Using sensor %s as regressor for %s." %
                    (sensor_type, generic_asset))
                # Collect the weather data for the requested time window
                regressor_specs_name = "%s_l0" % sensor_type
                if len(regressor_transformation.keys()) == 0 and transform_to_normal:
                    regressor_transformation = get_normalization_transformation_by_asset_type(
                        WeatherSensorType(name=sensor_type)
                    )
                regressor_specs.append(
                    DBSeriesSpecs(
                        name=regressor_specs_name,
                        db_engine=db.engine,
                        query=Weather.make_query(
                            asset_names=[closest_sensor.name],
                            query_window=query_window,
                            belief_horizon_window=(horizon, None),
                            session=db.session,
                        ),
                        feature_transformation=regressor_transformation,
                        interpolation_config={"method": "time"},
                    ))

    return regressor_specs
Code example #3
def make_timed_value(
    timed_value_type: str,
    asset_id: int,
    dt: datetime,
    value: float,
    horizon: timedelta,
    data_source_id: int,
) -> Union[Power, Price, Weather]:
    if timed_value_type not in ("Power", "Price", "Weather"):
        raise Exception("Cannot get asset for asset_type '%s'" % timed_value_type)
    ts_value = None
    if timed_value_type == "Power":
        ts_value = Power(
            datetime=dt,
            horizon=horizon,
            value=value,
            asset_id=asset_id,
            data_source_id=data_source_id,
        )
    elif timed_value_type == "Price":
        ts_value = Price(
            datetime=dt,
            horizon=horizon,
            value=value,
            market_id=asset_id,
            data_source_id=data_source_id,
        )
    elif timed_value_type == "Weather":
        ts_value = Weather(
            datetime=dt,
            horizon=horizon,
            value=value,
            sensor_id=asset_id,
            data_source_id=data_source_id,
        )
    if ts_value is None:
        raise Exception(
            "Cannot create asset of type %s with id %d" % (timed_value_type, asset_id)
        )
    return ts_value
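A usage sketch based on the signature above (all argument values are made up for illustration; `asset_id` is stored as `asset_id`, `market_id` or `sensor_id` depending on the requested type):

from datetime import datetime, timedelta

# Hypothetical ids and values, purely for illustration; `db` is the app's session provider.
price_point = make_timed_value(
    timed_value_type="Price",
    asset_id=1,  # interpreted as market_id for "Price"
    dt=datetime(2015, 1, 1, 12, 0),
    value=42.0,
    horizon=timedelta(hours=6),
    data_source_id=1,
)
db.session.add(price_point)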
Code example #4
def post_weather_data_response(  # noqa: C901
    unit,
    generic_asset_name_groups,
    horizon,
    rolling,
    value_groups,
    start,
    duration,
    resolution,
):
    current_app.logger.info("POSTING WEATHER DATA")
    data_source = get_or_create_user_data_source(current_user)
    weather_measurements = []
    forecasting_jobs = []
    for sensor_group, value_group in zip(generic_asset_name_groups,
                                         value_groups):
        for sensor in sensor_group:

            # Parse the entity address
            try:
                ea = parse_entity_address(sensor, entity_type="sensor")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            weather_sensor_type_name = ea["weather_sensor_type_name"]
            latitude = ea["latitude"]
            longitude = ea["longitude"]

            # Check whether the unit is valid for this sensor type (e.g. no m/s allowed for temperature data)
            accepted_units = valid_sensor_units(weather_sensor_type_name)
            if unit not in accepted_units:
                return invalid_unit(weather_sensor_type_name, accepted_units)

            weather_sensor = get_weather_sensor_by(weather_sensor_type_name,
                                                   latitude, longitude)

            # Create new Weather objects
            for j, value in enumerate(value_group):
                dt = start + j * duration / len(value_group)
                if rolling:
                    h = horizon
                else:  # subtract the difference between the end of the whole time series and the end of this individual timeslot
                    h = horizon - ((start + duration) -
                                   (dt + duration / len(value_group)))
                w = Weather(
                    datetime=dt,
                    value=value,
                    horizon=h,
                    sensor_id=weather_sensor.id,
                    data_source_id=data_source.id,
                )
                weather_measurements.append(w)

            # make forecasts, but only if the sent-in values are not forecasts themselves (and also not in play)
            if (
                current_app.config.get("FLEXMEASURES_MODE", "") != "play"
                and horizon <= timedelta(hours=0)
            ):  # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this generic asset
                forecasting_jobs.extend(
                    create_forecasting_jobs(
                        "Weather",
                        weather_sensor.id,
                        start,
                        start + duration,
                        resolution=duration / len(value_group),
                        horizons=[horizon],
                        enqueue=False,  # will enqueue later, only if we successfully saved weather measurements
                    )
                )

    # Put these into the database
    current_app.logger.info("SAVING TO DB...")
    try:
        save_to_session(weather_measurements)
        db.session.flush()
        for job in forecasting_jobs:
            current_app.queues["forecasting"].enqueue_job(job)
        db.session.commit()
        return request_processed()
    except IntegrityError as e:
        current_app.logger.warning(e)
        db.session.rollback()

        # Allow weather data to be replaced only in play mode
        if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
            save_to_session(weather_measurements, overwrite=True)
            for job in forecasting_jobs:
                current_app.queues["forecasting"].enqueue_job(job)
            db.session.commit()
            return request_processed()
        else:
            return already_received_and_successfully_processed()
Code example #5
def save_forecasts_in_db(api_key: str, locations: List[Tuple[float, float]],
                         data_source: DataSource):
    """Process the response from DarkSky into Weather timed values.
    Collects all forecasts for all locations and all sensors at all locations, then bulk-saves them.
    """
    click.echo("[FLEXMEASURES] Getting weather forecasts:")
    click.echo("[FLEXMEASURES]  Latitude, Longitude")
    click.echo("[FLEXMEASURES] -----------------------")
    db_forecasts = []
    weather_sensors: dict = {}  # keep track of the sensors to save lookups

    for location in locations:
        click.echo("[FLEXMEASURES] %s, %s" % location)

        forecasts = call_darksky(api_key, location)
        time_of_api_call = as_server_time(
            datetime.fromtimestamp(forecasts["currently"]["time"], get_timezone())
        ).replace(second=0, microsecond=0)
        click.echo(
            "[FLEXMEASURES] Called Dark Sky API successfully at %s." % time_of_api_call
        )

        # map sensor name in our db to sensor name/label in dark sky response
        sensor_name_mapping = dict(temperature="temperature",
                                   wind_speed="windSpeed",
                                   radiation="cloudCover")

        for fc in forecasts["hourly"]["data"]:
            fc_datetime = as_server_time(
                datetime.fromtimestamp(fc["time"], get_timezone())
            ).replace(second=0, microsecond=0)
            fc_horizon = fc_datetime - time_of_api_call
            click.echo(
                "[FLEXMEASURES] Processing forecast for %s (horizon: %s) ..." %
                (fc_datetime, fc_horizon))
            for flexmeasures_sensor_type in sensor_name_mapping.keys():
                needed_response_label = sensor_name_mapping[flexmeasures_sensor_type]
                if needed_response_label in fc:
                    weather_sensor = weather_sensors.get(flexmeasures_sensor_type, None)
                    if weather_sensor is None:
                        weather_sensor = find_closest_weather_sensor(
                            flexmeasures_sensor_type, lat=location[0], lng=location[1]
                        )
                        if weather_sensor is not None:
                            weather_sensors[flexmeasures_sensor_type] = weather_sensor
                        else:
                            raise Exception(
                                "No weather sensor set up for this sensor type (%s)"
                                % flexmeasures_sensor_type
                            )

                    fc_value = fc[needed_response_label]
                    # radiation is not available from Dark Sky, so we compute irradiance ourselves from the cloud cover value
                    if flexmeasures_sensor_type == "radiation":
                        fc_value = compute_irradiance(
                            location[0],
                            location[1],
                            fc_datetime,
                            fc[needed_response_label],
                        )

                    db_forecasts.append(
                        Weather(
                            datetime=fc_datetime,
                            horizon=fc_horizon,
                            value=fc_value,
                            sensor_id=weather_sensor.id,
                            data_source_id=data_source.id,
                        ))
                else:
                    # we will not fail here, but issue a warning
                    msg = "No label '%s' in response data for time %s" % (
                        needed_response_label,
                        fc_datetime,
                    )
                    click.echo("[FLEXMEASURES] %s" % msg)
                    current_app.logger.warning(msg)
    if len(db_forecasts) == 0:
        # This is probably a serious problem
        raise Exception(
            "Nothing to put in the database was produced. That does not seem right..."
        )
    db.session.bulk_save_objects(db_forecasts)
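A sketch of how this helper might be driven (assumptions: a valid Dark Sky API key, an application context, and an existing "Seita" / "demo script" DataSource; note that bulk_save_objects does not commit by itself):

# Sketch only: the API key and coordinates are placeholders.
data_source = DataSource.query.filter_by(name="Seita", type="demo script").one_or_none()
save_forecasts_in_db(
    api_key="<your-darksky-api-key>",
    locations=[(52.37, 4.89)],  # one or more (latitude, longitude) pairs
    data_source=data_source,
)
db.session.commit()  # commit the bulk-saved forecasts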
Code example #6
def populate_time_series_forecasts(  # noqa: C901
    db: SQLAlchemy,
    generic_asset_type: str = None,
    generic_asset_name: str = None,
    from_date: str = "2015-02-08",
    to_date: str = "2015-12-31",
):
    start = ensure_local_timezone(datetime.strptime(from_date, "%Y-%m-%d"),
                                  tz_name=LOCAL_TIME_ZONE)
    end = ensure_local_timezone(
        datetime.strptime(to_date, "%Y-%m-%d") + timedelta(days=1),
        tz_name=LOCAL_TIME_ZONE,
    )
    training_and_testing_period = timedelta(days=30)
    horizons = (
        timedelta(hours=1),
        timedelta(hours=6),
        timedelta(hours=24),
        timedelta(hours=48),
    )

    click.echo(
        "Populating the database %s with time series forecasts of %s ahead ..."
        % (db.engine,
           infl_eng.join([naturaldelta(horizon) for horizon in horizons])))

    # Set a data source for the forecasts
    data_source = DataSource.query.filter_by(name="Seita",
                                             type="demo script").one_or_none()

    # List all generic assets for which to forecast.
    # Look into the asset type if no asset name is given. If an asset name is given, the asset type is also needed to look it up.
    generic_assets = []
    if generic_asset_name is None:
        if generic_asset_type is None or generic_asset_type == "WeatherSensor":
            sensors = WeatherSensor.query.all()
            generic_assets.extend(sensors)
        if generic_asset_type is None or generic_asset_type == "Asset":
            assets = Asset.query.all()
            generic_assets.extend(assets)
        if generic_asset_type is None or generic_asset_type == "Market":
            markets = Market.query.all()
            generic_assets.extend(markets)
    else:
        if generic_asset_type is None:
            click.echo(
                "If you specify --asset-name, please also specify --asset-type, so we can look it up."
            )
            return
        if generic_asset_type == "WeatherSensor":
            sensors = WeatherSensor.query.filter(
                WeatherSensor.name == generic_asset_name).one_or_none()
            if sensors is not None:
                generic_assets.append(sensors)
        if generic_asset_type == "Asset":
            assets = Asset.query.filter(
                Asset.name == generic_asset_name).one_or_none()
            if assets is not None:
                generic_assets.append(assets)
        if generic_asset_type == "Market":
            markets = Market.query.filter(
                Market.name == generic_asset_name).one_or_none()
            if markets is not None:
                generic_assets.append(markets)
    if not generic_assets:
        click.echo("No such assets in db, so I will not add any forecasts.")
        return

    # Make a model for each asset and horizon, make rolling forecasts and save to database.
    # We cannot use (faster) bulk save, as forecasts might become regressors in other forecasts.
    for generic_asset in generic_assets:
        for horizon in horizons:
            try:
                default_model = lookup_model_specs_configurator()
                model_specs, model_identifier, model_fallback = default_model(
                    generic_asset=generic_asset,
                    forecast_start=start,
                    forecast_end=end,
                    forecast_horizon=horizon,
                    custom_model_params=dict(
                        training_and_testing_period=training_and_testing_period
                    ),
                )
                click.echo(
                    "Computing forecasts of %s ahead for %s, "
                    "from %s to %s with a training and testing period of %s, using %s ..."
                    % (
                        naturaldelta(horizon),
                        generic_asset.name,
                        start,
                        end,
                        naturaldelta(training_and_testing_period),
                        model_identifier,
                    ))
                model_specs.creation_time = start
                forecasts, model_state = make_rolling_forecasts(
                    start=start, end=end, model_specs=model_specs)
            except (NotEnoughDataException, MissingData, NaNData) as e:
                click.echo("Skipping forecasts for asset %s: %s" %
                           (generic_asset, str(e)))
                continue
            """
            import matplotlib.pyplot as plt
            plt.plot(
                model_state.specs.outcome_var.load_series().loc[
                    pd.date_range(start, end=end, freq="15T")
                ],
                label="y",
            )
            plt.plot(forecasts, label="y^hat")
            plt.legend()
            plt.show()
            """

            beliefs = []
            if isinstance(generic_asset, Asset):
                beliefs = [
                    Power(
                        datetime=ensure_local_timezone(
                            dt, tz_name=LOCAL_TIME_ZONE),
                        horizon=horizon,
                        value=value,
                        asset_id=generic_asset.id,
                        data_source_id=data_source.id,
                    ) for dt, value in forecasts.items()
                ]
            elif isinstance(generic_asset, Market):
                beliefs = [
                    Price(
                        datetime=ensure_local_timezone(
                            dt, tz_name=LOCAL_TIME_ZONE),
                        horizon=horizon,
                        value=value,
                        market_id=generic_asset.id,
                        data_source_id=data_source.id,
                    ) for dt, value in forecasts.items()
                ]
            elif isinstance(generic_asset, WeatherSensor):
                beliefs = [
                    Weather(
                        datetime=ensure_local_timezone(
                            dt, tz_name=LOCAL_TIME_ZONE),
                        horizon=horizon,
                        value=value,
                        sensor_id=generic_asset.id,
                        data_source_id=data_source.id,
                    ) for dt, value in forecasts.items()
                ]

            print("Saving %s %s-forecasts for %s..." %
                  (len(beliefs), naturaldelta(horizon), generic_asset.name))
            for belief in beliefs:
                db.session.add(belief)

    click.echo("DB now has %d Power Forecasts" %
               db.session.query(Power).filter(
                   Power.horizon > timedelta(hours=0)).count())
    click.echo("DB now has %d Price Forecasts" %
               db.session.query(Price).filter(
                   Price.horizon > timedelta(hours=0)).count())
    click.echo("DB now has %d Weather Forecasts" %
               db.session.query(Weather).filter(
                   Weather.horizon > timedelta(hours=0)).count())
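A hypothetical call, e.g. from a populate-db script (the asset type and name are placeholders; leaving both as None makes forecasts for all weather sensors, assets and markets):

# Sketch only: "my_solar_asset" is a made-up asset name.
populate_time_series_forecasts(
    db,
    generic_asset_type="Asset",
    generic_asset_name="my_solar_asset",
    from_date="2015-02-08",
    to_date="2015-03-08",
)
db.session.commit()  # the function adds beliefs to the session; commit as needed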
Code example #7
File: analytics.py  Project: jorisdehaes/flexmeasures
def get_weather_data(
    assets: List[Asset],
    metrics: dict,
    sensor_type: WeatherSensorType,
    query_window: Tuple[datetime, datetime],
    resolution: str,
    forecast_horizon: timedelta,
) -> Tuple[pd.DataFrame, pd.DataFrame, str, WeatherSensor, dict]:
    """Get most recent weather data and forecast weather data for the requested forecast horizon.

    Return weather observations, weather forecasts (either might be an empty DataFrame),
    the name of the sensor type, the weather sensor and a dict with the following metrics:
    - expected value
    - mean absolute error
    - mean absolute percentage error
    - weighted absolute percentage error"""

    # Todo: for now we only collect weather data for a single asset
    asset = assets[0]

    weather_data = tb.BeliefsDataFrame(columns=["event_value"])
    weather_forecast_data = tb.BeliefsDataFrame(columns=["event_value"])
    sensor_type_name = ""
    closest_sensor = None
    if sensor_type:
        # Find the 50 closest weather sensors
        sensor_type_name = sensor_type.name
        closest_sensors = find_closest_weather_sensor(sensor_type_name,
                                                      n=50,
                                                      object=asset)
        if closest_sensors:
            closest_sensor = closest_sensors[0]

            # Collect the weather data for the requested time window
            sensor_names = [sensor.name for sensor in closest_sensors]

            # Get weather data
            weather_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Weather.collect(
                sensor_names,
                query_window=query_window,
                resolution=resolution,
                belief_horizon_window=(None, timedelta(hours=0)),
                sum_multiple=False,
            )
            weather_df_dict: Dict[str, pd.DataFrame] = {}
            for sensor_name in weather_bdf_dict:
                weather_df_dict[sensor_name] = simplify_index(
                    weather_bdf_dict[sensor_name],
                    index_levels_to_columns=["belief_horizon", "source"],
                )

            # Get weather forecasts
            weather_forecast_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Weather.collect(
                sensor_names,
                query_window=query_window,
                resolution=resolution,
                belief_horizon_window=(forecast_horizon, None),
                source_types=["user", "forecasting script", "script"],
                sum_multiple=False,
            )
            weather_forecast_df_dict: Dict[str, pd.DataFrame] = {}
            for sensor_name in weather_forecast_bdf_dict:
                weather_forecast_df_dict[sensor_name] = simplify_index(
                    weather_forecast_bdf_dict[sensor_name],
                    index_levels_to_columns=["belief_horizon", "source"],
                )

            # Take the closest weather sensor which contains some data for the selected time window
            for sensor, sensor_name in zip(closest_sensors, sensor_names):
                if (
                    not weather_df_dict[sensor_name]["event_value"].isnull().values.all()
                    or not weather_forecast_df_dict[sensor_name]["event_value"].isnull().values.all()
                ):
                    closest_sensor = sensor
                    break

            weather_data = weather_df_dict[sensor_name]
            weather_forecast_data = weather_forecast_df_dict[sensor_name]

            # Calculate the weather metrics
            if not weather_data.empty:
                metrics["realised_weather"] = weather_data["event_value"].mean()
            else:
                metrics["realised_weather"] = np.NaN
            if (
                not weather_forecast_data.empty
                and weather_forecast_data.size == weather_data.size
            ):
                metrics["expected_weather"] = weather_forecast_data["event_value"].mean()
                metrics["mae_weather"] = calculations.mean_absolute_error(
                    weather_data["event_value"], weather_forecast_data["event_value"]
                )
                metrics["mape_weather"] = calculations.mean_absolute_percentage_error(
                    weather_data["event_value"], weather_forecast_data["event_value"]
                )
                metrics["wape_weather"] = calculations.weighted_absolute_percentage_error(
                    weather_data["event_value"], weather_forecast_data["event_value"]
                )
            else:
                metrics["expected_weather"] = np.NaN
                metrics["mae_weather"] = np.NaN
                metrics["mape_weather"] = np.NaN
                metrics["wape_weather"] = np.NaN
    return (
        weather_data,
        weather_forecast_data,
        sensor_type_name,
        closest_sensor,
        metrics,
    )
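A call sketch matching the signature and type hints above (all arguments are placeholders supplied by the caller, e.g. an analytics view):

# Sketch only: `my_asset`, `sensor_type` and the time window come from the caller's context.
weather_data, weather_forecast_data, sensor_type_name, closest_sensor, metrics = get_weather_data(
    assets=[my_asset],
    metrics={},
    sensor_type=sensor_type,
    query_window=(query_start, query_end),
    resolution="15T",
    forecast_horizon=timedelta(hours=48),
)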