Example 1
def test_compile_hourly_sum_statistics(hass_recorder, caplog, device_class,
                                       unit, native_unit, factor):
    """Test compiling hourly statistics."""
    zero = dt_util.utcnow()
    hass = hass_recorder()
    recorder = hass.data[DATA_INSTANCE]
    setup_component(hass, "sensor", {})
    attributes = {
        "device_class": device_class,
        "state_class": "measurement",
        "unit_of_measurement": unit,
        "last_reset": None,
    }
    seq = [10, 15, 20, 10, 30, 40, 50, 60, 70]

    four, eight, states = record_meter_states(hass, zero, "sensor.test1",
                                              attributes, seq)
    hist = history.get_significant_states(hass, zero - timedelta.resolution,
                                          eight + timedelta.resolution)
    assert dict(states)["sensor.test1"] == dict(hist)["sensor.test1"]

    recorder.do_adhoc_statistics(period="hourly", start=zero)
    wait_recording_done(hass)
    recorder.do_adhoc_statistics(period="hourly",
                                 start=zero + timedelta(hours=1))
    wait_recording_done(hass)
    recorder.do_adhoc_statistics(period="hourly",
                                 start=zero + timedelta(hours=2))
    wait_recording_done(hass)
    statistic_ids = list_statistic_ids(hass)
    assert statistic_ids == [{
        "statistic_id": "sensor.test1",
        "unit_of_measurement": native_unit
    }]
    stats = statistics_during_period(hass, zero)
    assert stats == {
        "sensor.test1": [
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(zero),
                "state": approx(factor * seq[2]),
                "sum": approx(factor * 10.0),
            },
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=1)),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(four),
                "state": approx(factor * seq[5]),
                "sum": approx(factor * 10.0),
            },
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=2)),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(four),
                "state": approx(factor * seq[8]),
                "sum": approx(factor * 40.0),
            },
        ]
    }
    assert "Error while processing event StatisticsTask" not in caplog.text
Example 2
def test_compile_hourly_statistics(hass_recorder):
    """Test compiling hourly statistics."""
    hass = hass_recorder()
    recorder = hass.data[DATA_INSTANCE]
    setup_component(hass, "sensor", {})
    zero, four, states = record_states(hass)
    hist = history.get_significant_states(hass, zero, four)
    assert dict(states) == dict(hist)

    for kwargs in ({}, {"statistic_ids": ["sensor.test1"]}):
        stats = statistics_during_period(hass,
                                         zero,
                                         period="5minute",
                                         **kwargs)
        assert stats == {}
    stats = get_last_statistics(hass, 0, "sensor.test1", True)
    assert stats == {}

    recorder.do_adhoc_statistics(start=zero)
    recorder.do_adhoc_statistics(start=four)
    wait_recording_done(hass)
    expected_1 = {
        "statistic_id": "sensor.test1",
        "start": process_timestamp_to_utc_isoformat(zero),
        "end": process_timestamp_to_utc_isoformat(zero + timedelta(minutes=5)),
        "mean": approx(14.915254237288135),
        "min": approx(10.0),
        "max": approx(20.0),
        "last_reset": None,
        "state": None,
        "sum": None,
    }
    expected_2 = {
        "statistic_id": "sensor.test1",
        "start": process_timestamp_to_utc_isoformat(four),
        "end": process_timestamp_to_utc_isoformat(four + timedelta(minutes=5)),
        "mean": approx(20.0),
        "min": approx(20.0),
        "max": approx(20.0),
        "last_reset": None,
        "state": None,
        "sum": None,
    }
    expected_stats1 = [
        {**expected_1, "statistic_id": "sensor.test1"},
        {**expected_2, "statistic_id": "sensor.test1"},
    ]
    expected_stats2 = [
        {**expected_1, "statistic_id": "sensor.test2"},
        {**expected_2, "statistic_id": "sensor.test2"},
    ]

    # Test statistics_during_period
    stats = statistics_during_period(hass, zero, period="5minute")
    assert stats == {
        "sensor.test1": expected_stats1,
        "sensor.test2": expected_stats2
    }

    stats = statistics_during_period(hass,
                                     zero,
                                     statistic_ids=["sensor.test2"],
                                     period="5minute")
    assert stats == {"sensor.test2": expected_stats2}

    stats = statistics_during_period(hass,
                                     zero,
                                     statistic_ids=["sensor.test3"],
                                     period="5minute")
    assert stats == {}

    # Test get_last_statistics
    stats = get_last_statistics(hass, 0, "sensor.test1", True)
    assert stats == {}

    stats = get_last_statistics(hass, 1, "sensor.test1", True)
    assert stats == {
        "sensor.test1": [{**expected_2, "statistic_id": "sensor.test1"}]
    }

    stats = get_last_statistics(hass, 2, "sensor.test1", True)
    assert stats == {"sensor.test1": expected_stats1[::-1]}

    stats = get_last_statistics(hass, 3, "sensor.test1", True)
    assert stats == {"sensor.test1": expected_stats1[::-1]}

    stats = get_last_statistics(hass, 1, "sensor.test3", True)
    assert stats == {}
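
The mean asserted above is time-weighted rather than arithmetic: each state
contributes in proportion to how long it was held within the 5-minute window.
A minimal sketch of that calculation, simplified from the recorder's
_time_weighted_average (the helper and its inputs here are illustrative):

from datetime import datetime, timedelta

def time_weighted_average(samples, start, end):
    """samples: (value, timestamp) pairs sorted by timestamp within [start, end]."""
    total = 0.0
    prev_value, prev_time = None, start
    for value, when in samples:
        if prev_value is not None:
            total += prev_value * (when - prev_time).total_seconds()
        prev_value, prev_time = value, when
    if prev_value is not None:
        total += prev_value * (end - prev_time).total_seconds()
    return total / (end - start).total_seconds()

start = datetime(2021, 9, 1)
# 10 held for 3 minutes, then 20 held for 2 minutes over a 5-minute window:
samples = [(10.0, start), (20.0, start + timedelta(minutes=3))]
assert time_weighted_average(samples, start, start + timedelta(minutes=5)) == 14.0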
Example 3
def compile_statistics(  # noqa: C901
        hass: HomeAssistant, start: datetime.datetime,
        end: datetime.datetime) -> dict:
    """Compile statistics for all entities during start-end.

    Note: This will query the database and must not be run in the event loop
    """
    result: dict = {}

    entities = _get_entities(hass)

    wanted_statistics = _wanted_statistics(entities)

    # Get history between start and end
    entities_full_history = [
        i[0] for i in entities if "sum" in wanted_statistics[i[0]]
    ]
    history_list = {}
    if entities_full_history:
        history_list = history.get_significant_states(  # type: ignore
            hass,
            start - datetime.timedelta.resolution,
            end,
            entity_ids=entities_full_history,
            significant_changes_only=False,
        )
    entities_significant_history = [
        i[0] for i in entities if "sum" not in wanted_statistics[i[0]]
    ]
    if entities_significant_history:
        _history_list = history.get_significant_states(  # type: ignore
            hass,
            start - datetime.timedelta.resolution,
            end,
            entity_ids=entities_significant_history,
        )
        history_list = {**history_list, **_history_list}

    for entity_id, state_class, device_class in entities:
        if entity_id not in history_list:
            continue

        entity_history = history_list[entity_id]
        unit, fstates = _normalize_states(hass, entity_history, device_class,
                                          entity_id)

        if not fstates:
            continue

        # Check metadata
        if old_metadata := statistics.get_metadata(hass, entity_id):
            if old_metadata["unit_of_measurement"] != unit:
                if WARN_UNSTABLE_UNIT not in hass.data:
                    hass.data[WARN_UNSTABLE_UNIT] = set()
                if entity_id not in hass.data[WARN_UNSTABLE_UNIT]:
                    hass.data[WARN_UNSTABLE_UNIT].add(entity_id)
                    _LOGGER.warning(
                        "The unit of %s (%s) does not match the unit of already "
                        "compiled statistics (%s). Generation of long term statistics "
                        "will be suppressed unless the unit changes back to %s",
                        entity_id,
                        unit,
                        old_metadata["unit_of_measurement"],
                        old_metadata["unit_of_measurement"],
                    )
                continue

        result[entity_id] = {}

        # Set meta data
        result[entity_id]["meta"] = {
            "unit_of_measurement": unit,
            "has_mean": "mean" in wanted_statistics[entity_id],
            "has_sum": "sum" in wanted_statistics[entity_id],
        }

        # Make calculations
        stat: dict = {}
        if "max" in wanted_statistics[entity_id]:
            stat["max"] = max(*itertools.islice(zip(*fstates), 1))
        if "min" in wanted_statistics[entity_id]:
            stat["min"] = min(*itertools.islice(zip(*fstates), 1))

        if "mean" in wanted_statistics[entity_id]:
            stat["mean"] = _time_weighted_average(fstates, start, end)

        if "sum" in wanted_statistics[entity_id]:
            last_reset = old_last_reset = None
            new_state = old_state = None
            _sum = 0
            last_stats = statistics.get_last_statistics(hass, 1, entity_id)
            if entity_id in last_stats:
                # We have compiled history for this sensor before, use that as a starting point
                last_reset = old_last_reset = last_stats[entity_id][0]["last_reset"]
                new_state = old_state = last_stats[entity_id][0]["state"]
                _sum = last_stats[entity_id][0]["sum"] or 0

            for fstate, state in fstates:

                # Deprecated, will be removed in Home Assistant 2021.10
                if ("last_reset" not in state.attributes
                        and state_class == STATE_CLASS_MEASUREMENT):
                    continue

                reset = False
                if (state_class != STATE_CLASS_TOTAL_INCREASING
                        and (last_reset := state.attributes.get("last_reset"))
                        != old_last_reset):
                    if old_state is None:
                        _LOGGER.info(
                            "Compiling initial sum statistics for %s, zero point set to %s",
                            entity_id,
                            fstate,
                        )
                    else:
                        _LOGGER.info(
                            "Detected new cycle for %s, last_reset set to %s (old last_reset %s)",
                            entity_id,
                            last_reset,
                            old_last_reset,
                        )
                    reset = True
                elif old_state is None and last_reset is None:
                    reset = True
                    _LOGGER.info(
                        "Compiling initial sum statistics for %s, zero point set to %s",
                        entity_id,
                        fstate,
                    )
                elif state_class == STATE_CLASS_TOTAL_INCREASING and (
                        old_state is None
                        or reset_detected(hass, entity_id, fstate, new_state)):
                    reset = True
                    _LOGGER.info(
                        "Detected new cycle for %s, value dropped from %s to %s",
                        entity_id,
                        fstate,
                        new_state,
                    )

                if reset:
                    # The sensor has been reset, update the sum
                    if old_state is not None:
                        _sum += new_state - old_state
                    # ..and update the starting point
                    new_state = fstate
                    old_last_reset = last_reset
                    # Force a new cycle for an existing sensor to start at 0
                    if old_state is not None:
                        old_state = 0.0
                    else:
                        old_state = new_state
                else:
                    new_state = fstate
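
            # NOTE: the excerpt ends mid-loop; a plausible completion,
            # mirroring the sum finalization shown in Example 6, would be:
            if new_state is None or old_state is None:
                # No valid updates
                result.pop(entity_id)
                continue

            # Update the sum with the last state
            _sum += new_state - old_state
            if last_reset is not None:
                stat["last_reset"] = dt_util.parse_datetime(last_reset)
            stat["sum"] = _sum
            stat["state"] = new_state

        result[entity_id]["stat"] = stat

    return result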
Example 4
def test_rename_entity(hass_recorder):
    """Test statistics is migrated when entity_id is changed."""
    hass = hass_recorder()
    recorder = hass.data[DATA_INSTANCE]
    setup_component(hass, "sensor", {})

    entity_reg = mock_registry(hass)
    reg_entry = entity_reg.async_get_or_create(
        "sensor",
        "test",
        "unique_0000",
        suggested_object_id="test1",
    )
    assert reg_entry.entity_id == "sensor.test1"

    zero, four, states = record_states(hass)
    hist = history.get_significant_states(hass, zero, four)
    assert dict(states) == dict(hist)

    for kwargs in ({}, {"statistic_ids": ["sensor.test1"]}):
        stats = statistics_during_period(hass,
                                         zero,
                                         period="5minute",
                                         **kwargs)
        assert stats == {}
    stats = get_last_statistics(hass, 0, "sensor.test1", True)
    assert stats == {}

    recorder.do_adhoc_statistics(start=zero)
    wait_recording_done(hass)
    expected_1 = {
        "statistic_id": "sensor.test1",
        "start": process_timestamp_to_utc_isoformat(zero),
        "end": process_timestamp_to_utc_isoformat(zero + timedelta(minutes=5)),
        "mean": approx(14.915254237288135),
        "min": approx(10.0),
        "max": approx(20.0),
        "last_reset": None,
        "state": None,
        "sum": None,
    }
    expected_stats1 = [
        {**expected_1, "statistic_id": "sensor.test1"},
    ]
    expected_stats2 = [
        {**expected_1, "statistic_id": "sensor.test2"},
    ]
    expected_stats99 = [
        {**expected_1, "statistic_id": "sensor.test99"},
    ]

    stats = statistics_during_period(hass, zero, period="5minute")
    assert stats == {
        "sensor.test1": expected_stats1,
        "sensor.test2": expected_stats2
    }

    entity_reg.async_update_entity(reg_entry.entity_id,
                                   new_entity_id="sensor.test99")
    hass.block_till_done()

    stats = statistics_during_period(hass, zero, period="5minute")
    assert stats == {
        "sensor.test99": expected_stats99,
        "sensor.test2": expected_stats2
    }
Example 5
    async def handle_presence_simulation(call,
                                         restart=False,
                                         entities_after_restart=None,
                                         delta_after_restart=None):
        """Start the presence simulation"""
        if call is not None:  #if we are here, it is a call of the service, or a restart at the end of a cycle
            if isinstance(call.data.get("entity_id", entities), list):
                overridden_entities = call.data.get("entity_id", entities)
            else:
                overridden_entities = [call.data.get("entity_id", entities)]
            overridden_delta = call.data.get("delta", delta)
            overridden_restore = call.data.get("restore_states",
                                               restoreAfterStop)
        else:  # if we are here, it is a call from the toggle service or from the turn_on action of the switch entity,
            # or this is a restart and the simulation was launched after a restart of HA
            if entities_after_restart is not None:
                overridden_entities = entities_after_restart
            else:
                overridden_entities = entities
            if delta_after_restart is not None:
                overridden_delta = delta_after_restart
            else:
                overridden_delta = delta
            overridden_restore = restoreAfterStop

        # get the switch entity
        entity = hass.data[DOMAIN][SWITCH_PLATFORM][SWITCH]
        _LOGGER.debug("Is already running ? %s", entity.state)
        if is_running():
            _LOGGER.warning(
                "Presence simulation already running. Doing nothing")
            return
        running = True
        # turn on the switch. Not calling turn_on() to avoid calling the start service again
        entity.internal_turn_on()
        _LOGGER.debug("setting restore states %s", overridden_restore)
        await entity.set_restore_states(overridden_restore)
        _LOGGER.debug("Presence simulation started")

        current_date = datetime.now(timezone.utc)
        # compute the start date that will be used in the query to get the history of the entities
        minus_delta = current_date + timedelta(-overridden_delta)
        # expand the entities, i.e. replace each group with the entities it contains
        try:
            expanded_entities = await async_expand_entities(overridden_entities)
        except Exception as e:
            _LOGGER.error("Error while identifying entities: %s", e)
            running = False
            entity.internal_turn_off()
            return

        if not restart:
            # set attribute on the switch
            try:
                await entity.set_start_datetime(
                    datetime.now(hass.config.time_zone))
            except Exception as e:
                try:
                    await entity.set_start_datetime(
                        datetime.now(pytz.timezone(hass.config.time_zone)))
                except Exception as e:
                    _LOGGER.warning(
                        "Start datetime could not be set to HA timezone: %s", e)
                    await entity.set_start_datetime(datetime.now())
            if overridden_restore:
                service_data = {}
                service_data["scene_id"] = RESTORE_SCENE
                service_data["snapshot_entities"] = expanded_entities
                _LOGGER.debug("Saving scene before launching the simulation")
                try:
                    await hass.services.async_call("scene",
                                                   "create",
                                                   service_data,
                                                   blocking=True)
                except Exception as e:
                    _LOGGER.error(
                        "Scene could not be created, continue without the restore functionality: %s",
                        e)

        await entity.set_entities(expanded_entities)
        await entity.set_delta(overridden_delta)
        _LOGGER.debug("Getting the historic from %s for %s", minus_delta,
                      expanded_entities)
        dic = get_significant_states(hass=hass,
                                     start_time=minus_delta,
                                     entity_ids=expanded_entities,
                                     significant_changes_only=False)
        _LOGGER.debug("history: %s", dic)
        for entity_id in dic:
            _LOGGER.debug('Entity %s', entity_id)
            # launch an async task per entity_id
            hass.async_create_task(
                simulate_single_entity(entity_id, dic[entity_id]))

        # launch an async task that will restart the simulation after the delay has passed
        hass.async_create_task(
            restart_presence_simulation(
                call,
                entities_after_restart=entities_after_restart,
                delta_after_restart=delta_after_restart))
        _LOGGER.debug("All async tasks launched")
Example 6
def compile_statistics(hass: HomeAssistant, start: datetime.datetime,
                       end: datetime.datetime) -> dict:
    """Compile statistics for all entities during start-end.

    Note: This will query the database and must not be run in the event loop
    """
    result: dict = {}

    entities = _get_entities(hass)

    # Get history between start and end
    history_list = history.get_significant_states(  # type: ignore
        hass, start - datetime.timedelta.resolution, end,
        [i[0] for i in entities])

    for entity_id, device_class in entities:
        wanted_statistics = DEVICE_CLASS_STATISTICS[device_class]

        if entity_id not in history_list:
            continue

        entity_history = history_list[entity_id]
        unit, fstates = _normalize_states(entity_history, device_class,
                                          entity_id)

        if not fstates:
            continue

        result[entity_id] = {}

        # Set meta data
        result[entity_id]["meta"] = {
            "unit_of_measurement": unit,
            "has_mean": "mean" in wanted_statistics,
            "has_sum": "sum" in wanted_statistics,
        }

        # Make calculations
        stat: dict = {}
        if "max" in wanted_statistics:
            stat["max"] = max(*itertools.islice(zip(*fstates), 1))
        if "min" in wanted_statistics:
            stat["min"] = min(*itertools.islice(zip(*fstates), 1))

        if "mean" in wanted_statistics:
            stat["mean"] = _time_weighted_average(fstates, start, end)

        if "sum" in wanted_statistics:
            last_reset = old_last_reset = None
            new_state = old_state = None
            _sum = 0
            last_stats = statistics.get_last_statistics(
                hass, 1, entity_id)  # type: ignore
            if entity_id in last_stats:
                # We have compiled history for this sensor before, use that as a starting point
                last_reset = old_last_reset = last_stats[entity_id][0]["last_reset"]
                new_state = old_state = last_stats[entity_id][0]["state"]
                _sum = last_stats[entity_id][0]["sum"]

            for fstate, state in fstates:

                if "last_reset" not in state.attributes:
                    continue
                if (last_reset := state.attributes["last_reset"]) != old_last_reset:
                    # The sensor has been reset, update the sum
                    if old_state is not None:
                        _sum += new_state - old_state
                    # ..and update the starting point
                    new_state = fstate
                    old_last_reset = last_reset
                    old_state = new_state
                else:
                    new_state = fstate

            if last_reset is None or new_state is None or old_state is None:
                # No valid updates
                result.pop(entity_id)
                continue

            # Update the sum with the last state
            _sum += new_state - old_state
            stat["last_reset"] = dt_util.parse_datetime(last_reset)
            stat["sum"] = _sum
            stat["state"] = new_state

        result[entity_id]["stat"] = stat
Example 7
def test_compile_hourly_energy_statistics(hass_recorder):
    """Test compiling hourly statistics."""
    hass = hass_recorder()
    recorder = hass.data[DATA_INSTANCE]
    setup_component(hass, "sensor", {})
    sns1_attr = {"device_class": "energy", "state_class": "measurement"}
    sns2_attr = {"device_class": "energy"}
    sns3_attr = {}

    zero, four, eight, states = record_energy_states(hass, sns1_attr,
                                                     sns2_attr, sns3_attr)
    hist = history.get_significant_states(hass, zero - timedelta.resolution,
                                          eight + timedelta.resolution)
    assert dict(states)["sensor.test1"] == dict(hist)["sensor.test1"]

    recorder.do_adhoc_statistics(period="hourly", start=zero)
    wait_recording_done(hass)
    recorder.do_adhoc_statistics(period="hourly",
                                 start=zero + timedelta(hours=1))
    wait_recording_done(hass)
    recorder.do_adhoc_statistics(period="hourly",
                                 start=zero + timedelta(hours=2))
    wait_recording_done(hass)
    stats = statistics_during_period(hass, zero)
    assert stats == {
        "sensor.test1": [
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(zero),
                "state": approx(20.0),
                "sum": approx(10.0),
            },
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=1)),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(four),
                "state": approx(40.0),
                "sum": approx(10.0),
            },
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=2)),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(four),
                "state": approx(70.0),
                "sum": approx(40.0),
            },
        ]
    }
Example 8
def test_compile_hourly_energy_statistics_multiple(hass_recorder, caplog):
    """Test compiling multiple hourly statistics."""
    zero = dt_util.utcnow()
    hass = hass_recorder()
    recorder = hass.data[DATA_INSTANCE]
    setup_component(hass, "sensor", {})
    sns1_attr = {**ENERGY_SENSOR_ATTRIBUTES, "last_reset": None}
    sns2_attr = {**ENERGY_SENSOR_ATTRIBUTES, "last_reset": None}
    sns3_attr = {
        **ENERGY_SENSOR_ATTRIBUTES,
        "unit_of_measurement": "Wh",
        "last_reset": None,
    }
    sns4_attr = {**ENERGY_SENSOR_ATTRIBUTES}
    seq1 = [10, 15, 20, 10, 30, 40, 50, 60, 70]
    seq2 = [110, 120, 130, 0, 30, 45, 55, 65, 75]
    seq3 = [0, 0, 5, 10, 30, 50, 60, 80, 90]
    seq4 = [0, 0, 5, 10, 30, 50, 60, 80, 90]

    four, eight, states = record_energy_states(hass, zero, "sensor.test1",
                                               sns1_attr, seq1)
    _, _, _states = record_energy_states(hass, zero, "sensor.test2", sns2_attr,
                                         seq2)
    states = {**states, **_states}
    _, _, _states = record_energy_states(hass, zero, "sensor.test3", sns3_attr,
                                         seq3)
    states = {**states, **_states}
    _, _, _states = record_energy_states(hass, zero, "sensor.test4", sns4_attr,
                                         seq4)
    states = {**states, **_states}
    hist = history.get_significant_states(hass, zero - timedelta.resolution,
                                          eight + timedelta.resolution)
    assert dict(states)["sensor.test1"] == dict(hist)["sensor.test1"]

    recorder.do_adhoc_statistics(period="hourly", start=zero)
    wait_recording_done(hass)
    recorder.do_adhoc_statistics(period="hourly",
                                 start=zero + timedelta(hours=1))
    wait_recording_done(hass)
    recorder.do_adhoc_statistics(period="hourly",
                                 start=zero + timedelta(hours=2))
    wait_recording_done(hass)
    statistic_ids = list_statistic_ids(hass)
    assert statistic_ids == [
        {
            "statistic_id": "sensor.test1",
            "unit_of_measurement": "kWh"
        },
        {
            "statistic_id": "sensor.test2",
            "unit_of_measurement": "kWh"
        },
        {
            "statistic_id": "sensor.test3",
            "unit_of_measurement": "kWh"
        },
    ]
    stats = statistics_during_period(hass, zero)
    assert stats == {
        "sensor.test1": [
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(zero),
                "state": approx(20.0),
                "sum": approx(10.0),
            },
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=1)),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(four),
                "state": approx(40.0),
                "sum": approx(10.0),
            },
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=2)),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(four),
                "state": approx(70.0),
                "sum": approx(40.0),
            },
        ],
        "sensor.test2": [
            {
                "statistic_id": "sensor.test2",
                "start": process_timestamp_to_utc_isoformat(zero),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(zero),
                "state": approx(130.0),
                "sum": approx(20.0),
            },
            {
                "statistic_id": "sensor.test2",
                "start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=1)),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(four),
                "state": approx(45.0),
                "sum": approx(-95.0),
            },
            {
                "statistic_id": "sensor.test2",
                "start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=2)),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(four),
                "state": approx(75.0),
                "sum": approx(-65.0),
            },
        ],
        "sensor.test3": [
            {
                "statistic_id": "sensor.test3",
                "start": process_timestamp_to_utc_isoformat(zero),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(zero),
                "state": approx(5.0 / 1000),
                "sum": approx(5.0 / 1000),
            },
            {
                "statistic_id": "sensor.test3",
                "start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=1)),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(four),
                "state": approx(50.0 / 1000),
                "sum": approx(30.0 / 1000),
            },
            {
                "statistic_id": "sensor.test3",
                "start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=2)),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(four),
                "state": approx(90.0 / 1000),
                "sum": approx(70.0 / 1000),
            },
        ],
    }
    assert "Error while processing event StatisticsTask" not in caplog.text