Example #1
def test_get_significant_states_are_ordered(opp_recorder):
    """Test order of results from get_significant_states.

    When entity ids are given, the results should be returned with the data
    in the same order.
    """
    opp = opp_recorder()
    zero, four, _states = record_states(opp)
    entity_ids = ["media_player.test", "media_player.test2"]
    hist = history.get_significant_states(opp, zero, four, entity_ids)
    assert list(hist.keys()) == entity_ids
    entity_ids = ["media_player.test2", "media_player.test"]
    hist = history.get_significant_states(opp, zero, four, entity_ids)
    assert list(hist.keys()) == entity_ids
Example #2
def test_get_significant_states_only(opp_recorder):
    """Test significant states when significant_states_only is set."""
    opp = opp_recorder()
    entity_id = "sensor.test"

    def set_state(state, **kwargs):
        """Set the state."""
        opp.states.set(entity_id, state, **kwargs)
        wait_recording_done(opp)
        return opp.states.get(entity_id)

    start = dt_util.utcnow() - timedelta(minutes=4)
    points = []
    for i in range(1, 4):
        points.append(start + timedelta(minutes=i))

    states = []
    with patch("openpeerpower.components.recorder.dt_util.utcnow", return_value=start):
        set_state("123", attributes={"attribute": 10.64})

    with patch(
        "openpeerpower.components.recorder.dt_util.utcnow", return_value=points[0]
    ):
        # Attributes are different, state not
        states.append(set_state("123", attributes={"attribute": 21.42}))

    with patch(
        "openpeerpower.components.recorder.dt_util.utcnow", return_value=points[1]
    ):
        # State is different, attributes not
        states.append(set_state("32", attributes={"attribute": 21.42}))

    with patch(
        "openpeerpower.components.recorder.dt_util.utcnow", return_value=points[2]
    ):
        # Everything is different
        states.append(set_state("412", attributes={"attribute": 54.23}))

    hist = history.get_significant_states(opp, start, significant_changes_only=True)

    assert len(hist[entity_id]) == 2
    assert states[0] not in hist[entity_id]
    assert states[1] in hist[entity_id]
    assert states[2] in hist[entity_id]

    hist = history.get_significant_states(opp, start, significant_changes_only=False)

    assert len(hist[entity_id]) == 3
    assert states == hist[entity_id]
Example #3
def test_get_significant_states_with_initial(opp_history):
    """Test that only significant states are returned.

    We should get back every thermostat change that
    includes an attribute change, but only the state updates for
    media player (attribute changes are not significant and not returned).
    """
    opp = opp_history
    zero, four, states = record_states(opp)
    one = zero + timedelta(seconds=1)
    one_and_half = zero + timedelta(seconds=1.5)
    for entity_id in states:
        if entity_id == "media_player.test":
            states[entity_id] = states[entity_id][1:]
        for state in states[entity_id]:
            if state.last_changed == one:
                state.last_changed = one_and_half

    hist = get_significant_states(
        opp,
        one_and_half,
        four,
        filters=history.Filters(),
        include_start_time_state=True,
    )
    assert states == hist
Example #4
def test_get_significant_states_minimal_response(opp_history):
    """Test that only significant states are returned.

    When minimal_response is set, only the first and
    last states return a complete state.

    We should get back every thermostat change that
    includes an attribute change, but only the state updates for
    media player (attribute changes are not significant and not returned).
    """
    opp = opp_history
    zero, four, states = record_states(opp)
    hist = get_significant_states(opp,
                                  zero,
                                  four,
                                  filters=history.Filters(),
                                  minimal_response=True)

    # The second media_player.test state is reduced
    # down to last_changed and state when minimal_response
    # is set.  We use JSONEncoder to make sure that our
    # pre-encoded last_changed is always the same as what
    # will happen when encoding a native state.
    input_state = states["media_player.test"][1]
    orig_last_changed = json.dumps(
        process_timestamp(input_state.last_changed),
        cls=JSONEncoder,
    ).replace('"', "")
    orig_state = input_state.state
    states["media_player.test"][1] = {
        "last_changed": orig_last_changed,
        "state": orig_state,
    }

    assert states == hist
Example #5
def test_get_significant_states(opp_history):
    """Test that only significant states are returned.

    We should get back every thermostat change that
    includes an attribute change, but only the state updates for
    media player (attribute changes are not significant and not returned).
    """
    opp = opp_history
    zero, four, states = record_states(opp)
    hist = get_significant_states(opp, zero, four, filters=history.Filters())
    assert states == hist
Example #6
def test_get_significant_states_entity_id(opp_recorder):
    """Test that only significant states are returned for one entity."""
    opp = opp_recorder()
    zero, four, states = record_states(opp)
    del states["media_player.test2"]
    del states["media_player.test3"]
    del states["thermostat.test"]
    del states["thermostat.test2"]
    del states["script.can_cancel_this_one"]

    hist = history.get_significant_states(opp, zero, four, ["media_player.test"])
    assert states == hist
Example #7
def test_compile_hourly_statistics_unavailable(opp_recorder):
    """Test compiling hourly statistics, with the sensor being unavailable."""
    opp = opp_recorder()
    recorder = opp.data[DATA_INSTANCE]
    setup_component(opp, "sensor", {})
    zero, four, states = record_states_partially_unavailable(opp)
    hist = history.get_significant_states(opp, zero, four)
    assert dict(states) == dict(hist)

    recorder.do_adhoc_statistics(period="hourly", start=four)
    wait_recording_done(opp)
    stats = statistics_during_period(opp, four)
    assert stats == {}
Example #8
def check_significant_states(opp, zero, four, states, config):
    """Check if significant states are retrieved."""
    filters = history.Filters()
    exclude = config[history.DOMAIN].get(history.CONF_EXCLUDE)
    if exclude:
        filters.excluded_entities = exclude.get(history.CONF_ENTITIES, [])
        filters.excluded_domains = exclude.get(history.CONF_DOMAINS, [])
    include = config[history.DOMAIN].get(history.CONF_INCLUDE)
    if include:
        filters.included_entities = include.get(history.CONF_ENTITIES, [])
        filters.included_domains = include.get(history.CONF_DOMAINS, [])

    hist = get_significant_states(opp, zero, four, filters=filters)
    assert states == hist
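
For reference, a minimal sketch of the kind of configuration dict this helper expects. The include/exclude structure mirrors the keys read above; the concrete domain and entity values (and the zero/four/states arguments) are purely illustrative, not taken from the tests.

config = {
    history.DOMAIN: {
        history.CONF_INCLUDE: {
            history.CONF_DOMAINS: ["media_player"],  # hypothetical domain
            history.CONF_ENTITIES: ["thermostat.test"],  # hypothetical entity
        },
        history.CONF_EXCLUDE: {
            history.CONF_DOMAINS: ["script"],
            history.CONF_ENTITIES: ["media_player.test3"],
        },
    }
}
check_significant_states(opp, zero, four, states, config)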
Example #9
def test_get_significant_states_multiple_entity_ids(opp_history):
    """Test that only significant states are returned for one entity."""
    opp = opp_history
    zero, four, states = record_states(opp)
    del states["media_player.test2"]
    del states["media_player.test3"]
    del states["thermostat.test2"]
    del states["script.can_cancel_this_one"]

    hist = get_significant_states(
        opp,
        zero,
        four,
        ["media_player.test", "thermostat.test"],
        filters=history.Filters(),
    )
    assert states == hist
Example #10
def test_get_significant_states_without_initial(opp_history):
    """Test that only significant states are returned.

    We should get back every thermostat change that
    includes an attribute change, but only the state updates for
    media player (attribute changes are not significant and not returned).
    """
    opp = opp_history
    zero, four, states = record_states(opp)
    one = zero + timedelta(seconds=1)
    one_and_half = zero + timedelta(seconds=1.5)
    for entity_id in states:
        states[entity_id] = list(
            filter(lambda s: s.last_changed != one, states[entity_id]))
    del states["media_player.test2"]

    hist = get_significant_states(
        opp,
        one_and_half,
        four,
        filters=history.Filters(),
        include_start_time_state=False,
    )
    assert states == hist
Example #11
def test_compile_hourly_statistics(opp_recorder):
    """Test compiling hourly statistics."""
    opp = opp_recorder()
    recorder = opp.data[DATA_INSTANCE]
    setup_component(opp, "sensor", {})
    zero, four, states = record_states(opp)
    hist = history.get_significant_states(opp, zero, four)
    assert dict(states) == dict(hist)

    recorder.do_adhoc_statistics(period="hourly", start=zero)
    wait_recording_done(opp)
    stats = statistics_during_period(opp, zero)
    assert stats == {
        "sensor.test1": [{
            "statistic_id": "sensor.test1",
            "start": process_timestamp_to_utc_isoformat(zero),
            "mean": approx(14.915254237288135),
            "min": approx(10.0),
            "max": approx(20.0),
            "last_reset": None,
            "state": None,
            "sum": None,
        }]
    }
Example #12
def get_significant_states(opp, *args, **kwargs):
    """Wrap _get_significant_states with an sql session."""
    return history.get_significant_states(opp, *args, **kwargs)
Example #13
def compile_statistics(opp: OpenPeerPower, start: datetime.datetime,
                       end: datetime.datetime) -> dict:
    """Compile statistics for all entities during start-end.

    Note: This will query the database and must not be run in the event loop
    """
    result: dict = {}

    entities = _get_entities(opp)

    # Get history between start and end
    history_list = history.get_significant_states(  # type: ignore
        opp, start - datetime.timedelta.resolution, end,
        [i[0] for i in entities])

    for entity_id, device_class in entities:
        wanted_statistics = DEVICE_CLASS_STATISTICS[device_class]

        if entity_id not in history_list:
            continue

        entity_history = history_list[entity_id]
        fstates = [(float(el.state), el) for el in entity_history
                   if _is_number(el.state)]

        if not fstates:
            continue

        result[entity_id] = {}

        # Make calculations
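        # zip(*fstates) transposes the (value, state) pairs; islice(..., 1)
        # keeps only the first row (the tuple of float values), which
        # max()/min() then unpack and reduce.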
        if "max" in wanted_statistics:
            result[entity_id]["max"] = max(*itertools.islice(zip(*fstates), 1))
        if "min" in wanted_statistics:
            result[entity_id]["min"] = min(*itertools.islice(zip(*fstates), 1))

        if "mean" in wanted_statistics:
            result[entity_id]["mean"] = _time_weighted_average(
                fstates, start, end)

        if "sum" in wanted_statistics:
            last_reset = old_last_reset = None
            new_state = old_state = None
            _sum = 0
            last_stats = statistics.get_last_statistics(
                opp, 1, entity_id)  # type: ignore
            if entity_id in last_stats:
                # We have compiled history for this sensor before, use that as a starting point
                last_reset = old_last_reset = last_stats[entity_id][0][
                    "last_reset"]
                new_state = old_state = last_stats[entity_id][0]["state"]
                _sum = last_stats[entity_id][0]["sum"]

            for fstate, state in fstates:
                if "last_reset" not in state.attributes:
                    continue
                if (last_reset :=
                        state.attributes["last_reset"]) != old_last_reset:
                    # The sensor has been reset, update the sum
                    if old_state is not None:
                        _sum += new_state - old_state
                    # ...and update the starting point
                    new_state = fstate
                    old_last_reset = last_reset
                    old_state = new_state
                else:
                    new_state = fstate

            if last_reset is None or new_state is None or old_state is None:
                # No valid updates
                result.pop(entity_id)
                continue

            # Update the sum with the last state
            _sum += new_state - old_state
            result[entity_id]["last_reset"] = dt_util.parse_datetime(
                last_reset)
            result[entity_id]["sum"] = _sum
            result[entity_id]["state"] = new_state
Example #14
def test_compile_hourly_energy_statistics(opp_recorder):
    """Test compiling hourly statistics."""
    opp = opp_recorder()
    recorder = opp.data[DATA_INSTANCE]
    setup_component(opp, "sensor", {})
    sns1_attr = {"device_class": "energy", "state_class": "measurement"}
    sns2_attr = {"device_class": "energy"}
    sns3_attr = {}

    zero, four, eight, states = record_energy_states(opp, sns1_attr, sns2_attr,
                                                     sns3_attr)
    hist = history.get_significant_states(opp, zero - timedelta.resolution,
                                          eight + timedelta.resolution)
    assert dict(states)["sensor.test1"] == dict(hist)["sensor.test1"]

    recorder.do_adhoc_statistics(period="hourly", start=zero)
    wait_recording_done(opp)
    recorder.do_adhoc_statistics(period="hourly",
                                 start=zero + timedelta(hours=1))
    wait_recording_done(opp)
    recorder.do_adhoc_statistics(period="hourly",
                                 start=zero + timedelta(hours=2))
    wait_recording_done(opp)
    stats = statistics_during_period(opp, zero)
    assert stats == {
        "sensor.test1": [
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(zero),
                "state": approx(20.0),
                "sum": approx(10.0),
            },
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=1)),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(four),
                "state": approx(40.0),
                "sum": approx(10.0),
            },
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=2)),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": process_timestamp_to_utc_isoformat(four),
                "state": approx(70.0),
                "sum": approx(40.0),
            },
        ]
    }