Example #1
def test_run_information(hass_recorder):
    """Ensure run_information returns expected data."""
    before_start_recording = dt_util.utcnow()
    hass = hass_recorder()
    run_info = run_information_from_instance(hass)
    assert isinstance(run_info, RecorderRuns)
    assert run_info.closed_incorrect is False

    with session_scope(hass=hass) as session:
        run_info = run_information_with_session(session)
        assert isinstance(run_info, RecorderRuns)
        assert run_info.closed_incorrect is False

    run_info = run_information(hass)
    assert isinstance(run_info, RecorderRuns)
    assert run_info.closed_incorrect is False

    hass.states.set("test.two", "on", {})
    wait_recording_done(hass)
    run_info = run_information(hass)
    assert isinstance(run_info, RecorderRuns)
    assert run_info.closed_incorrect is False

    run_info = run_information(hass, before_start_recording)
    assert run_info is None

    run_info = run_information(hass, dt_util.utcnow())
    assert isinstance(run_info, RecorderRuns)
    assert run_info.closed_incorrect is False
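For orientation, a minimal sketch of reading the returned RecorderRuns row; the start and end attributes are assumed from the recorder schema and not shown in the test above:

# Sketch only: look up the run covering a point in time.
# `run.start` / `run.end` are assumed RecorderRuns columns; `end` is
# expected to be None while the recorder is still writing into the run.
run = run_information(hass, dt_util.utcnow())
if run is not None and run.end is None:
    print(f"Current run started at {run.start}")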
Example #2
def get_states(hass, utc_point_in_time, entity_ids=None, run=None, filters=None):
    """Return the states at a specific point in time."""
    if run is None:
        run = recorder.run_information_from_instance(hass, utc_point_in_time)

        # History did not run before utc_point_in_time
        if run is None:
            return []

    with session_scope(hass=hass) as session:
        return _get_states_with_session(
            session, utc_point_in_time, entity_ids, run, filters
        )
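A short usage sketch for get_states; the entity ID and time offset are illustrative:

from datetime import timedelta

# Sketch: fetch a hypothetical entity's state one hour ago. Returns []
# when the recorder was not running at that point in time.
one_hour_ago = dt_util.utcnow() - timedelta(hours=1)
states = get_states(hass, one_hour_ago, entity_ids=["light.kitchen"])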
Example #3
def get_states(
    hass: HomeAssistant,
    utc_point_in_time: datetime,
    entity_ids: list[str] | None = None,
    run: RecorderRuns | None = None,
    filters: Any = None,
    no_attributes: bool = False,
) -> list[LazyState]:
    """Return the states at a specific point in time."""
    if run is None and (
        run := recorder.run_information_from_instance(hass, utc_point_in_time)
    ) is None:
        # History did not run before utc_point_in_time
        return []

    with session_scope(hass=hass) as session:
        return _get_states_with_session(
            hass, session, utc_point_in_time, entity_ids, run, filters, no_attributes
        )
Example #4
def _states_to_json(
    hass,
    session,
    states,
    start_time,
    entity_ids,
    filters=None,
    include_start_time_state=True,
):
    """Convert SQL results into JSON friendly data structure.

    This takes our state list and turns it into a JSON friendly data
    structure {'entity_id': [list of states], 'entity_id2': [list of states]}

    We also need to go back and create a synthetic zero data point for
    each list of states, otherwise our graphs won't start on the Y
    axis correctly.
    """
    result = defaultdict(list)
    # Set all entity IDs to empty lists in result set to maintain the order
    if entity_ids is not None:
        for ent_id in entity_ids:
            result[ent_id] = []

    # Get the states at the start time
    timer_start = time.perf_counter()
    if include_start_time_state:
        run = recorder.run_information_from_instance(hass, start_time)
        for state in _get_states_with_session(session,
                                              start_time,
                                              entity_ids,
                                              run=run,
                                              filters=filters):
            state.last_changed = start_time
            state.last_updated = start_time
            result[state.entity_id].append(state)

    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug("getting %d first datapoints took %fs", len(result),
                      elapsed)

    # Append all changes to it
    for ent_id, group in groupby(states, lambda state: state.entity_id):
        result[ent_id].extend(group)

    # Filter out the empty lists if some states had 0 results.
    return {key: val for key, val in result.items() if val}
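The shape of the returned mapping, with hypothetical entity IDs:

# Illustrative only:
# {
#     "sensor.temperature": [<state at start_time>, <state 1>, <state 2>],
#     "light.kitchen": [<state at start_time>],
# }
# Entities that produced no states at all are dropped by the final
# dict comprehension.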
Example #5
def _get_states_with_session(
    hass: HomeAssistant,
    session: Session,
    utc_point_in_time: datetime,
    entity_ids: list[str] | None = None,
    run: RecorderRuns | None = None,
    filters: Any | None = None,
    no_attributes: bool = False,
) -> list[LazyState]:
    """Return the states at a specific point in time."""
    if entity_ids and len(entity_ids) == 1:
        return _get_single_entity_states_with_session(
            hass, session, utc_point_in_time, entity_ids[0], no_attributes
        )

    if run is None and (
        run := recorder.run_information_from_instance(hass, utc_point_in_time)
    ) is None:
        # History did not run before utc_point_in_time
        return []
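A sketch of calling the helper directly; note that the single-entity branch above never needs a run lookup:

# Sketch (hypothetical entity ID): single-entity queries skip
# run_information_from_instance entirely.
with session_scope(hass=hass) as session:
    states = _get_states_with_session(
        hass, session, dt_util.utcnow(), entity_ids=["sensor.temp"]
    )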
Example #6
def _sorted_states_to_json(
    hass,
    session,
    states,
    start_time,
    entity_ids,
    filters=None,
    include_start_time_state=True,
    minimal_response=False,
):
    """Convert SQL results into JSON friendly data structure.

    This takes our state list and turns it into a JSON friendly data
    structure {'entity_id': [list of states], 'entity_id2': [list of states]}

    States must be sorted by entity_id and last_updated

    We also need to go back and create a synthetic zero data point for
    each list of states, otherwise our graphs won't start on the Y
    axis correctly.
    """
    result = defaultdict(list)
    # Set all entity IDs to empty lists in result set to maintain the order
    if entity_ids is not None:
        for ent_id in entity_ids:
            result[ent_id] = []

    # Get the states at the start time
    timer_start = time.perf_counter()
    if include_start_time_state:
        run = recorder.run_information_from_instance(hass, start_time)
        for state in _get_states_with_session(
            session, start_time, entity_ids, run=run, filters=filters
        ):
            state.last_changed = start_time
            state.last_updated = start_time
            result[state.entity_id].append(state)

    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug("getting %d first datapoints took %fs", len(result), elapsed)

    # Called in a tight loop so cache the function here
    _process_timestamp_to_utc_isoformat = process_timestamp_to_utc_isoformat

    # Append all changes to it
    for ent_id, group in groupby(states, lambda state: state.entity_id):
        domain = split_entity_id(ent_id)[0]
        ent_results = result[ent_id]
        if not minimal_response or domain in NEED_ATTRIBUTE_DOMAINS:
            ent_results.extend(
                [
                    native_state
                    for native_state in (LazyState(db_state) for db_state in group)
                    if (
                        domain != SCRIPT_DOMAIN
                        or native_state.attributes.get(ATTR_CAN_CANCEL)
                    )
                ]
            )
            continue

        # With minimal response we only provide a native
        # State for the first and last response. All the states
        # in-between only provide the "state" and the
        # "last_changed".
        if not ent_results:
            ent_results.append(LazyState(next(group)))

        prev_state = ent_results[-1]
        initial_state_count = len(ent_results)

        for db_state in group:
            # With minimal response we do not care about attribute
            # changes so we can filter out duplicate states
            if db_state.state == prev_state.state:
                continue

            ent_results.append(
                {
                    STATE_KEY: db_state.state,
                    LAST_CHANGED_KEY: _process_timestamp_to_utc_isoformat(
                        db_state.last_changed
                    ),
                }
            )
            prev_state = db_state

        if prev_state and len(ent_results) != initial_state_count:
            # There was at least one state change
            # replace the last minimal state with
            # a full state
            ent_results[-1] = LazyState(prev_state)

    # Filter out the empty lists if some states had 0 results.
    return {key: val for key, val in result.items() if val}
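With minimal_response set, the per-entity list mixes full state objects with bare dicts. An illustrative shape, assuming STATE_KEY == "state" and LAST_CHANGED_KEY == "last_changed":

# Illustrative output for one entity (values made up):
# [
#     <LazyState entity_id=... state="off">,   # first state: full object
#     {"state": "on", "last_changed": "2021-01-01T00:05:00+00:00"},
#     <LazyState entity_id=... state="off">,   # last change: full object
# ]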
Example #7
def _sorted_states_to_dict(
    hass: HomeAssistant,
    session: Session,
    states: Iterable[States],
    start_time: datetime,
    entity_ids: list[str] | None,
    filters: Any = None,
    include_start_time_state: bool = True,
    minimal_response: bool = False,
    no_attributes: bool = False,
) -> MutableMapping[str, list[State | dict[str, Any]]]:
    """Convert SQL results into JSON friendly data structure.

    This takes our state list and turns it into a JSON friendly data
    structure {'entity_id': [list of states], 'entity_id2': [list of states]}

    States must be sorted by entity_id and last_updated

    We also need to go back and create a synthetic zero data point for
    each list of states, otherwise our graphs won't start on the Y
    axis correctly.
    """
    result: dict[str, list[State | dict[str, Any]]] = defaultdict(list)
    # Set all entity IDs to empty lists in result set to maintain the order
    if entity_ids is not None:
        for ent_id in entity_ids:
            result[ent_id] = []

    # Get the states at the start time
    timer_start = time.perf_counter()
    if include_start_time_state:
        run = recorder.run_information_from_instance(hass, start_time)
        for state in _get_states_with_session(
                hass,
                session,
                start_time,
                entity_ids,
                run=run,
                filters=filters,
                no_attributes=no_attributes,
        ):
            state.last_changed = start_time
            state.last_updated = start_time
            result[state.entity_id].append(state)

    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug("getting %d first datapoints took %fs", len(result),
                      elapsed)

    # Called in a tight loop so cache the function here
    _process_timestamp_to_utc_isoformat = process_timestamp_to_utc_isoformat

    if entity_ids and len(entity_ids) == 1:
        states_iter: Iterable[tuple[str | Column, Iterator[States]]] = (
            (entity_ids[0], iter(states)),
        )
    else:
        states_iter = groupby(states, lambda state: state.entity_id)

    # Append all changes to it
    for ent_id, group in states_iter:
        ent_results = result[ent_id]
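        # Per-entity cache shared across LazyState instances, presumably so
        # identical attribute JSON payloads are parsed only once.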
        attr_cache: dict[str, dict[str, Any]] = {}

        if (
            not minimal_response
            or split_entity_id(ent_id)[0] in NEED_ATTRIBUTE_DOMAINS
        ):
            ent_results.extend(LazyState(db_state, attr_cache) for db_state in group)
            continue

        # With minimal response we only provide a native
        # State for the first and last response. All the states
        # in-between only provide the "state" and the
        # "last_changed".
        if not ent_results:
            if (first_state := next(group, None)) is None:
                continue
            ent_results.append(LazyState(first_state, attr_cache))

        prev_state = ent_results[-1]
        assert isinstance(prev_state, LazyState)
        initial_state_count = len(ent_results)

        for db_state in group:
            # With minimal response we do not care about attribute
            # changes so we can filter out duplicate states
            if db_state.state == prev_state.state:
                continue

            ent_results.append(
                {
                    STATE_KEY: db_state.state,
                    LAST_CHANGED_KEY: _process_timestamp_to_utc_isoformat(
                        db_state.last_changed
                    ),
                }
            )
            prev_state = db_state

        if prev_state and len(ent_results) != initial_state_count:
            # There was at least one state change
            # replace the last minimal state with
            # a full state
            ent_results[-1] = LazyState(prev_state, attr_cache)

    # Filter out the empty lists if some states had 0 results.
    return {key: val for key, val in result.items() if val}