def _add_events(hass, events):
    """Clear stored events, fire the given event types, return DB natives."""
    with session_scope(hass=hass) as session:
        session.query(Events).delete(synchronize_session=False)
    for fired_type in events:
        hass.bus.fire(fired_type)
        hass.block_till_done()
    # Let the recorder thread flush everything before reading back.
    hass.data[DATA_INSTANCE].block_till_done()

    with session_scope(hass=hass) as session:
        return [stored.to_native() for stored in session.query(Events)]
def _get_events(hass, config, start_day, end_day, entity_id=None):
    """Get events for a period of time."""
    from homeassistant.components.recorder.models import Events, States
    from homeassistant.components.recorder.util import (
        execute, session_scope)

    entities_filter = _generate_filter_from_config(config)

    with session_scope(hass=hass) as session:
        if entity_id is None:
            entity_ids = _get_related_entity_ids(session, entities_filter)
        else:
            entity_ids = [entity_id.lower()]

        # Only events inside the requested window.
        in_window = ((Events.time_fired > start_day)
                     & (Events.time_fired < end_day))
        # Keep rows whose joined state is a real change for a tracked
        # entity, or events with no associated state row at all.
        state_ok = (((States.last_updated == States.last_changed)
                     & States.entity_id.in_(entity_ids))
                    | States.state_id.is_(None))

        query = (session.query(Events)
                 .order_by(Events.time_fired)
                 .outerjoin(States, Events.event_id == States.event_id)
                 .filter(Events.event_type.in_(ALL_EVENT_TYPES))
                 .filter(in_window)
                 .filter(state_ok))

        events = execute(query)

    return humanify(hass, _exclude_events(events, entities_filter))
    def test_purge_method(self):
        """Test purge method.

        Exercises the ``recorder.purge`` service three ways: with no
        service data (defaults), with an explicit ``keep_days``, and
        finally with ``repack`` enabled, checking row counts after each.
        """
        service_data = {'keep_days': 4}
        self._add_test_events()
        self._add_test_states()

        # make sure we start with 7 states
        with session_scope(hass=self.hass) as session:
            states = session.query(States)
            self.assertEqual(states.count(), 7)

            events = session.query(Events).filter(
                Events.event_type.like("EVENT_TEST%"))
            self.assertEqual(events.count(), 7)

            self.hass.data[DATA_INSTANCE].block_till_done()

            # run purge method - no service data, use defaults
            self.hass.services.call('recorder', 'purge')
            self.hass.async_block_till_done()

            # Small wait for recorder thread
            self.hass.data[DATA_INSTANCE].block_till_done()

            # only purged old events
            self.assertEqual(states.count(), 5)
            self.assertEqual(events.count(), 5)

            # run purge method - correct service data
            self.hass.services.call('recorder', 'purge',
                                    service_data=service_data)
            self.hass.async_block_till_done()

            # Small wait for recorder thread
            self.hass.data[DATA_INSTANCE].block_till_done()

            # we should only have 3 states left after purging
            self.assertEqual(states.count(), 3)

            # the protected state is among them
            self.assertTrue('iamprotected' in (
                state.state for state in states))

            # now we should only have 3 events left
            self.assertEqual(events.count(), 3)

            # and the protected event is among them
            self.assertTrue('EVENT_TEST_FOR_PROTECTED' in (
                event.event_type for event in events.all()))
            self.assertFalse('EVENT_TEST_PURGE' in (
                event.event_type for event in events.all()))

            # run purge method - correct service data, with repack
            service_data['repack'] = True
            self.assertFalse(self.hass.data[DATA_INSTANCE].did_vacuum)
            self.hass.services.call('recorder', 'purge',
                                    service_data=service_data)
            self.hass.async_block_till_done()
            self.hass.data[DATA_INSTANCE].block_till_done()
            self.assertTrue(self.hass.data[DATA_INSTANCE].did_vacuum)
Exemple #4
0
def _get_events(hass, config, start_day, end_day, entity_id=None):
    """Get events for a period of time."""
    from homeassistant.components.recorder.models import Events, States
    from homeassistant.components.recorder.util import session_scope

    entities_filter = _generate_filter_from_config(config)

    def _surviving_events(query):
        """Yield Events that are not filtered away."""
        for db_row in query.yield_per(500):
            native = db_row.to_native()
            if _keep_event(native, entities_filter):
                yield native

    with session_scope(hass=hass) as session:
        entity_ids = ([entity_id.lower()] if entity_id is not None
                      else _get_related_entity_ids(session, entities_filter))

        query = (session.query(Events)
                 .order_by(Events.time_fired)
                 .outerjoin(States, Events.event_id == States.event_id)
                 .filter(Events.event_type.in_(ALL_EVENT_TYPES))
                 .filter((Events.time_fired > start_day)
                         & (Events.time_fired < end_day))
                 .filter(((States.last_updated == States.last_changed)
                          & States.entity_id.in_(entity_ids))
                         | States.state_id.is_(None)))

        # Materialize inside the session; yield_per streams in batches.
        return list(humanify(hass, _surviving_events(query)))
Exemple #5
0
    async def _load_history_from_db(self):
        """Load the history of the brightness values from the database.

        This only needs to be done once during startup.
        """
        from homeassistant.components.recorder.models import States
        start_date = datetime.now() - timedelta(days=self._conf_check_days)
        entity_id = self._readingmap.get(READING_BRIGHTNESS)
        if entity_id is None:
            _LOGGER.debug("not reading the history from the database as "
                          "there is no brightness sensor configured.")
            return

        _LOGGER.debug("initializing values for %s from the database",
                      self._name)
        with session_scope(hass=self.hass) as session:
            # BUG FIX: the original combined the two criteria with the
            # Python ``and`` operator, which evaluates to the right-hand
            # operand only and silently dropped the entity_id filter.
            # SQLAlchemy column criteria must be combined with ``&``.
            query = session.query(States).filter(
                (States.entity_id == entity_id.lower()) &
                (States.last_updated > start_date)
            ).order_by(States.last_updated.asc())
            states = execute(query)

            for state in states:
                # filter out all None, NaN and "unknown" states
                # only keep real values
                try:
                    self._brightness_history.add_measurement(
                        int(state.state), state.last_updated)
                except ValueError:
                    pass
        _LOGGER.debug("initializing from database completed")
        self.async_schedule_update_ha_state()
Exemple #6
0
def get_significant_states(hass, start_time, end_time=None, entity_id=None,
                           filters=None):
    """
    Return states changes during UTC period start_time - end_time.

    Significant states are all states where there is a state change,
    as well as all states from certain domains (for instance
    thermostat so that we get current temperature in our graphs).
    """
    from homeassistant.components.recorder.models import States

    entity_ids = None if entity_id is None else (entity_id.lower(),)

    with session_scope(hass=hass) as session:
        # Significant rows: any state from a significant domain, or any
        # actual state change (last_changed equals last_updated).
        significant = (States.domain.in_(SIGNIFICANT_DOMAINS)
                       | (States.last_changed == States.last_updated))
        query = session.query(States).filter(
            significant & (States.last_updated > start_time))

        if filters:
            query = filters.apply(query, entity_ids)

        if end_time is not None:
            query = query.filter(States.last_updated < end_time)

        query = query.order_by(States.entity_id, States.last_updated)

        states = (
            state for state in execute(query)
            if _is_significant(state)
            and not state.attributes.get(ATTR_HIDDEN, False))

    return states_to_json(hass, states, start_time, entity_id, filters)
Exemple #7
0
def get_states(hass, utc_point_in_time, entity_ids=None, run=None,
               filters=None):
    """Return the states at a specific point in time."""
    from homeassistant.components.recorder.models import States

    if run is None:
        run = recorder.run_information(hass, utc_point_in_time)
        # History did not run before utc_point_in_time
        if run is None:
            return []

    from sqlalchemy import and_, func

    with session_scope(hass=hass) as session:
        # For every entity, the id of its newest state row created within
        # the current run and before the requested point in time.
        newest_ids = session.query(
            func.max(States.state_id).label('max_state_id')
        ).filter(
            (States.created >= run.start)
            & (States.created < utc_point_in_time)
            & (~States.domain.in_(IGNORE_DOMAINS)))

        if filters:
            newest_ids = filters.apply(newest_ids, entity_ids)

        newest_ids = newest_ids.group_by(States.entity_id).subquery()

        query = session.query(States).join(newest_ids, and_(
            States.state_id == newest_ids.c.max_state_id))

        return [state for state in execute(query)
                if not state.attributes.get(ATTR_HIDDEN, False)]
def _add_entities(hass, entity_ids):
    """Add entities."""
    shared_attrs = {'test_attr': 5, 'test_attr_10': 'nice'}
    for index, eid in enumerate(entity_ids):
        hass.states.set(eid, 'state{}'.format(index), shared_attrs)
        hass.block_till_done()
    # Wait for the recorder thread to persist everything.
    hass.data[DATA_INSTANCE].block_till_done()

    with session_scope(hass=hass) as session:
        return [row.to_native() for row in session.query(States)]
    def test_purge_method(self):
        """Test purge method.

        Runs the ``recorder.purge`` service with defaults, with explicit
        ``keep_days`` and finally with ``repack`` enabled, verifying the
        state and event row counts after each call.
        """
        service_data = {'keep_days': 4}
        self._add_test_events()
        self._add_test_states()

        # make sure we start with 6 states
        with session_scope(hass=self.hass) as session:
            states = session.query(States)
            assert states.count() == 6

            events = session.query(Events).filter(
                Events.event_type.like("EVENT_TEST%"))
            assert events.count() == 6

            self.hass.data[DATA_INSTANCE].block_till_done()

            # run purge method - no service data, use defaults
            self.hass.services.call('recorder', 'purge')
            self.hass.block_till_done()

            # Small wait for recorder thread
            self.hass.data[DATA_INSTANCE].block_till_done()

            # only purged old events
            assert states.count() == 4
            assert events.count() == 4

            # run purge method - correct service data
            self.hass.services.call('recorder', 'purge',
                                    service_data=service_data)
            self.hass.block_till_done()

            # Small wait for recorder thread
            self.hass.data[DATA_INSTANCE].block_till_done()

            # we should only have 2 states left after purging
            assert states.count() == 2

            # now we should only have 2 events left
            assert events.count() == 2

            # the purged test event must be gone
            assert not ('EVENT_TEST_PURGE' in (
                event.event_type for event in events.all()))

            # run purge method - correct service data, with repack
            with patch('homeassistant.components.recorder.purge._LOGGER') \
                    as mock_logger:
                service_data['repack'] = True
                self.hass.services.call('recorder', 'purge',
                                        service_data=service_data)
                self.hass.block_till_done()
                self.hass.data[DATA_INSTANCE].block_till_done()
                # The fourth debug call is expected to announce the vacuum
                assert mock_logger.debug.mock_calls[3][1][0] == \
                    "Vacuuming SQLite to free space"
Exemple #10
0
def last_recorder_run(hass):
    """Retrieve the last closed recorder run from the database."""
    from homeassistant.components.recorder.models import RecorderRuns

    with session_scope(hass=hass) as session:
        latest = (session.query(RecorderRuns)
                  .order_by(RecorderRuns.end.desc())
                  .first())
        if latest is None:
            return None
        # Detach the row so it remains usable after the session closes.
        session.expunge(latest)
        return latest
Exemple #11
0
def _get_events(hass, start_day, end_day):
    """Get events for a period of time."""
    from homeassistant.components.recorder.models import Events
    from homeassistant.components.recorder.util import (
        execute, session_scope)

    with session_scope(hass=hass) as session:
        # All events fired strictly inside the requested window.
        in_window = ((Events.time_fired > start_day)
                     & (Events.time_fired < end_day))
        query = (session.query(Events)
                 .order_by(Events.time_fired)
                 .filter(in_window))
        return execute(query)
def test_recorder_bad_commit(hass_recorder):
    """Bad _commit should retry 3 times."""
    hass = hass_recorder()

    def broken_work(session):
        """Execute a query against a table that does not exist."""
        session.execute('select * from notthere')

    # Patch out the retry back-off sleep so the test runs instantly.
    with patch('homeassistant.components.recorder.time.sleep') as sleep_mock, \
            util.session_scope(hass=hass) as session:
        result = util.commit(session, broken_work)
    assert result is False
    assert sleep_mock.call_count == 3
Exemple #13
0
    def test_purge_old_states(self):
        """Test deleting old states.

        After purging with keep_days=4 only 2 of the 6 rows remain.
        """
        self._add_test_states()
        # make sure we start with 6 states
        with session_scope(hass=self.hass) as session:
            states = session.query(States)
            assert states.count() == 6

            # run purge_old_data()
            purge_old_data(self.hass.data[DATA_INSTANCE], 4, repack=False)

            # we should only have 2 states left after purging
            assert states.count() == 2
    def test_purge_old_states(self):
        """Test deleting old states.

        After purging with keep_days=4 only 2 of the 5 rows remain.
        """
        self._add_test_states()
        # make sure we start with 5 states
        with session_scope(hass=self.hass) as session:
            states = session.query(States)
            self.assertEqual(states.count(), 5)

            # run purge_old_data()
            purge_old_data(self.hass.data[DATA_INSTANCE], 4)

            # we should only have 2 states left after purging
            self.assertEqual(states.count(), 2)
    def test_purge_old_events(self):
        """Test deleting old events.

        After purging with keep_days=4 only 3 of the 5 test events remain.
        """
        self._add_test_events()

        with session_scope(hass=self.hass) as session:
            events = session.query(Events).filter(
                Events.event_type.like("EVENT_TEST%"))
            self.assertEqual(events.count(), 5)

            # run purge_old_data()
            purge_old_data(self.hass.data[DATA_INSTANCE], 4)

            # now we should only have 3 events left
            self.assertEqual(events.count(), 3)
Exemple #16
0
    def test_purge_old_events(self):
        """Test deleting old events.

        After purging with keep_days=4 only 2 of the 6 test events remain.
        """
        self._add_test_events()

        with session_scope(hass=self.hass) as session:
            events = session.query(Events).filter(
                Events.event_type.like("EVENT_TEST%"))
            assert events.count() == 6

            # run purge_old_data()
            purge_old_data(self.hass.data[DATA_INSTANCE], 4, repack=False)

            # we should only have 2 events left
            assert events.count() == 2
Exemple #17
0
def _get_events(hass, config, start_day, end_day):
    """Get events for a period of time."""
    from homeassistant.components.recorder.models import Events, States
    from homeassistant.components.recorder.util import (
        execute, session_scope)

    with session_scope(hass=hass) as session:
        query = (session.query(Events)
                 .order_by(Events.time_fired)
                 .outerjoin(States, Events.event_id == States.event_id)
                 .filter(Events.event_type.in_(ALL_EVENT_TYPES))
                 .filter((Events.time_fired > start_day)
                         & (Events.time_fired < end_day))
                 # Keep actual state changes, plus events with no state row.
                 .filter((States.last_updated == States.last_changed)
                         | States.last_updated.is_(None)))
        events = execute(query)
    return humanify(_exclude_events(events, config))
Exemple #18
0
    def test_saving_state(self):
        """Test saving and restoring a state."""
        entity_id = 'test.recorder'
        state = 'restoring_from_db'
        attributes = {'test_attr': 5, 'test_attr_10': 'nice'}

        self.hass.states.set(entity_id, state, attributes)

        # Wait for the event bus and the recorder thread to flush the write.
        self.hass.block_till_done()
        self.hass.data[DATA_INSTANCE].block_till_done()

        with session_scope(hass=self.hass) as session:
            db_states = list(session.query(States))
            assert len(db_states) == 1
            # Round-trip the stored row back into a native State object.
            state = db_states[0].to_native()

        assert state == self.hass.states.get(entity_id)
    def test_purge_method(self):
        """Test purge method.

        A purge call without service data must be a no-op; a call with
        ``keep_days`` must remove the old rows.
        """
        service_data = {'keep_days': 4}
        self._add_test_states()
        self._add_test_events()

        # make sure we start with 5 states
        with session_scope(hass=self.hass) as session:
            states = session.query(States)
            self.assertEqual(states.count(), 5)

            events = session.query(Events).filter(
                Events.event_type.like("EVENT_TEST%"))
            self.assertEqual(events.count(), 5)

            self.hass.data[DATA_INSTANCE].block_till_done()

            # run purge method - no service data, should not work
            self.hass.services.call('recorder', 'purge')
            self.hass.async_block_till_done()

            # Small wait for recorder thread
            sleep(0.1)

            # nothing was purged: all 5 states remain
            self.assertEqual(states.count(), 5)

            # and all 5 events remain as well
            self.assertEqual(events.count(), 5)

            # run purge method - correct service data
            self.hass.services.call('recorder', 'purge',
                                    service_data=service_data)
            self.hass.async_block_till_done()

            # Small wait for recorder thread
            sleep(0.1)

            # we should only have 2 states left after purging
            self.assertEqual(states.count(), 2)

            # now we should only have 3 events left
            self.assertEqual(events.count(), 3)
Exemple #20
0
def state_changes_during_period(hass, start_time, end_time=None,
                                entity_id=None):
    """Return states changes during UTC period start_time - end_time."""
    from homeassistant.components.recorder.models import States

    with session_scope(hass=hass) as session:
        # Collect all AND-ed criteria up front, then apply them at once.
        conditions = [States.last_changed == States.last_updated,
                      States.last_updated > start_time]
        if end_time is not None:
            conditions.append(States.last_updated < end_time)

        query = session.query(States).filter(*conditions)
        if entity_id is not None:
            query = query.filter_by(entity_id=entity_id.lower())

        rows = execute(query.order_by(States.last_updated))

    return states_to_json(hass, rows, start_time, entity_id)
Exemple #21
0
async def test_saving_state(hass: HomeAssistant, recorder_mock):
    """Test saving and restoring a state."""
    entity_id = "test.recorder"
    state = "restoring_from_db"
    attributes = {"test_attr": 5, "test_attr_10": "nice"}

    hass.states.async_set(entity_id, state, attributes)

    await async_wait_recording_done(hass)

    with session_scope(hass=hass) as session:
        db_states = []
        # States and their attributes live in separate tables; query both
        # and stitch the attributes back onto the native state object.
        for db_state, db_state_attributes in session.query(
                States, StateAttributes):
            db_states.append(db_state)
            state = db_state.to_native()
            state.attributes = db_state_attributes.to_native()
        assert len(db_states) == 1
        assert db_states[0].event_id > 0

    assert state == _state_empty_context(hass, entity_id)
Exemple #22
0
async def test_saving_state_with_intermixed_time_changes(
        hass: HomeAssistant, recorder_mock):
    """Test saving states with intermixed time changes."""
    entity_id = "test.recorder"
    state = "restoring_from_db"
    attributes = {"test_attr": 5, "test_attr_10": "nice"}
    attributes2 = {"test_attr": 10, "test_attr_10": "mean"}

    # Fire more than KEEPALIVE_TIME time-changed events around each write.
    for _ in range(KEEPALIVE_TIME + 1):
        async_fire_time_changed(hass, dt_util.utcnow())
    hass.states.async_set(entity_id, state, attributes)
    for _ in range(KEEPALIVE_TIME + 1):
        async_fire_time_changed(hass, dt_util.utcnow())
    hass.states.async_set(entity_id, state, attributes2)

    await async_wait_recording_done(hass)

    with session_scope(hass=hass) as session:
        # Only the two explicit state writes should have been recorded.
        db_states = list(session.query(States))
        assert len(db_states) == 2
        assert db_states[0].event_id > 0
Exemple #23
0
def state_changes_during_period(hass,
                                start_time,
                                end_time=None,
                                entity_id=None):
    """Return states changes during UTC period start_time - end_time."""
    from homeassistant.components.recorder.models import States

    with session_scope(hass=hass) as session:
        # Successive .filter() calls are AND-ed together.
        query = (session.query(States)
                 .filter(States.last_changed == States.last_updated)
                 .filter(States.last_updated > start_time))

        if end_time is not None:
            query = query.filter(States.last_updated < end_time)
        if entity_id is not None:
            query = query.filter_by(entity_id=entity_id.lower())

        matching = execute(query.order_by(States.last_updated))

    return states_to_json(hass, matching, start_time, entity_id)
async def test_saving_state(
        hass: HomeAssistant,
        async_setup_recorder_instance: SetupRecorderInstanceT):
    """Test saving and restoring a state."""
    instance = await async_setup_recorder_instance(hass)

    entity_id = "test.recorder"
    state = "restoring_from_db"
    attributes = {"test_attr": 5, "test_attr_10": "nice"}

    hass.states.async_set(entity_id, state, attributes)

    await async_wait_recording_done(hass, instance)

    with session_scope(hass=hass) as session:
        db_states = list(session.query(States))
        assert len(db_states) == 1
        assert db_states[0].event_id > 0
        # Round-trip the stored row back into a native State object.
        state = db_states[0].to_native()

    assert state == _state_empty_context(hass, entity_id)
Exemple #25
0
def get_significant_states(hass,
                           start_time,
                           end_time=None,
                           entity_ids=None,
                           filters=None,
                           include_start_time_state=True):
    """
    Return states changes during UTC period start_time - end_time.

    Significant states are all states where there is a state change,
    as well as all states from certain domains (for instance
    thermostat so that we get current temperature in our graphs).
    """
    timer_start = time.perf_counter()
    from homeassistant.components.recorder.models import States

    with session_scope(hass=hass) as session:
        # Significant = any row from a significant domain, or a genuine
        # state change (last_changed == last_updated), inside the window.
        query = session.query(
            States).filter((States.domain.in_(SIGNIFICANT_DOMAINS)
                            | (States.last_changed == States.last_updated))
                           & (States.last_updated > start_time))

        if filters:
            query = filters.apply(query, entity_ids)

        if end_time is not None:
            query = query.filter(States.last_updated < end_time)

        query = query.order_by(States.last_updated)

        # Lazily drop insignificant and hidden states from the result.
        states = (state for state in execute(query)
                  if (_is_significant(state)
                      and not state.attributes.get(ATTR_HIDDEN, False)))

    # Only format the elapsed time when debug logging is actually on.
    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug('get_significant_states took %fs', elapsed)

    return states_to_json(hass, states, start_time, entity_ids, filters,
                          include_start_time_state)
Exemple #26
0
    async def _async_initialize_from_database(self):
        """Initialize the list of states from the database.

        The query will get the list of states in DESCENDING order so that we
        can limit the result to self._sample_size. Afterwards reverse the
        list so that we get it in the right order again.

        If MaxAge is provided then query will restrict to entries younger then
        current datetime - MaxAge.
        """

        _LOGGER.debug("%s: initializing values from the database",
                      self.entity_id)

        with session_scope(hass=self.hass) as session:
            query = session.query(States).filter(
                States.entity_id == self._entity_id.lower())

            if self._max_age is not None:
                records_older_then = dt_util.utcnow() - self._max_age
                _LOGGER.debug(
                    "%s: retrieve records not older then %s",
                    self.entity_id,
                    records_older_then,
                )
                query = query.filter(States.last_updated >= records_older_then)
            else:
                _LOGGER.debug("%s: retrieving all records.", self.entity_id)

            # Newest first so that LIMIT keeps the most recent samples.
            query = query.order_by(States.last_updated.desc()).limit(
                self._sampling_size)
            states = execute(query)

        # Reverse so the queue is fed in chronological order.
        for state in reversed(states):
            self._add_state_to_queue(state)

        self.async_schedule_update_ha_state(True)

        _LOGGER.debug("%s: initializing from database completed",
                      self.entity_id)
def test_purge_old_states(hass, hass_recorder):
    """Test deleting old states.

    purge_old_data reports whether it finished; each call here removes two
    rows and repeated calls are needed until it returns True.
    """
    hass = hass_recorder()
    _add_test_states(hass)

    # make sure we start with 6 states
    with session_scope(hass=hass) as session:
        states = session.query(States)
        assert states.count() == 6

        # run purge_old_data()
        finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
        assert not finished
        assert states.count() == 4

        finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
        assert not finished
        assert states.count() == 2

        # a final pass with nothing left to delete reports completion
        finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
        assert finished
        assert states.count() == 2
    def _initialize_from_database(self):
        """Initialize the list of states from the database.

        The states are queried newest-first so that LIMIT keeps the most
        recent self._sampling_size rows; the result is then reversed to
        restore chronological order.
        """
        from homeassistant.components.recorder.models import States
        _LOGGER.debug("initializing values for %s from the database",
                      self.entity_id)

        with session_scope(hass=self._hass) as session:
            query = (session.query(States)
                     .filter(States.entity_id == self._entity_id.lower())
                     .order_by(States.last_updated.desc())
                     .limit(self._sampling_size))
            newest_first = execute(query)

        # Feed the sampled states oldest-first.
        for state in reversed(newest_first):
            self._add_state_to_queue(state)

        _LOGGER.debug("initializing from database completed")
def test_saving_event(hass, hass_recorder):
    """Test saving and restoring an event."""
    hass = hass_recorder()

    event_type = "EVENT_TEST"
    event_data = {"test_attr": 5, "test_attr_10": "nice"}

    events = []

    # Capture the fired event from the bus for later comparison.
    @callback
    def event_listener(event):
        """Record events from eventbus."""
        if event.event_type == event_type:
            events.append(event)

    hass.bus.listen(MATCH_ALL, event_listener)

    hass.bus.fire(event_type, event_data)

    wait_recording_done(hass)

    assert len(events) == 1
    event = events[0]

    hass.data[DATA_INSTANCE].block_till_done()

    with session_scope(hass=hass) as session:
        db_events = list(
            session.query(Events).filter_by(event_type=event_type))
        assert len(db_events) == 1
        db_event = db_events[0].to_native()

    assert event.event_type == db_event.event_type
    assert event.data == db_event.data
    assert event.origin == db_event.origin

    # Recorder uses SQLite and stores datetimes as integer unix timestamps
    assert event.time_fired.replace(
        microsecond=0) == db_event.time_fired.replace(microsecond=0)
Exemple #30
0
async def test_purge_old_recorder_runs(
        hass: HomeAssistant,
        async_setup_recorder_instance: SetupRecorderInstanceT):
    """Test deleting old recorder runs keeps current run."""
    instance = await async_setup_recorder_instance(hass)

    await _add_test_recorder_runs(hass, instance)

    # make sure we start with 7 recorder runs
    with session_scope(hass=hass) as session:
        recorder_runs = session.query(RecorderRuns)
        assert recorder_runs.count() == 7

        purge_before = dt_util.utcnow()

        # run purge_old_data()
        finished = purge_old_data(instance, purge_before, repack=False)
        assert not finished

        # the second pass completes and leaves only the current run
        finished = purge_old_data(instance, purge_before, repack=False)
        assert finished
        assert recorder_runs.count() == 1
Exemple #31
0
def state_changes_during_period(hass,
                                start_time,
                                end_time=None,
                                entity_id=None):
    """Return states changes during UTC period start_time - end_time."""
    with session_scope(hass=hass) as session:
        # Gather the AND-ed time/change criteria, then apply them at once.
        criteria = [States.last_changed == States.last_updated,
                    States.last_updated > start_time]
        if end_time is not None:
            criteria.append(States.last_updated < end_time)

        query = session.query(*QUERY_STATES).filter(*criteria)
        if entity_id is not None:
            query = query.filter_by(entity_id=entity_id.lower())

        entity_ids = None if entity_id is None else [entity_id]

        rows = execute(query.order_by(States.entity_id, States.last_updated))

        return _sorted_states_to_json(hass, session, rows, start_time,
                                      entity_ids)
Exemple #32
0
def test_get_full_significant_states_with_session_entity_no_matches(hass_recorder):
    """Test getting states at a specific point in time for entities that never have been recorded."""
    hass = hass_recorder()
    now = dt_util.utcnow()
    time_before_recorder_ran = now - timedelta(days=1000)
    with session_scope(hass=hass) as session:
        # Unknown entity ids must yield an empty mapping, not an error.
        assert (
            history.get_full_significant_states_with_session(
                hass, session, time_before_recorder_ran, now, entity_ids=["demo.id"]
            )
            == {}
        )
        # The same holds when several unknown ids are requested at once.
        assert (
            history.get_full_significant_states_with_session(
                hass,
                session,
                time_before_recorder_ran,
                now,
                entity_ids=["demo.id", "demo.id2"],
            )
            == {}
        )
def get_last_state_changes(hass, number_of_states, entity_id):
    """Return the last number_of_states."""
    from homeassistant.components.recorder.models import States

    start_time = dt_util.utcnow()

    with session_scope(hass=hass) as session:
        # Genuine state changes only: attribute-only updates have a
        # last_updated that differs from last_changed.
        query = session.query(States).filter(
            States.last_changed == States.last_updated)

        if entity_id is not None:
            query = query.filter_by(entity_id=entity_id.lower())

        entity_ids = None if entity_id is None else [entity_id]

        newest_first = query.order_by(
            States.last_updated.desc()).limit(number_of_states)
        states = execute(newest_first)

    # The rows come back newest-first; reverse for chronological order.
    return states_to_json(hass, reversed(states), start_time, entity_ids,
                          include_start_time_state=False)
Exemple #34
0
    async def _async_initialize_from_database(self):
        """Initialize the list of states from the database.

        Fetch today's rows (from local midnight onward) for the grid and
        solar entities, merge them, and feed them to add_state() in
        chronological order.
        """
        # Restrict the query to records created since local midnight.
        midnight_local = datetime.datetime.now(get_localzone()).replace(
            microsecond=0, second=0, minute=0, hour=0)

        with session_scope(hass=self.hass) as session:
            # Grid data.
            net_query = session.query(States).filter(
                States.entity_id == self._net).filter(
                    States.created >= midnight_local)
            states_net = execute(net_query)

            # Solar data.
            gen_query = session.query(States).filter(
                States.entity_id == self._gen).filter(
                    States.created >= midnight_local)
            states_gen = execute(gen_query)

            # Merge both result sets and sort chronologically.
            states = states_net + states_gen
            states.sort(key=lambda row: row.last_updated)

            # Detach the ORM objects so they remain usable after the
            # session closes.
            session.expunge_all()

        # NOTE(review): all rows should already satisfy the date filter,
        # but the original author observed otherwise — kept as a plain loop.
        for state in states:
            self.add_state(entity="", new_state=state)
Exemple #35
0
def get_last_state_changes(hass, number_of_states, entity_id):
    """Return the last number_of_states."""
    start_time = dt_util.utcnow()

    with session_scope(hass=hass) as session:
        # Restrict to rows that represent real state changes.
        query = session.query(States).filter(
            States.last_changed == States.last_updated)

        if entity_id is not None:
            query = query.filter_by(entity_id=entity_id.lower())

        entity_ids = None if entity_id is None else [entity_id]

        newest_first = query.order_by(
            States.last_updated.desc()).limit(number_of_states)
        states = execute(newest_first)

    # Reverse so the caller receives states oldest-first.
    return states_to_json(hass,
                          reversed(states),
                          start_time,
                          entity_ids,
                          include_start_time_state=False)
Exemple #36
0
def get_states(hass,
               utc_point_in_time,
               entity_ids=None,
               run=None,
               filters=None):
    """Return the states at a specific point in time.

    For every entity, select the most recent state row created within the
    recorder run covering ``utc_point_in_time`` (but before that instant),
    skipping ignored domains and hidden entities.
    """
    from homeassistant.components.recorder.models import States

    if run is None:
        run = recorder.run_information(hass, utc_point_in_time)

        # History did not run before utc_point_in_time
        if run is None:
            return []

    from sqlalchemy import and_, func

    with session_scope(hass=hass) as session:
        # Per-entity max state_id created in [run.start, utc_point_in_time),
        # excluding ignored domains.
        most_recent_state_ids = session.query(
            func.max(States.state_id).label(
                'max_state_id')).filter((States.created >= run.start)
                                        & (States.created < utc_point_in_time)
                                        & (~States.domain.in_(IGNORE_DOMAINS)))

        if filters:
            most_recent_state_ids = filters.apply(most_recent_state_ids,
                                                  entity_ids)

        most_recent_state_ids = most_recent_state_ids.group_by(
            States.entity_id).subquery()

        # Join back to States to materialize the full rows for those ids.
        query = session.query(States).join(
            most_recent_state_ids,
            and_(States.state_id == most_recent_state_ids.c.max_state_id))

        return [
            state for state in execute(query)
            if not state.attributes.get(ATTR_HIDDEN, False)
        ]
Exemple #37
0
def test_saving_state_with_serializable_data(hass_recorder, caplog):
    """Test saving data that cannot be serialized does not crash."""
    hass = hass_recorder()

    # These two writes carry an unserializable attribute and must be
    # dropped by the recorder without crashing it.
    hass.bus.fire("bad_event", {"fail": CannotSerializeMe()})
    hass.states.set("test.one", "on", {"fail": CannotSerializeMe()})
    wait_recording_done(hass)
    hass.states.set("test.two", "on", {})
    wait_recording_done(hass)
    hass.states.set("test.two", "off", {})
    wait_recording_done(hass)

    with session_scope(hass=hass) as session:
        saved = list(session.query(States))
        # Only the two serializable test.two states made it to the db,
        # and the second links back to the first.
        assert len(saved) == 2
        assert [row.entity_id for row in saved] == ["test.two", "test.two"]
        assert saved[0].old_state_id is None
        assert saved[1].old_state_id == saved[0].state_id

    assert "State is not JSON serializable" in caplog.text
    def _initialize_from_database(self):
        """Initialize the list of states from the database.

        The query fetches states newest-first so the result can be capped
        at self._sampling_size; the rows are then replayed oldest-first so
        the queue ends up in chronological order.
        """
        from homeassistant.components.recorder.models import States
        _LOGGER.debug("initializing values for %s from the database",
                      self.entity_id)

        with session_scope(hass=self._hass) as session:
            newest_first = (
                session.query(States)
                .filter(States.entity_id == self._entity_id.lower())
                .order_by(States.last_updated.desc())
                .limit(self._sampling_size)
            )
            rows = execute(newest_first)

        for row in reversed(rows):
            self._add_state_to_queue(row)

        _LOGGER.debug("initializing from database completed")
Exemple #39
0
async def test_purge_old_events(
        hass: HomeAssistantType,
        async_setup_recorder_instance: SetupRecorderInstanceT):
    """Test deleting old events."""
    instance = await async_setup_recorder_instance(hass)

    await _add_test_events(hass, instance)

    with session_scope(hass=hass) as session:
        events = session.query(Events).filter(
            Events.event_type.like("EVENT_TEST%"))
        assert events.count() == 6

        # run purge_old_data(); the first pass deletes in batches and
        # reports that it is not yet finished
        finished = purge_old_data(instance, 4, repack=False)
        assert not finished
        assert events.count() == 2

        # the second pass finds nothing more to delete;
        # we should only have 2 events left
        finished = purge_old_data(instance, 4, repack=False)
        assert finished
        assert events.count() == 2
def get_significant_states(hass, start_time, end_time=None, entity_ids=None,
                           filters=None, include_start_time_state=True):
    """
    Return states changes during UTC period start_time - end_time.

    Significant states are all states where there is a state change,
    as well as all states from certain domains (for instance
    thermostat so that we get current temperature in our graphs).
    """
    timer_start = time.perf_counter()
    from homeassistant.components.recorder.models import States

    with session_scope(hass=hass) as session:
        # Keep rows from SIGNIFICANT_DOMAINS unconditionally; for all
        # other domains keep only genuine state changes
        # (last_changed == last_updated).
        query = session.query(States).filter(
            (States.domain.in_(SIGNIFICANT_DOMAINS) |
             (States.last_changed == States.last_updated)) &
            (States.last_updated > start_time))

        if filters:
            query = filters.apply(query, entity_ids)

        if end_time is not None:
            query = query.filter(States.last_updated < end_time)

        query = query.order_by(States.last_updated)

        # Lazily drop insignificant and hidden states from the result.
        states = (
            state for state in execute(query)
            if (_is_significant(state) and
                not state.attributes.get(ATTR_HIDDEN, False)))

    # Only compute the elapsed time when debug logging is active.
    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug(
            'get_significant_states took %fs', elapsed)

    return states_to_json(
        hass, states, start_time, entity_ids, filters,
        include_start_time_state)
def test_purge_old_events(hass, hass_recorder):
    """Test deleting old events."""
    # NOTE(review): the injected ``hass`` parameter is immediately shadowed
    # by the fixture-created instance, so it appears unused — confirm the
    # parameter is still needed.
    hass = hass_recorder()
    _add_test_events(hass)

    with session_scope(hass=hass) as session:
        events = session.query(Events).filter(Events.event_type.like("EVENT_TEST%"))
        assert events.count() == 6

        # run purge_old_data(); each pass deletes one batch of old events
        finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
        assert not finished
        assert events.count() == 4

        finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
        assert not finished
        assert events.count() == 2

        # we should only have 2 events left
        finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
        assert finished
        assert events.count() == 2
Exemple #42
0
    async def _load_history_from_db(self):
        """Load the history of the brightness values from the database.

        This only needs to be done once during startup.  Rows whose state
        cannot be parsed as an int (None, NaN, "unknown") are skipped.
        """

        start_date = datetime.now() - timedelta(days=self._conf_check_days)
        entity_id = self._readingmap.get(READING_BRIGHTNESS)
        if entity_id is None:
            _LOGGER.debug(
                "Not reading the history from the database as "
                "there is no brightness sensor configured"
            )
            return

        _LOGGER.debug("Initializing values for %s from the database", self._name)
        with session_scope(hass=self.hass) as session:
            # BUGFIX: the original combined the two conditions with the
            # Python ``and`` operator, which does not build a SQL AND for
            # SQLAlchemy column expressions — the date restriction was
            # silently dropped.  Use ``&`` so both conditions apply.
            query = (
                session.query(States)
                .filter(
                    (States.entity_id == entity_id.lower())
                    & (States.last_updated > start_date)
                )
                .order_by(States.last_updated.asc())
            )
            states = execute(query)

            for state in states:
                # filter out all None, NaN and "unknown" states
                # only keep real values
                try:
                    self._brightness_history.add_measurement(
                        int(state.state), state.last_updated
                    )
                except ValueError:
                    pass
        _LOGGER.debug("Initializing from database completed")
        self.async_write_ha_state()
Exemple #43
0
    async def _async_initialize_from_database(self):
        """Initialize the list of states from the database.

        The query will get the list of states in DESCENDING order so that we
        can limit the result to self._sample_size. Afterwards reverse the
        list so that we get it in the right order again.

        If MaxAge is provided then the query will restrict to entries
        younger than current datetime - MaxAge.
        """
        from homeassistant.components.recorder.models import States
        _LOGGER.debug("%s: initializing values from the database",
                      self.entity_id)

        with session_scope(hass=self.hass) as session:
            query = session.query(States)\
                .filter(States.entity_id == self._entity_id.lower())

            # Apply the optional age cutoff before ordering/limiting.
            if self._max_age is not None:
                records_older_then = dt_util.utcnow() - self._max_age
                _LOGGER.debug("%s: retrieve records not older then %s",
                              self.entity_id, records_older_then)
                query = query.filter(States.last_updated >= records_older_then)
            else:
                _LOGGER.debug("%s: retrieving all records.", self.entity_id)

            # Newest first, capped at the configured sample size.
            query = query\
                .order_by(States.last_updated.desc())\
                .limit(self._sampling_size)
            states = execute(query)

        # Replay oldest-first so the queue is in chronological order.
        for state in reversed(states):
            self._add_state_to_queue(state)

        self.async_schedule_update_ha_state(True)

        _LOGGER.debug("%s: initializing from database completed",
                      self.entity_id)
Exemple #44
0
async def test_state_gets_saved_when_set_before_start_event(
        hass: HomeAssistant,
        async_setup_recorder_instance: SetupRecorderInstanceT):
    """Test we can record an event when starting with not running."""

    hass.state = CoreState.not_running

    await async_init_recorder_component(hass)

    entity_id = "test.recorder"
    state = "restoring_from_db"
    attributes = {"test_attr": 5, "test_attr_10": "nice"}

    # Set the state before core start; the recorder must still pick it
    # up once EVENT_HOMEASSISTANT_STARTED fires.
    hass.states.async_set(entity_id, state, attributes)
    hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)

    await async_wait_recording_done_without_instance(hass)

    with session_scope(hass=hass) as session:
        saved = list(session.query(States))
        assert len(saved) == 1
        assert saved[0].event_id > 0
Exemple #45
0
def test_end_incomplete_runs(hass_recorder, caplog):
    """Ensure we can end incomplete runs."""
    hass = hass_recorder()

    with session_scope(hass=hass) as session:
        run_info = run_information_with_session(session)
        assert isinstance(run_info, RecorderRuns)
        assert run_info.closed_incorrect is False

        # Closing an open run marks it closed_incorrect and stamps the
        # end time (stored without tzinfo).
        now = dt_util.utcnow()
        now_without_tz = now.replace(tzinfo=None)
        end_incomplete_runs(session, now)
        run_info = run_information_with_session(session)
        assert run_info.closed_incorrect is True
        assert run_info.end == now_without_tz
        session.flush()

        # A second close attempt must not overwrite the original end time.
        later = dt_util.utcnow()
        end_incomplete_runs(session, later)
        run_info = run_information_with_session(session)
        assert run_info.end == now_without_tz

    assert "Ended unfinished session" in caplog.text
    def test_saving_event(self):
        """Test saving and restoring an event."""
        event_type = 'EVENT_TEST'
        event_data = {'test_attr': 5, 'test_attr_10': 'nice'}

        events = []

        @callback
        def event_listener(event):
            """Record events from eventbus."""
            if event.event_type == event_type:
                events.append(event)

        self.hass.bus.listen(MATCH_ALL, event_listener)

        self.hass.bus.fire(event_type, event_data)

        self.hass.block_till_done()

        assert len(events) == 1
        event = events[0]

        # Wait for the recorder thread to commit the event.
        self.hass.data[DATA_INSTANCE].block_till_done()

        with session_scope(hass=self.hass) as session:
            db_events = list(session.query(Events).filter_by(
                event_type=event_type))
            assert len(db_events) == 1
            db_event = db_events[0].to_native()

        # The round-tripped event must match what was fired on the bus.
        assert event.event_type == db_event.event_type
        assert event.data == db_event.data
        assert event.origin == db_event.origin

        # Recorder uses SQLite and stores datetimes as integer unix timestamps
        assert event.time_fired.replace(microsecond=0) == \
            db_event.time_fired.replace(microsecond=0)
Exemple #47
0
async def test_saving_many_states(
        hass: HomeAssistant,
        async_setup_recorder_instance: SetupRecorderInstanceT):
    """Test we expire after many commits."""
    instance = await async_setup_recorder_instance(hass)

    entity_id = "test.recorder"
    attributes = {"test_attr": 5, "test_attr_10": "nice"}

    # Lower EXPIRE_AFTER_COMMITS to 2 so three on/off cycles (6 commits)
    # are guaranteed to trigger session.expire_all().
    with patch.object(hass.data[DATA_INSTANCE].event_session,
                      "expire_all") as expire_all, patch.object(
                          recorder, "EXPIRE_AFTER_COMMITS", 2):
        for _ in range(3):
            hass.states.async_set(entity_id, "on", attributes)
            await async_wait_recording_done(hass, instance)
            hass.states.async_set(entity_id, "off", attributes)
            await async_wait_recording_done(hass, instance)

    assert expire_all.called

    # All six state writes must still have been recorded.
    with session_scope(hass=hass) as session:
        db_states = list(session.query(States))
        assert len(db_states) == 6
        assert db_states[0].event_id > 0
Exemple #48
0
def get_last_state_changes(hass, number_of_states, entity_id):
    """Return the last number_of_states."""
    start_time = dt_util.utcnow()

    with session_scope(hass=hass) as session:
        # Build a cached ("baked") query; bindparam() placeholders let the
        # compiled SQL be reused across calls with different parameters.
        baked_query = hass.data[HISTORY_BAKERY](
            lambda session: session.query(*QUERY_STATES)
        )
        # Only genuine state changes (not attribute-only updates).
        baked_query += lambda q: q.filter(States.last_changed == States.last_updated)

        if entity_id is not None:
            baked_query += lambda q: q.filter_by(entity_id=bindparam("entity_id"))
            entity_id = entity_id.lower()

        baked_query += lambda q: q.order_by(
            States.entity_id, States.last_updated.desc()
        )

        baked_query += lambda q: q.limit(bindparam("number_of_states"))

        states = execute(
            baked_query(session).params(
                number_of_states=number_of_states, entity_id=entity_id
            )
        )

        entity_ids = [entity_id] if entity_id is not None else None

        # Rows are newest-first; reverse for chronological output.
        return _sorted_states_to_dict(
            hass,
            session,
            reversed(states),
            start_time,
            entity_ids,
            include_start_time_state=False,
        )
Exemple #49
0
def test_saving_state_with_exception(hass, hass_recorder, caplog):
    """Test saving and restoring a state."""
    hass = hass_recorder()

    entity_id = "test.recorder"
    state = "restoring_from_db"
    attributes = {"test_attr": 5, "test_attr_10": "nice"}

    def _throw_if_state_in_session(*args, **kwargs):
        # Fail the flush only while a States row is pending, simulating a
        # database error during the state insert.
        for obj in hass.data[DATA_INSTANCE].event_session:
            if isinstance(obj, States):
                raise OperationalError(
                    "insert the state", "fake params", "forced to fail"
                )

    # Patch time.sleep so the recorder's retry loop doesn't slow the test.
    with patch("time.sleep"), patch.object(
        hass.data[DATA_INSTANCE].event_session,
        "flush",
        side_effect=_throw_if_state_in_session,
    ):
        hass.states.set(entity_id, "fail", attributes)
        wait_recording_done(hass)

    assert "Error executing query" in caplog.text
    assert "Error saving events" not in caplog.text

    # After the fault is removed the recorder must work normally again.
    caplog.clear()
    hass.states.set(entity_id, state, attributes)
    wait_recording_done(hass)

    with session_scope(hass=hass) as session:
        db_states = list(session.query(States))
        assert len(db_states) >= 1

    assert "Error executing query" not in caplog.text
    assert "Error saving events" not in caplog.text
Exemple #50
0
def test_saving_sets_old_state(hass_recorder):
    """Test saving sets old state."""
    hass = hass_recorder()

    hass.states.set("test.one", "on", {})
    hass.states.set("test.two", "on", {})
    wait_recording_done(hass)
    hass.states.set("test.one", "off", {})
    hass.states.set("test.two", "off", {})
    wait_recording_done(hass)

    with session_scope(hass=hass) as session:
        rows = list(session.query(States))
        assert len(rows) == 4

        # Rows appear in insertion order: one/on, two/on, one/off, two/off.
        assert [row.entity_id for row in rows] == [
            "test.one", "test.two", "test.one", "test.two"]

        # The first write of each entity has no predecessor; the second
        # write links back to the first via old_state_id.
        assert rows[0].old_state_id is None
        assert rows[1].old_state_id is None
        assert rows[2].old_state_id == rows[0].state_id
        assert rows[3].old_state_id == rows[1].state_id
def get_states(hass, utc_point_in_time, entity_ids=None, run=None,
               filters=None):
    """Return the states at a specific point in time.

    Uses a fast single-entity query when exactly one entity_id is given;
    otherwise finds, per entity, the newest state recorded in the current
    recorder run before ``utc_point_in_time``.
    """
    from homeassistant.components.recorder.models import States

    if run is None:
        run = recorder.run_information(hass, utc_point_in_time)

        # History did not run before utc_point_in_time
        if run is None:
            return []

    from sqlalchemy import and_, func

    with session_scope(hass=hass) as session:
        if entity_ids and len(entity_ids) == 1:
            # Use an entirely different (and extremely fast) query if we only
            # have a single entity id
            most_recent_state_ids = session.query(
                States.state_id.label('max_state_id')
            ).filter(
                (States.last_updated < utc_point_in_time) &
                (States.entity_id.in_(entity_ids))
            ).order_by(
                States.last_updated.desc())

            most_recent_state_ids = most_recent_state_ids.limit(1)

        else:
            # We have more than one entity to look at (most commonly we want
            # all entities,) so we need to do a search on all states since the
            # last recorder run started.

            most_recent_states_by_date = session.query(
                States.entity_id.label('max_entity_id'),
                func.max(States.last_updated).label('max_last_updated')
            ).filter(
                (States.last_updated >= run.start) &
                (States.last_updated < utc_point_in_time)
            )

            if entity_ids:
                # BUGFIX: Query.filter() returns a new query; the original
                # discarded the result, so the entity_ids restriction was
                # silently ignored.  Assign it back.
                most_recent_states_by_date = most_recent_states_by_date.filter(
                    States.entity_id.in_(entity_ids))

            most_recent_states_by_date = most_recent_states_by_date.group_by(
                States.entity_id)

            most_recent_states_by_date = most_recent_states_by_date.subquery()

            # Map each (entity_id, max_last_updated) pair to its state_id.
            most_recent_state_ids = session.query(
                func.max(States.state_id).label('max_state_id')
            ).join(most_recent_states_by_date, and_(
                States.entity_id == most_recent_states_by_date.c.max_entity_id,
                States.last_updated == most_recent_states_by_date.c.
                max_last_updated))

            most_recent_state_ids = most_recent_state_ids.group_by(
                States.entity_id)

        most_recent_state_ids = most_recent_state_ids.subquery()

        # Join back to States for the full rows, dropping ignored domains.
        query = session.query(States).join(
            most_recent_state_ids,
            States.state_id == most_recent_state_ids.c.max_state_id
        ).filter((~States.domain.in_(IGNORE_DOMAINS)))

        if filters:
            query = filters.apply(query, entity_ids)

        return [state for state in execute(query)
                if not state.attributes.get(ATTR_HIDDEN, False)]
Exemple #52
0
def get_significant_states(hass, *args, **kwargs):
    """Wrap _get_significant_states with a sql session."""
    # Open a scoped session and delegate all the work to the helper.
    with session_scope(hass=hass) as db_session:
        return _get_significant_states(hass, db_session, *args, **kwargs)
Exemple #53
0
def _get_events(
    hass,
    start_day,
    end_day,
    entity_ids=None,
    filters=None,
    entities_filter=None,
    entity_matches_only=False,
    context_id=None,
):
    """Get events for a period of time.

    Builds the logbook query for [start_day, end_day): either a narrow
    union of events + states for specific entity_ids, or a full join of
    all events and states, and returns the humanified result list.
    """
    assert not (entity_ids
                and context_id), "can't pass in both entity_ids and context_id"

    entity_attr_cache = EntityAttributeCache(hass)
    # Maps context_id -> first event seen with that context (None key for
    # events without a context).
    context_lookup = {None: None}

    def yield_events(query):
        """Yield Events that are not filtered away."""
        for row in query.yield_per(1000):
            event = LazyEventPartialState(row)
            # Remember the first event per context for later lookups.
            context_lookup.setdefault(event.context_id, event)
            if event.event_type == EVENT_CALL_SERVICE:
                continue
            if event.event_type == EVENT_STATE_CHANGED or _keep_event(
                    hass, event, entities_filter):
                yield event

    if entity_ids is not None:
        # Restrict the filter to exactly the requested entities.
        entities_filter = generate_filter([], entity_ids, [], [])

    with session_scope(hass=hass) as session:
        old_state = aliased(States, name="old_state")

        if entity_ids is not None:
            # Narrow path: non-state-changed events UNION state rows for
            # the requested entities.
            query = _generate_events_query_without_states(session)
            query = _apply_event_time_filter(query, start_day, end_day)
            query = _apply_event_types_filter(
                hass, query, ALL_EVENT_TYPES_EXCEPT_STATE_CHANGED)
            if entity_matches_only:
                # When entity_matches_only is provided, contexts and events that do not
                # contain the entity_ids are not included in the logbook response.
                query = _apply_event_entity_id_matchers(query, entity_ids)

            query = query.union_all(
                _generate_states_query(session, start_day, end_day, old_state,
                                       entity_ids))
        else:
            # Broad path: all events joined with states; state rows must
            # be genuine changes (last_updated == last_changed).
            query = _generate_events_query(session)
            query = _apply_event_time_filter(query, start_day, end_day)
            query = _apply_events_types_and_states_filter(
                hass, query,
                old_state).filter((States.last_updated == States.last_changed)
                                  | (Events.event_type != EVENT_STATE_CHANGED))
            if filters:
                query = query.filter(filters.entity_filter() | (
                    Events.event_type != EVENT_STATE_CHANGED))

            if context_id is not None:
                query = query.filter(Events.context_id == context_id)

        query = query.order_by(Events.time_fired)

        return list(
            humanify(hass, yield_events(query), entity_attr_cache,
                     context_lookup))
Exemple #54
0
async def test_purge_filtered_events_state_changed(
    hass: HomeAssistantType,
    async_setup_recorder_instance: SetupRecorderInstanceT,
):
    """Test filtered state_changed events are purged. This should also remove all states."""
    config: ConfigType = {"exclude": {"event_types": [EVENT_STATE_CHANGED]}}
    instance = await async_setup_recorder_instance(hass, config)
    # Assert entity_id is NOT excluded
    assert instance.entity_filter("sensor.excluded") is True

    def _add_db_entries(hass: HomeAssistantType) -> None:
        with recorder.session_scope(hass=hass) as session:
            # Add states and state_changed events that should be purged
            for days in range(1, 4):
                timestamp = dt_util.utcnow() - timedelta(days=days)
                for event_id in range(1000, 1020):
                    _add_state_and_state_changed_event(
                        session,
                        "sensor.excluded",
                        "purgeme",
                        timestamp,
                        event_id * days,
                    )
            # Add events that should be kept
            timestamp = dt_util.utcnow() - timedelta(days=1)
            for event_id in range(200, 210):
                session.add(
                    Events(
                        event_id=event_id,
                        event_type="EVENT_KEEP",
                        event_data="{}",
                        origin="LOCAL",
                        created=timestamp,
                        time_fired=timestamp,
                    ))
            # Add states with linked old_state_ids that need to be handled
            timestamp = dt_util.utcnow() - timedelta(days=0)
            state_1 = States(
                entity_id="sensor.linked_old_state_id",
                domain="sensor",
                state="keep",
                attributes="{}",
                last_changed=timestamp,
                last_updated=timestamp,
                created=timestamp,
                old_state_id=1,
            )
            timestamp = dt_util.utcnow() - timedelta(days=4)
            state_2 = States(
                entity_id="sensor.linked_old_state_id",
                domain="sensor",
                state="keep",
                attributes="{}",
                last_changed=timestamp,
                last_updated=timestamp,
                created=timestamp,
                old_state_id=2,
            )
            state_3 = States(
                entity_id="sensor.linked_old_state_id",
                domain="sensor",
                state="keep",
                attributes="{}",
                last_changed=timestamp,
                last_updated=timestamp,
                created=timestamp,
                old_state_id=62,  # keep
            )
            session.add_all((state_1, state_2, state_3))

    service_data = {"keep_days": 10, "apply_filter": True}
    _add_db_entries(hass)

    with session_scope(hass=hass) as session:
        events_keep = session.query(Events).filter(
            Events.event_type == "EVENT_KEEP")
        events_purge = session.query(Events).filter(
            Events.event_type == EVENT_STATE_CHANGED)
        states = session.query(States)

        # Sanity check: 60 purgeable state_changed events, 10 keepers,
        # 60 purgeable states + 3 linked keeper states.
        assert events_keep.count() == 10
        assert events_purge.count() == 60
        assert states.count() == 63

        await hass.services.async_call(recorder.DOMAIN, recorder.SERVICE_PURGE,
                                       service_data)
        await hass.async_block_till_done()

        # Two rounds: the purge runs in batches and reschedules itself.
        await async_recorder_block_till_done(hass, instance)
        await async_wait_purge_done(hass, instance)

        await async_recorder_block_till_done(hass, instance)
        await async_wait_purge_done(hass, instance)

        # Only the keeper events and the three linked states survive.
        assert events_keep.count() == 10
        assert events_purge.count() == 0
        assert states.count() == 3

        # Dangling old_state_id references to purged rows are nulled out.
        assert session.query(States).get(61).old_state_id is None
        assert session.query(States).get(62).old_state_id is None
        assert session.query(States).get(
            63).old_state_id == 62  # should have been kept
Exemple #55
0
async def test_purge_filtered_events(
    hass: HomeAssistantType,
    async_setup_recorder_instance: SetupRecorderInstanceT,
):
    """Test filtered events are purged."""
    config: ConfigType = {"exclude": {"event_types": ["EVENT_PURGE"]}}
    instance = await async_setup_recorder_instance(hass, config)

    def _add_db_entries(hass: HomeAssistantType) -> None:
        with recorder.session_scope(hass=hass) as session:
            # Add events that should be purged
            for days in range(1, 4):
                timestamp = dt_util.utcnow() - timedelta(days=days)
                for event_id in range(1000, 1020):
                    session.add(
                        Events(
                            event_id=event_id * days,
                            event_type="EVENT_PURGE",
                            event_data="{}",
                            origin="LOCAL",
                            created=timestamp,
                            time_fired=timestamp,
                        ))

            # Add states and state_changed events that should be kept
            timestamp = dt_util.utcnow() - timedelta(days=1)
            for event_id in range(200, 210):
                _add_state_and_state_changed_event(
                    session,
                    "sensor.keep",
                    "keep",
                    timestamp,
                    event_id,
                )

    service_data = {"keep_days": 10}
    _add_db_entries(hass)

    with session_scope(hass=hass) as session:
        events_purge = session.query(Events).filter(
            Events.event_type == "EVENT_PURGE")
        events_keep = session.query(Events).filter(
            Events.event_type == EVENT_STATE_CHANGED)
        states = session.query(States)

        # Sanity check: 60 excluded events, 10 keeper events and states.
        assert events_purge.count() == 60
        assert events_keep.count() == 10
        assert states.count() == 10

        # Normal purge doesn't remove excluded events
        await hass.services.async_call(recorder.DOMAIN, recorder.SERVICE_PURGE,
                                       service_data)
        await hass.async_block_till_done()

        await async_recorder_block_till_done(hass, instance)
        await async_wait_purge_done(hass, instance)

        assert events_purge.count() == 60
        assert events_keep.count() == 10
        assert states.count() == 10

        # Test with 'apply_filter' = True
        service_data["apply_filter"] = True
        await hass.services.async_call(recorder.DOMAIN, recorder.SERVICE_PURGE,
                                       service_data)
        await hass.async_block_till_done()

        # Two rounds: the filtered purge runs in batches.
        await async_recorder_block_till_done(hass, instance)
        await async_wait_purge_done(hass, instance)

        await async_recorder_block_till_done(hass, instance)
        await async_wait_purge_done(hass, instance)

        # Only the excluded events are gone; keepers are untouched.
        assert events_purge.count() == 0
        assert events_keep.count() == 10
        assert states.count() == 10
Exemple #56
0
async def test_purge_method(
    hass: HomeAssistantType,
    async_setup_recorder_instance: SetupRecorderInstanceT,
    caplog,
):
    """Test the ``recorder.purge`` service end to end.

    Seeds the database with test events, states, and recorder runs, then
    exercises the purge service three ways: with default service data, with an
    explicit ``keep_days``, and with ``repack`` enabled.  After each call it
    asserts the expected row counts directly via SQLAlchemy queries.
    """
    instance = await async_setup_recorder_instance(hass)

    service_data = {"keep_days": 4}
    # Seed fixtures: the helpers insert rows spread across several days so
    # that a 4-day purge keeps some rows and removes others.
    await _add_test_events(hass, instance)
    await _add_test_states(hass, instance)
    await _add_test_recorder_runs(hass, instance)
    await hass.async_block_till_done()
    # Wait for the recorder thread to commit the seeded rows before querying.
    await async_wait_recording_done(hass, instance)

    # make sure we start with 6 states
    with session_scope(hass=hass) as session:
        # NOTE: these Query objects are re-evaluated each time .count()/.all()
        # is called, so the same objects are reused after each purge below.
        states = session.query(States)
        assert states.count() == 6

        events = session.query(Events).filter(
            Events.event_type.like("EVENT_TEST%"))
        assert events.count() == 6

        recorder_runs = session.query(RecorderRuns)
        assert recorder_runs.count() == 7
        # Snapshot the runs so we can verify which ones survive the purge.
        runs_before_purge = recorder_runs.all()

        await hass.async_block_till_done()
        await async_wait_purge_done(hass, instance)

        # run purge method - no service data, use defaults
        await hass.services.async_call("recorder", "purge")
        await hass.async_block_till_done()

        # Small wait for recorder thread
        await async_wait_purge_done(hass, instance)

        # only purged old events
        assert states.count() == 4
        assert events.count() == 4

        # run purge method - correct service data
        await hass.services.async_call("recorder",
                                       "purge",
                                       service_data=service_data)
        await hass.async_block_till_done()

        # Small wait for recorder thread
        await async_wait_purge_done(hass, instance)

        # we should only have 2 states left after purging
        assert states.count() == 2

        # now we should only have 2 events left
        assert events.count() == 2

        # now we should only have 3 recorder runs left
        runs = recorder_runs.all()
        # The first (oldest-referenced) and the two most recent runs from the
        # pre-purge snapshot are the ones expected to survive.
        assert runs[0] == runs_before_purge[0]
        assert runs[1] == runs_before_purge[5]
        assert runs[2] == runs_before_purge[6]

        # Purged event types must no longer be present among the survivors.
        assert "EVENT_TEST_PURGE" not in (event.event_type
                                          for event in events.all())

        # run purge method - correct service data, with repack
        service_data["repack"] = True
        await hass.services.async_call("recorder",
                                       "purge",
                                       service_data=service_data)
        await hass.async_block_till_done()
        await async_wait_purge_done(hass, instance)
        # Repack on SQLite triggers a VACUUM, which the recorder logs.
        assert "Vacuuming SQL DB to free space" in caplog.text