Example #1
0
def test_service_disable_states_not_recording(opp, opp_recorder):
    """Test that state changes are not recorded when recorder is disabled using service."""
    opp = opp_recorder()

    # Turn recording off via the disable service.
    assert opp.services.call(DOMAIN, SERVICE_DISABLE, {}, blocking=True)

    opp.states.set("test.one", "on", {})
    wait_recording_done(opp)

    # Nothing may have been written while recording was disabled.
    with session_scope(opp=opp) as session:
        assert len(list(session.query(States))) == 0

    # Turn recording back on.
    assert opp.services.call(DOMAIN, SERVICE_ENABLE, {}, blocking=True)

    opp.states.set("test.two", "off", {})
    wait_recording_done(opp)

    # Only the post-enable state change is stored.
    with session_scope(opp=opp) as session:
        stored = list(session.query(States))
        assert len(stored) == 1
        assert stored[0].event_id > 0
        assert stored[0].to_native() == _state_empty_context(opp, "test.two")
Example #2
0
def _add_events(opp, events):
    """Clear the events table, fire the given event types, return stored events."""
    with session_scope(opp=opp) as session:
        session.query(Events).delete(synchronize_session=False)

    for evt_type in events:
        opp.bus.fire(evt_type)
    wait_recording_done(opp)

    with session_scope(opp=opp) as session:
        return [row.to_native() for row in session.query(Events)]
Example #3
0
def _add_events(opp, events):
    """Clear the events table, fire events one at a time, return stored events."""
    with session_scope(opp=opp) as session:
        session.query(Events).delete(synchronize_session=False)

    for evt_type in events:
        opp.bus.fire(evt_type)
        # Drain the event loop after each fire so events arrive in order.
        opp.block_till_done()
    opp.data[DATA_INSTANCE].block_till_done()

    with session_scope(opp=opp) as session:
        return [row.to_native() for row in session.query(Events)]
Example #4
0
def test_entity_id_filter(opp_recorder):
    """Test that entity ID filtering filters string and list."""
    opp = opp_recorder(
        {"include": {"domains": "hello"}, "exclude": {"domains": "hidden_domain"}}
    )

    # Each payload here should pass the include/exclude filter and be stored.
    recorded_payloads = (
        {},
        {"entity_id": "hello.world"},
        {"entity_id": ["hello.world"]},
        {"entity_id": ["hello.world", "hidden_domain.person"]},
        {"entity_id": {"unexpected": "data"}},
    )
    for idx, data in enumerate(recorded_payloads):
        opp.bus.fire("hello", data)
        wait_recording_done(opp)

        with session_scope(opp=opp) as session:
            db_events = list(session.query(Events).filter_by(event_type="hello"))
            assert len(db_events) == idx + 1, data

    # These payloads are excluded and must not add rows.
    filtered_payloads = (
        {"entity_id": "hidden_domain.person"},
        {"entity_id": ["hidden_domain.person"]},
    )
    for data in filtered_payloads:
        opp.bus.fire("hello", data)
        wait_recording_done(opp)

        with session_scope(opp=opp) as session:
            db_events = list(session.query(Events).filter_by(event_type="hello"))
            # Keep referring idx + 1, as no new events are being added
            assert len(db_events) == idx + 1, data
Example #5
0
def test_run_information(opp_recorder):
    """Ensure run_information returns expected data."""
    before_start_recording = dt_util.utcnow()
    opp = opp_recorder()

    def _assert_open_run(info):
        """Every lookup path must yield a RecorderRuns that closed correctly."""
        assert isinstance(info, RecorderRuns)
        assert info.closed_incorrect is False

    _assert_open_run(run_information_from_instance(opp))

    with session_scope(opp=opp) as session:
        _assert_open_run(run_information_with_session(session))

    _assert_open_run(run_information(opp))

    opp.states.set("test.two", "on", {})
    wait_recording_done(opp)
    _assert_open_run(run_information(opp))

    # Before the recorder started there is no run to report.
    assert run_information(opp, before_start_recording) is None

    _assert_open_run(run_information(opp, dt_util.utcnow()))
Example #6
0
async def test_purge_old_states(
    opp: OpenPeerPower, async_setup_recorder_instance: SetupRecorderInstanceT
):
    """Test deleting old states."""
    instance = await async_setup_recorder_instance(opp)

    await _add_test_states(opp, instance)

    # make sure we start with 6 states
    with session_scope(opp=opp) as session:
        # Lazy query objects: count() re-evaluates against current DB state,
        # so the same objects can be asserted again after each purge pass.
        states = session.query(States)
        assert states.count() == 6
        # Oldest state has no predecessor; the newest links to the prior one.
        assert states[0].old_state_id is None
        assert states[-1].old_state_id == states[-2].state_id

        events = session.query(Events).filter(Events.event_type == "state_changed")
        assert events.count() == 6

        # run purge_old_data()
        finished = purge_old_data(instance, 4, repack=False)
        # First pass deletes in batches and reports more work remaining.
        assert not finished
        assert states.count() == 2

        # After purging, the surviving rows must still chain correctly:
        # the older survivor has no predecessor, the newer links to it.
        states_after_purge = session.query(States)
        assert states_after_purge[1].old_state_id == states_after_purge[0].state_id
        assert states_after_purge[0].old_state_id is None

        # Second pass has nothing left to delete and reports completion.
        finished = purge_old_data(instance, 4, repack=False)
        assert finished
        assert states.count() == 2
Example #7
0
def state_changes_during_period(opp,
                                start_time,
                                end_time=None,
                                entity_id=None):
    """Return states changes during UTC period start_time - end_time."""
    with session_scope(opp=opp) as session:
        # Baked (cached) query: the bakery keys on each lambda's code
        # object, so every lambda below must remain a distinct literal.
        baked_query = opp.data[HISTORY_BAKERY](
            lambda session: session.query(*QUERY_STATES))

        # Only rows where last_changed == last_updated — presumably this
        # excludes attribute-only updates; confirm against the States model.
        baked_query += lambda q: q.filter(
            (States.last_changed == States.last_updated)
            & (States.last_updated > bindparam("start_time")))

        if end_time is not None:
            baked_query += lambda q: q.filter(States.last_updated < bindparam(
                "end_time"))

        if entity_id is not None:
            baked_query += lambda q: q.filter_by(entity_id=bindparam(
                "entity_id"))
            # Normalize to lowercase before binding the parameter value.
            entity_id = entity_id.lower()

        baked_query += lambda q: q.order_by(States.entity_id, States.
                                            last_updated)

        # Bind parameters that were never referenced (e.g. end_time when the
        # filter was skipped) are simply ignored by the baked query.
        states = execute(
            baked_query(session).params(start_time=start_time,
                                        end_time=end_time,
                                        entity_id=entity_id))

        entity_ids = [entity_id] if entity_id is not None else None

        return _sorted_states_to_dict(opp, session, states, start_time,
                                      entity_ids)
Example #8
0
    def _load_history_from_db(self):
        """Load the history of the brightness values from the database.

        This only needs to be done once during startup.
        """
        start_date = datetime.now() - timedelta(days=self._conf_check_days)
        entity_id = self._readingmap.get(READING_BRIGHTNESS)
        if entity_id is None:
            _LOGGER.debug("Not reading the history from the database as "
                          "there is no brightness sensor configured")
            return

        _LOGGER.debug("Initializing values for %s from the database",
                      self._name)
        with session_scope(opp=self.opp) as session:
            # BUGFIX: the two conditions were previously combined with the
            # Python `and` operator, which truthiness-evaluates one SQLAlchemy
            # clause and returns a single operand — silently dropping one of
            # the filters from the emitted SQL.  SQLAlchemy clauses must be
            # combined with `&` (or and_()) so both conditions apply.
            query = (session.query(States).filter(
                (States.entity_id == entity_id.lower())
                & (States.last_updated > start_date)).order_by(
                    States.last_updated.asc()))
            states = execute(query, to_native=True, validate_entity_ids=False)

            for state in states:
                # filter out all None, NaN and "unknown" states
                # only keep real values
                with suppress(ValueError):
                    self._brightness_history.add_measurement(
                        int(state.state), state.last_updated)
        _LOGGER.debug("Initializing from database completed")
def _get_events(opp, config, start_day, end_day, entity_id=None):
    """Get events for a period of time."""
    entities_filter = _generate_filter_from_config(config)

    def yield_events(query):
        """Yield Events that are not filtered away."""
        # yield_per streams rows in chunks rather than loading all at once.
        for row in query.yield_per(500):
            event = row.to_native()
            if _keep_event(event, entities_filter):
                yield event

    with session_scope(opp=opp) as session:
        if entity_id is not None:
            # Single-entity request; normalize to the stored lowercase form.
            entity_ids = [entity_id.lower()]
        else:
            entity_ids = _get_related_entity_ids(session, entities_filter)

        # Outer-join events to their state rows so that pure events (no
        # state row → States.state_id IS NULL) are kept, while joined rows
        # must be real state changes (last_updated == last_changed) for one
        # of the relevant entities.
        query = (session.query(Events).order_by(Events.time_fired).outerjoin(
            States, (Events.event_id == States.event_id)).filter(
                Events.event_type.in_(ALL_EVENT_TYPES)
            ).filter((Events.time_fired > start_day)
                     & (Events.time_fired < end_day)
                     ).filter(((States.last_updated == States.last_changed)
                               & States.entity_id.in_(entity_ids))
                              | (States.state_id.is_(None))))

        # list() forces the generator while the session is still open.
        return list(humanify(opp, yield_events(query)))
Example #10
0
def test_saving_state_with_sqlalchemy_exception(opp, opp_recorder, caplog):
    """Test saving state when there is an SQLAlchemyError."""
    opp = opp_recorder()

    entity_id = "test.recorder"
    state = "restoring_from_db"
    attributes = {"test_attr": 5, "test_attr_10": "nice"}

    def _throw_if_state_in_session(*args, **kwargs):
        # Fail the flush only while a States row is pending in the session.
        if any(isinstance(obj, States)
               for obj in opp.data[DATA_INSTANCE].event_session):
            raise SQLAlchemyError("insert the state", "fake params",
                                  "forced to fail")

    with patch("time.sleep"), patch.object(
            opp.data[DATA_INSTANCE].event_session,
            "flush",
            side_effect=_throw_if_state_in_session,
    ):
        opp.states.set(entity_id, "fail", attributes)
        wait_recording_done(opp)

    # The recorder must have logged the failure while flush was broken.
    assert "SQLAlchemyError error processing event" in caplog.text

    caplog.clear()
    opp.states.set(entity_id, state, attributes)
    wait_recording_done(opp)

    # With flush restored, the state is persisted normally.
    with session_scope(opp=opp) as session:
        assert len(list(session.query(States))) >= 1

    for message in (
        "Error executing query",
        "Error saving events",
        "SQLAlchemyError error processing event",
    ):
        assert message not in caplog.text
Example #11
0
async def test_purge_old_states_encouters_database_corruption(
    opp: OpenPeerPower, async_setup_recorder_instance: SetupRecorderInstanceT
):
    """Test that a malformed database is moved away while deleting old states."""
    instance = await async_setup_recorder_instance(opp)

    await _add_test_states(opp, instance)
    await async_wait_recording_done_without_instance(opp)

    # Simulate sqlite reporting a corrupt database file during the purge.
    sqlite3_exception = DatabaseError("statement", {}, [])
    sqlite3_exception.__cause__ = sqlite3.DatabaseError()

    with patch(
        "openpeerpower.components.recorder.move_away_broken_database"
    ) as move_away, patch(
        "openpeerpower.components.recorder.purge.purge_old_data",
        side_effect=sqlite3_exception,
    ):
        await opp.services.async_call(
            recorder.DOMAIN, recorder.SERVICE_PURGE, {"keep_days": 0}
        )
        await opp.async_block_till_done()
        await async_wait_recording_done_without_instance(opp)

    # The broken database file must be moved aside, not left in place.
    assert move_away.called

    # Ensure the whole database was reset due to the database error
    with session_scope(opp=opp) as session:
        states_after_purge = session.query(States)
        assert states_after_purge.count() == 0
Example #12
0
def get_last_state_changes(opp, number_of_states, entity_id):
    """Return the last number_of_states."""
    start_time = dt_util.utcnow()

    with session_scope(opp=opp) as session:
        # Baked (cached) query: the bakery keys on each lambda's code
        # object, so the lambdas below must remain distinct literals.
        baked_query = opp.data[HISTORY_BAKERY](
            lambda session: session.query(*QUERY_STATES))
        # Only rows where last_changed == last_updated — presumably real
        # state changes rather than attribute-only writes; confirm model.
        baked_query += lambda q: q.filter(States.last_changed == States.
                                          last_updated)

        if entity_id is not None:
            baked_query += lambda q: q.filter_by(entity_id=bindparam(
                "entity_id"))
            # Normalize to lowercase before binding the parameter value.
            entity_id = entity_id.lower()

        # Newest first so the LIMIT keeps the most recent rows.
        baked_query += lambda q: q.order_by(States.entity_id,
                                            States.last_updated.desc())

        baked_query += lambda q: q.limit(bindparam("number_of_states"))

        states = execute(
            baked_query(session).params(number_of_states=number_of_states,
                                        entity_id=entity_id))

        entity_ids = [entity_id] if entity_id is not None else None

        # reversed() restores chronological (oldest-first) order for output.
        return _sorted_states_to_dict(
            opp,
            session,
            reversed(states),
            start_time,
            entity_ids,
            include_start_time_state=False,
        )
Example #13
0
def test_session_scope_not_setup(opp_recorder):
    """Try to create a session scope when not setup."""
    opp = opp_recorder()
    # With get_session patched to return None, entering the scope must raise.
    no_session = patch.object(
        opp.data[DATA_INSTANCE], "get_session", return_value=None)
    with no_session, pytest.raises(RuntimeError):
        with util.session_scope(opp=opp):
            pass
    def test_purge_method(self):
        """Test purge method."""
        service_data = {"keep_days": 4}
        self._add_test_events()
        self._add_test_states()

        # make sure we start with 6 states
        with session_scope(opp=self.opp) as session:
            # Lazy query objects: count() re-evaluates on each call, so the
            # same objects are re-asserted after every purge below.
            states = session.query(States)
            assert states.count() == 6

            events = session.query(Events).filter(
                Events.event_type.like("EVENT_TEST%"))
            assert events.count() == 6

            # Let any in-flight recorder work settle before purging.
            self.opp.data[DATA_INSTANCE].block_till_done()

            # run purge method - no service data, use defaults
            self.opp.services.call("recorder", "purge")
            self.opp.block_till_done()

            # Small wait for recorder thread
            self.opp.data[DATA_INSTANCE].block_till_done()

            # only purged old events
            assert states.count() == 4
            assert events.count() == 4

            # run purge method - correct service data
            self.opp.services.call("recorder",
                                   "purge",
                                   service_data=service_data)
            self.opp.block_till_done()

            # Small wait for recorder thread
            self.opp.data[DATA_INSTANCE].block_till_done()

            # we should only have 2 states left after purging
            assert states.count() == 2

            # now we should only have 2 events left
            assert events.count() == 2

            # None of the purge-marker events may survive.
            assert not ("EVENT_TEST_PURGE" in (event.event_type
                                               for event in events.all()))

            # run purge method - correct service data, with repack
            with patch("openpeerpower.components.recorder.purge._LOGGER"
                       ) as mock_logger:
                service_data["repack"] = True
                self.opp.services.call("recorder",
                                       "purge",
                                       service_data=service_data)
                self.opp.block_till_done()
                self.opp.data[DATA_INSTANCE].block_till_done()
                # The repack path must log the vacuum step.
                assert (mock_logger.debug.mock_calls[3][1][0] ==
                        "Vacuuming SQL DB to free space")
Example #15
0
def _add_entities(opp, entity_ids):
    """Add entities."""
    shared_attrs = {"test_attr": 5, "test_attr_10": "nice"}
    for position, eid in enumerate(entity_ids):
        opp.states.set(eid, f"state{position}", shared_attrs)
    wait_recording_done(opp)

    with session_scope(opp=opp) as session:
        return [row.to_native() for row in session.query(States)]
Example #16
0
def _add_entities(opp, entity_ids):
    """Add entities."""
    shared_attrs = {"test_attr": 5, "test_attr_10": "nice"}
    for position, eid in enumerate(entity_ids):
        opp.states.set(eid, "state{}".format(position), shared_attrs)
        # Drain the event loop after each set so states land in order.
        opp.block_till_done()
    opp.data[DATA_INSTANCE].block_till_done()

    with session_scope(opp=opp) as session:
        return [row.to_native() for row in session.query(States)]
Example #17
0
async def test_purge_edge_case(
    opp: OpenPeerPower,
    async_setup_recorder_instance: SetupRecorderInstanceT,
):
    """Test states and events are purged even if they occurred shortly before purge_before."""

    async def _add_db_entries(opp: OpenPeerPower, timestamp: datetime) -> None:
        # Insert one event and one state row carrying the same timestamp,
        # linked through event_id 1001.
        with recorder.session_scope(opp=opp) as session:
            session.add(
                Events(
                    event_id=1001,
                    event_type="EVENT_TEST_PURGE",
                    event_data="{}",
                    origin="LOCAL",
                    created=timestamp,
                    time_fired=timestamp,
                )
            )
            session.add(
                States(
                    entity_id="test.recorder2",
                    domain="sensor",
                    state="purgeme",
                    attributes="{}",
                    last_changed=timestamp,
                    last_updated=timestamp,
                    created=timestamp,
                    event_id=1001,
                )
            )

    instance = await async_setup_recorder_instance(opp, None)
    await async_wait_purge_done(opp, instance)

    service_data = {"keep_days": 2}
    # Just one minute older than the keep_days cutoff — the edge case.
    timestamp = dt_util.utcnow() - timedelta(days=2, minutes=1)

    await _add_db_entries(opp, timestamp)
    with session_scope(opp=opp) as session:
        # Lazy query objects: count() re-evaluates after the purge below.
        states = session.query(States)
        assert states.count() == 1

        events = session.query(Events).filter(Events.event_type == "EVENT_TEST_PURGE")
        assert events.count() == 1

        await opp.services.async_call(
            recorder.DOMAIN, recorder.SERVICE_PURGE, service_data
        )
        await opp.async_block_till_done()

        await async_recorder_block_till_done(opp, instance)
        await async_wait_purge_done(opp, instance)

        # Both rows fall just before purge_before and must be removed.
        assert states.count() == 0
        assert events.count() == 0
Example #18
0
def test_service_disable_run_information_recorded(tmpdir):
    """Test that runs are still recorded when recorder is disabled."""
    test_db_file = tmpdir.mkdir("sqlite").join("test_run_info.db")
    dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}"

    opp = get_test_open_peer_power()
    setup_component(opp, DOMAIN, {DOMAIN: {CONF_DB_URL: dburl}})
    opp.start()
    wait_recording_done(opp)

    # First instance: exactly one run row, still open (no end time).
    with session_scope(opp=opp) as session:
        db_run_info = list(session.query(RecorderRuns))
        assert len(db_run_info) == 1
        assert db_run_info[0].start is not None
        assert db_run_info[0].end is None

    # Disable recording before shutting the first instance down.
    assert opp.services.call(
        DOMAIN,
        SERVICE_DISABLE,
        {},
        blocking=True,
    )

    wait_recording_done(opp)
    opp.stop()

    # Restart a fresh instance against the same database file.
    opp = get_test_open_peer_power()
    setup_component(opp, DOMAIN, {DOMAIN: {CONF_DB_URL: dburl}})
    opp.start()
    wait_recording_done(opp)

    # The first run was closed and a second open run was added — run
    # bookkeeping happens even though recording was disabled at shutdown.
    with session_scope(opp=opp) as session:
        db_run_info = list(session.query(RecorderRuns))
        assert len(db_run_info) == 2
        assert db_run_info[0].start is not None
        assert db_run_info[0].end is not None
        assert db_run_info[1].start is not None
        assert db_run_info[1].end is None

    opp.stop()
    def test_purge_old_states(self):
        """Test deleting old states."""
        self._add_test_states()

        with session_scope(opp=self.opp) as session:
            stored_states = session.query(States)
            # All six test states exist before the purge.
            assert stored_states.count() == 6

            # Purge everything older than four days.
            purge_old_data(self.opp.data[DATA_INSTANCE], 4, repack=False)

            # Only the two recent states survive.
            assert stored_states.count() == 2
Example #20
0
def test_recorder_bad_commit(opp_recorder):
    """Bad _commit should retry 3 times."""
    opp = opp_recorder()

    def work(session):
        """Bad work."""
        session.execute("select * from notthere")

    # Patch sleep so the retry back-off does not slow the test down.
    sleep_patch = patch("openpeerpower.components.recorder.time.sleep")
    with sleep_patch as e_mock, util.session_scope(opp=opp) as session:
        res = util.commit(session, work)

    # commit() gives up after three attempts and reports failure.
    assert res is False
    assert e_mock.call_count == 3
    def test_purge_old_events(self):
        """Test deleting old events."""
        self._add_test_events()

        with session_scope(opp=self.opp) as session:
            test_events = session.query(Events).filter(
                Events.event_type.like("EVENT_TEST%"))
            # All six test events exist before the purge.
            assert test_events.count() == 6

            # Purge everything older than four days.
            purge_old_data(self.opp.data[DATA_INSTANCE], 4, repack=False)

            # Only the two recent events survive.
            assert test_events.count() == 2
Example #22
0
def get_states(opp,
               utc_point_in_time,
               entity_ids=None,
               run=None,
               filters=None):
    """Return the states at a specific point in time."""
    resolved_run = run
    if resolved_run is None:
        resolved_run = recorder.run_information_from_instance(
            opp, utc_point_in_time)
        if resolved_run is None:
            # History did not run before utc_point_in_time.
            return []

    with session_scope(opp=opp) as session:
        return _get_states_with_session(opp, session, utc_point_in_time,
                                        entity_ids, resolved_run, filters)
Example #23
0
    def _sorted_significant_states_json(
        self,
        opp,
        start_time,
        end_time,
        entity_ids,
        include_start_time_state,
        significant_changes_only,
        minimal_response,
    ):
        """Fetch significant stats from the database as json."""
        timer_start = time.perf_counter()

        with session_scope(opp=opp) as session:
            result = (
                history._get_significant_states(  # pylint: disable=protected-access
                    opp,
                    session,
                    start_time,
                    end_time,
                    entity_ids,
                    self.filters,
                    include_start_time_state,
                    significant_changes_only,
                    minimal_response,
                )
            )

        # result is a mapping of state lists — presumably keyed by
        # entity_id (each list's first state identifies its entity below).
        result = list(result.values())
        if _LOGGER.isEnabledFor(logging.DEBUG):
            elapsed = time.perf_counter() - timer_start
            _LOGGER.debug("Extracted %d states in %fs", sum(map(len, result)), elapsed)

        # Optionally reorder the result to respect the ordering given
        # by any entities explicitly included in the configuration.
        if self.filters and self.use_include_order:
            sorted_result = []
            for order_entity in self.filters.included_entities:
                for state_list in result:
                    # Move the matching list to the front section; break so
                    # the mutated `result` is not iterated further this pass.
                    if state_list[0].entity_id == order_entity:
                        sorted_result.append(state_list)
                        result.remove(state_list)
                        break
            # Anything not explicitly ordered keeps its original position.
            sorted_result.extend(result)
            result = sorted_result

        return self.json(result)
Example #24
0
def test_saving_state_with_commit_interval_zero(opp_recorder):
    """Test saving a state with a commit interval of zero."""
    opp = opp_recorder({"commit_interval": 0})
    assert opp.data[DATA_INSTANCE].commit_interval == 0

    opp.states.set(
        "test.recorder",
        "restoring_from_db",
        {"test_attr": 5, "test_attr_10": "nice"},
    )

    wait_recording_done(opp)

    # With commit_interval=0 the state is committed without batching delay.
    with session_scope(opp=opp) as session:
        stored = list(session.query(States))
        assert len(stored) == 1
        assert stored[0].event_id > 0
Example #25
0
    def test_saving_state(self):
        """Test saving and restoring a state."""
        entity_id = "test.recorder"
        attributes = {"test_attr": 5, "test_attr_10": "nice"}

        self.opp.states.set(entity_id, "restoring_from_db", attributes)

        self.opp.block_till_done()
        self.opp.data[DATA_INSTANCE].block_till_done()

        with session_scope(opp=self.opp) as session:
            rows = list(session.query(States))
            assert len(rows) == 1
            assert rows[0].event_id > 0
            restored = rows[0].to_native()

        # The round-tripped state must equal the live in-memory state.
        assert restored == self.opp.states.get(entity_id)
Example #26
0
def test_saving_state_and_removing_entity(opp, opp_recorder):
    """Test saving the state of a removed entity."""
    opp = opp_recorder()
    entity_id = "lock.mine"
    opp.states.set(entity_id, STATE_LOCKED)
    opp.states.set(entity_id, STATE_UNLOCKED)
    opp.states.async_remove(entity_id)

    wait_recording_done(opp)

    with session_scope(opp=opp) as session:
        rows = list(session.query(States))
        assert len(rows) == 3
        # Two real states, then a removal tombstone whose state is None.
        assert [row.entity_id for row in rows] == [entity_id] * 3
        assert rows[0].state == STATE_LOCKED
        assert rows[1].state == STATE_UNLOCKED
        assert rows[2].state is None
Example #27
0
async def test_purge_old_recorder_runs(
    opp: OpenPeerPower, async_setup_recorder_instance: SetupRecorderInstanceT
):
    """Test deleting old recorder runs keeps current run."""
    instance = await async_setup_recorder_instance(opp)

    await _add_test_recorder_runs(opp, instance)

    with session_scope(opp=opp) as session:
        runs = session.query(RecorderRuns)
        # Seven runs exist before the purge.
        assert runs.count() == 7

        # First purge pass reports more work remaining.
        assert not purge_old_data(instance, 0, repack=False)

        # Second pass completes; only the current run is kept.
        assert purge_old_data(instance, 0, repack=False)
        assert runs.count() == 1
Example #28
0
def get_significant_states(
    opp,
    start_time,
    end_time=None,
    entity_ids=None,
    filters=None,
    include_start_time_state=True,
):
    """
    Return states changes during UTC period start_time - end_time.

    Significant states are all states where there is a state change,
    as well as all states from certain domains (for instance
    thermostat so that we get current temperature in our graphs).
    """
    timer_start = time.perf_counter()

    with session_scope(opp=opp) as session:
        # Significant domains keep every row; other domains only rows
        # where last_changed == last_updated (real state changes).
        query = session.query(States).filter(
            (States.domain.in_(SIGNIFICANT_DOMAINS)
             | (States.last_changed == States.last_updated))
            & (States.last_updated > start_time))

        if filters:
            query = filters.apply(query, entity_ids)

        if end_time is not None:
            query = query.filter(States.last_updated < end_time)

        query = query.order_by(States.last_updated)

        # NOTE(review): this generator is consumed by states_to_json AFTER
        # the session scope exits — presumably execute() materializes the
        # rows eagerly, otherwise this would touch a closed session; verify.
        states = (state for state in execute(query)
                  if (_is_significant(state)
                      and not state.attributes.get(ATTR_HIDDEN, False)))

    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug("get_significant_states took %fs", elapsed)

    return states_to_json(opp, states, start_time, entity_ids, filters,
                          include_start_time_state)
Example #29
0
async def test_purge_old_events(
    opp: OpenPeerPower, async_setup_recorder_instance: SetupRecorderInstanceT
):
    """Test deleting old events."""
    instance = await async_setup_recorder_instance(opp)

    await _add_test_events(opp, instance)

    with session_scope(opp=opp) as session:
        test_events = session.query(Events).filter(
            Events.event_type.like("EVENT_TEST%"))
        # Six test events exist before the purge.
        assert test_events.count() == 6

        # First purge pass deletes the old events but reports unfinished.
        assert not purge_old_data(instance, 4, repack=False)
        assert test_events.count() == 2

        # Second pass finds nothing more to delete and reports finished.
        assert purge_old_data(instance, 4, repack=False)
        assert test_events.count() == 2
Example #30
0
async def test_saving_state(
        opp: OpenPeerPower,
        async_setup_recorder_instance: SetupRecorderInstanceT):
    """Test saving and restoring a state."""
    instance = await async_setup_recorder_instance(opp)

    entity_id = "test.recorder"
    attributes = {"test_attr": 5, "test_attr_10": "nice"}

    opp.states.async_set(entity_id, "restoring_from_db", attributes)

    await async_wait_recording_done(opp, instance)

    with session_scope(opp=opp) as session:
        rows = list(session.query(States))
        assert len(rows) == 1
        assert rows[0].event_id > 0
        restored = rows[0].to_native()

    # The round-tripped state equals the live state with context stripped.
    assert restored == _state_empty_context(opp, entity_id)