async def test_event_to_db_model():
    """Test we can round trip Event conversion."""
    fired_at = dt_util.utcnow()
    event = ha.Event(
        "state_changed", {"some": "attr"}, ha.EventOrigin.local, fired_at
    )

    # Round trip through the ORM model and back to a native Event.
    round_tripped = Events.from_event(event).to_native()
    assert round_tripped == event

    # When event_data is overridden, the native event carries the override.
    round_tripped = Events.from_event(event, event_data="{}").to_native()
    event.data = {}
    assert round_tripped == event
async def _add_db_entries(hass: HomeAssistant, cutoff: datetime, rows: int) -> None:
    """Seed one KEEP row triple at the cutoff and rows-1 PURGE triples just before it."""
    just_before_cutoff = cutoff - timedelta(microseconds=1)

    def _insert_triple(session, row_id, event_type, state, when):
        # One Events/States/StateAttributes triple sharing the same id.
        session.add(
            Events(
                event_id=row_id,
                event_type=event_type,
                event_data="{}",
                origin="LOCAL",
                time_fired=when,
            )
        )
        session.add(
            States(
                entity_id="test.cutoff",
                state=state,
                attributes="{}",
                last_changed=when,
                last_updated=when,
                event_id=row_id,
                attributes_id=row_id,
            )
        )
        session.add(
            StateAttributes(
                shared_attrs="{}",
                hash=1234,
                attributes_id=row_id,
            )
        )

    with recorder.session_scope(hass=hass) as session:
        _insert_triple(session, 1000, "KEEP", "keep", cutoff)
        for offset in range(1, rows):
            _insert_triple(
                session, 1000 + offset, "PURGE", "purge", just_before_cutoff
            )
def _add_db_entries(hass: HomeAssistant) -> None:
    """Insert a legacy-format state (inline JSON attributes) and its event."""
    with recorder.session_scope(hass=hass) as session:
        # Add states and state_changed events that should be purged
        # in the legacy format
        event_id = 1021
        five_days_ago = dt_util.utcnow() - timedelta(days=5)
        legacy_state = States(
            entity_id="sensor.old_format",
            state=STATE_ON,
            attributes=json.dumps({"old": "not_using_state_attributes"}),
            last_changed=five_days_ago,
            last_updated=five_days_ago,
            event_id=event_id,
            state_attributes=None,
        )
        legacy_event = Events(
            event_id=event_id,
            event_type=EVENT_STATE_CHANGED,
            event_data="{}",
            origin="LOCAL",
            time_fired=five_days_ago,
        )
        session.add(legacy_state)
        session.add(legacy_event)
def _add_test_events(self):
    """Add a few events for testing."""
    now = datetime.now()
    five_days_ago = now - timedelta(days=5)
    eleven_days_ago = now - timedelta(days=11)
    event_data = {"test_attr": 5, "test_attr_10": "nice"}

    self.hass.block_till_done()
    self.hass.data[DATA_INSTANCE].block_till_done()

    # Two events per age bucket: auto-purgeable, purgeable, and recent.
    buckets = (
        (eleven_days_ago, "EVENT_TEST_AUTOPURGE"),
        (five_days_ago, "EVENT_TEST_PURGE"),
        (now, "EVENT_TEST"),
    )
    with recorder.session_scope(hass=self.hass) as session:
        for timestamp, event_type in buckets:
            for _ in range(2):
                session.add(
                    Events(
                        event_type=event_type,
                        event_data=json.dumps(event_data),
                        origin="LOCAL",
                        created=timestamp,
                        time_fired=timestamp,
                    )
                )
def _add_state_and_state_changed_event(
    session: Session,
    entity_id: str,
    state: str,
    timestamp: datetime,
    event_id: int,
) -> None:
    """Add state and state_changed event to database for testing."""
    shared = StateAttributes(
        hash=event_id,
        shared_attrs=json.dumps({entity_id: entity_id}),
    )
    state_row = States(
        entity_id=entity_id,
        state=state,
        attributes=None,
        last_changed=timestamp,
        last_updated=timestamp,
        event_id=event_id,
        state_attributes=shared,
    )
    event_row = Events(
        event_id=event_id,
        event_type=EVENT_STATE_CHANGED,
        event_data="{}",
        origin="LOCAL",
        time_fired=timestamp,
    )
    # Same insertion order as before: attributes, state, then event.
    session.add_all((shared, state_row, event_row))
def _add_db_entries(hass: HomeAssistantType) -> None:
    """Seed purgeable EVENT_PURGE events plus recent states that must be kept."""
    with recorder.session_scope(hass=hass) as session:
        # Add events that should be purged (1-3 days back, 20 per day).
        for age_days in range(1, 4):
            fired = dt_util.utcnow() - timedelta(days=age_days)
            session.add_all(
                Events(
                    event_id=base_id * age_days,
                    event_type="EVENT_PURGE",
                    event_data="{}",
                    origin="LOCAL",
                    created=fired,
                    time_fired=fired,
                )
                for base_id in range(1000, 1020)
            )

        # Add states and state_changed events that should be keeped
        recent = dt_util.utcnow() - timedelta(days=1)
        for keep_id in range(200, 210):
            _add_state_and_state_changed_event(
                session,
                "sensor.keep",
                "keep",
                recent,
                keep_id,
            )
async def _add_db_entries(hass: HomeAssistant, timestamp: datetime) -> None:
    """Insert one purgeable event/state/attributes triple at *timestamp*."""
    purge_event = Events(
        event_id=1001,
        event_type="EVENT_TEST_PURGE",
        event_data="{}",
        origin="LOCAL",
        time_fired=timestamp,
    )
    purge_state = States(
        entity_id="test.recorder2",
        state="purgeme",
        attributes="{}",
        last_changed=timestamp,
        last_updated=timestamp,
        event_id=1001,
        attributes_id=1002,
    )
    purge_attrs = StateAttributes(
        shared_attrs="{}",
        hash=1234,
        attributes_id=1002,
    )
    with recorder.session_scope(hass=hass) as session:
        # Same insertion order as adding each row individually.
        session.add_all((purge_event, purge_state, purge_attrs))
def _add_db_entries(hass: ha.HomeAssistant, point: datetime,
                    entity_ids: list[str]) -> None:
    """Insert one event/state/shared-attributes triple per entity id at *point*."""
    with recorder.session_scope(hass=hass) as session:
        for offset, entity_id in enumerate(entity_ids):
            event_pk = 1001 + offset
            attrs_pk = 1002 + offset
            session.add_all(
                (
                    Events(
                        event_id=event_pk,
                        event_type="state_changed",
                        event_data="{}",
                        origin="LOCAL",
                        time_fired=point,
                    ),
                    States(
                        entity_id=entity_id,
                        state="on",
                        attributes='{"name":"the light"}',
                        last_changed=point,
                        last_updated=point,
                        event_id=event_pk,
                        attributes_id=attrs_pk,
                    ),
                    StateAttributes(
                        shared_attrs='{"name":"the shared light"}',
                        hash=1234 + offset,
                        attributes_id=attrs_pk,
                    ),
                )
            )
def _add_test_events(self):
    """Add a few events for testing."""
    now = datetime.now()
    five_days_ago = now - timedelta(days=5)
    event_data = {'test_attr': 5, 'test_attr_10': 'nice'}

    self.hass.block_till_done()
    self.hass.data[DATA_INSTANCE].block_till_done()

    # Two old purgeable events followed by three recent keepers.
    schedule = [(five_days_ago, 'EVENT_TEST_PURGE')] * 2 + [(now, 'EVENT_TEST')] * 3
    with recorder.session_scope(hass=self.hass) as session:
        for timestamp, event_type in schedule:
            session.add(
                Events(
                    event_type=event_type,
                    event_data=json.dumps(event_data),
                    origin='LOCAL',
                    created=timestamp,
                    time_fired=timestamp,
                )
            )
def _add_db_entries(hass: HomeAssistant) -> None:
    """Seed excluded purgeable states, keeper events, and old_state_id-linked states."""
    with recorder.session_scope(hass=hass) as session:
        # Add states and state_changed events that should be purged
        for age_days in range(1, 4):
            stamp = dt_util.utcnow() - timedelta(days=age_days)
            for base_id in range(1000, 1020):
                _add_state_and_state_changed_event(
                    session,
                    "sensor.excluded",
                    "purgeme",
                    stamp,
                    base_id * age_days,
                )

        # Add events that should be keeped
        stamp = dt_util.utcnow() - timedelta(days=1)
        session.add_all(
            Events(
                event_id=keep_id,
                event_type="EVENT_KEEP",
                event_data="{}",
                origin="LOCAL",
                created=stamp,
                time_fired=stamp,
            )
            for keep_id in range(200, 210)
        )

        # Add states with linked old_state_ids that need to be handled
        def _linked_state(when, old_state_id):
            # Rows differ only in timestamp and old_state_id linkage.
            return States(
                entity_id="sensor.linked_old_state_id",
                domain="sensor",
                state="keep",
                attributes="{}",
                last_changed=when,
                last_updated=when,
                created=when,
                old_state_id=old_state_id,
            )

        fresh = dt_util.utcnow() - timedelta(days=0)
        four_days_back = dt_util.utcnow() - timedelta(days=4)
        session.add_all(
            (
                _linked_state(fresh, 1),
                _linked_state(four_days_back, 2),
                _linked_state(four_days_back, 62),  # keep
            )
        )
async def _add_test_events(hass: HomeAssistantType,
                           instance: recorder.Recorder):
    """Add a few events for testing."""
    utcnow = dt_util.utcnow()
    five_days_ago = utcnow - timedelta(days=5)
    eleven_days_ago = utcnow - timedelta(days=11)
    event_data = {"test_attr": 5, "test_attr_10": "nice"}

    await hass.async_block_till_done()
    await async_wait_recording_done(hass, instance)

    # Two events per age bucket: auto-purgeable, purgeable, and recent.
    buckets = (
        (eleven_days_ago, "EVENT_TEST_AUTOPURGE"),
        (five_days_ago, "EVENT_TEST_PURGE"),
        (utcnow, "EVENT_TEST"),
    )
    with recorder.session_scope(hass=hass) as session:
        for timestamp, event_type in buckets:
            for _ in range(2):
                session.add(
                    Events(
                        event_type=event_type,
                        event_data=json.dumps(event_data),
                        origin="LOCAL",
                        created=timestamp,
                        time_fired=timestamp,
                    )
                )
def _add_state_and_state_changed_event(
    session: Session,
    entity_id: str,
    state: str,
    timestamp: datetime,
    event_id: int,
) -> None:
    """Add state and state_changed event to database for testing."""
    state_row = States(
        entity_id=entity_id,
        domain="sensor",
        state=state,
        attributes="{}",
        last_changed=timestamp,
        last_updated=timestamp,
        created=timestamp,
        event_id=event_id,
    )
    event_row = Events(
        event_id=event_id,
        event_type=EVENT_STATE_CHANGED,
        event_data="{}",
        origin="LOCAL",
        created=timestamp,
        time_fired=timestamp,
    )
    # Same insertion order as before: state first, then its event.
    session.add_all((state_row, event_row))
def _add_test_events(self):
    """Add a few events for testing."""
    now = datetime.now()
    five_days_ago = now - timedelta(days=5)
    eleven_days_ago = now - timedelta(days=11)
    event_data = {'test_attr': 5, 'test_attr_10': 'nice'}

    self.hass.block_till_done()
    self.hass.data[DATA_INSTANCE].block_till_done()

    # Two events per age bucket: auto-purgeable, purgeable, and recent.
    buckets = (
        (eleven_days_ago, 'EVENT_TEST_AUTOPURGE'),
        (five_days_ago, 'EVENT_TEST_PURGE'),
        (now, 'EVENT_TEST'),
    )
    with recorder.session_scope(hass=self.hass) as session:
        for timestamp, event_type in buckets:
            for _ in range(2):
                session.add(
                    Events(
                        event_type=event_type,
                        event_data=json.dumps(event_data),
                        origin='LOCAL',
                        created=timestamp,
                        time_fired=timestamp,
                    )
                )

        # Add an event for the protected state; it is old enough to be
        # purged, so remember its id to assert later that it survived.
        protected_event = Events(
            event_type='EVENT_TEST_FOR_PROTECTED',
            event_data=json.dumps(event_data),
            origin='LOCAL',
            created=eleven_days_ago,
            time_fired=eleven_days_ago,
        )
        session.add(protected_event)
        session.flush()  # assign protected_event.event_id
        self._protected_event_id = protected_event.event_id
def run(self): """Start processing events to save.""" from homeassistant.components.recorder.models import Events, States import sqlalchemy.exc while True: try: self._setup_connection() self._setup_run() break except sqlalchemy.exc.SQLAlchemyError as e: log_error(e, retry_wait=CONNECT_RETRY_WAIT, rollback=False, message="Error during connection setup: %s") if self.purge_days is not None: def purge_ticker(event): """Rerun purge every second day.""" self._purge_old_data() track_point_in_utc_time(self.hass, purge_ticker, dt_util.utcnow() + timedelta(days=2)) track_point_in_utc_time(self.hass, purge_ticker, dt_util.utcnow() + timedelta(minutes=5)) while True: event = self.queue.get() if event == self.quit_object: self._close_run() self._close_connection() # pylint: disable=global-statement global _INSTANCE _INSTANCE = None self.queue.task_done() return if event.event_type == EVENT_TIME_CHANGED: self.queue.task_done() continue dbevent = Events.from_event(event) self._commit(dbevent) if event.event_type != EVENT_STATE_CHANGED: self.queue.task_done() continue dbstate = States.from_event(event) dbstate.event_id = dbevent.event_id self._commit(dbstate) self.queue.task_done()
async def _add_test_states(hass: HomeAssistantType,
                           instance: recorder.Recorder):
    """Add multiple states to the db for testing."""
    utcnow = dt_util.utcnow()
    five_days_ago = utcnow - timedelta(days=5)
    eleven_days_ago = utcnow - timedelta(days=11)
    attributes = {"test_attr": 5, "test_attr_10": "nice"}

    await hass.async_block_till_done()
    await async_wait_recording_done(hass, instance)

    # Two states per age bucket, chained together through old_state_id.
    buckets = (
        (eleven_days_ago, "autopurgeme"),
        (five_days_ago, "purgeme"),
        (utcnow, "dontpurgeme"),
    )
    with recorder.session_scope(hass=hass) as session:
        prev_state_id = None
        for timestamp, state_value in buckets:
            for _ in range(2):
                event_row = Events(
                    event_type="state_changed",
                    event_data="{}",
                    origin="LOCAL",
                    created=timestamp,
                    time_fired=timestamp,
                )
                session.add(event_row)
                session.flush()  # assign event_row.event_id
                state_row = States(
                    entity_id="test.recorder2",
                    domain="sensor",
                    state=state_value,
                    attributes=json.dumps(attributes),
                    last_changed=timestamp,
                    last_updated=timestamp,
                    created=timestamp,
                    event_id=event_row.event_id,
                    old_state_id=prev_state_id,
                )
                session.add(state_row)
                session.flush()  # assign state_row.state_id for the chain
                prev_state_id = state_row.state_id
def run(self): """Start processing events to save.""" from homeassistant.components.recorder.models import Events, States import sqlalchemy.exc while True: try: self._setup_connection() self._setup_run() break except sqlalchemy.exc.SQLAlchemyError as e: log_error(e, retry_wait=CONNECT_RETRY_WAIT, rollback=False, message="Error during connection setup: %s") if self.purge_days is not None: def purge_ticker(event): """Rerun purge every second day.""" self._purge_old_data() track_point_in_utc_time(self.hass, purge_ticker, dt_util.utcnow() + timedelta(days=2)) track_point_in_utc_time(self.hass, purge_ticker, dt_util.utcnow() + timedelta(minutes=5)) while True: event = self.queue.get() if event is None: self._close_run() self._close_connection() self.queue.task_done() return if event.event_type == EVENT_TIME_CHANGED: self.queue.task_done() continue if ATTR_ENTITY_ID in event.data: entity_id = event.data[ATTR_ENTITY_ID] domain = split_entity_id(entity_id)[0] # Exclude entities OR # Exclude domains, but include specific entities if (entity_id in self.exclude) or \ (domain in self.exclude and entity_id not in self.include_e): self.queue.task_done() continue # Included domains only (excluded entities above) OR # Include entities only, but only if no excludes if (self.include_d and domain not in self.include_d) or \ (self.include_e and entity_id not in self.include_e and not self.exclude): self.queue.task_done() continue dbevent = Events.from_event(event) self._commit(dbevent) if event.event_type != EVENT_STATE_CHANGED: self.queue.task_done() continue dbstate = States.from_event(event) dbstate.event_id = dbevent.event_id self._commit(dbstate) self.queue.task_done()
def test_from_event(self):
    """Test converting event to db event."""
    native = ha.Event("test_event", {"some_data": 15})
    round_tripped = Events.from_event(native).to_native()
    assert native == round_tripped
def run(self): """Start processing events to save.""" from homeassistant.components.recorder.models import Events, States from sqlalchemy.exc import SQLAlchemyError while True: try: self._setup_connection() self._setup_run() self.db_ready.set() self.hass.loop.call_soon_threadsafe(self.async_db_ready.set) break except SQLAlchemyError as err: _LOGGER.error("Error during connection setup: %s (retrying " "in %s seconds)", err, CONNECT_RETRY_WAIT) time.sleep(CONNECT_RETRY_WAIT) if self.purge_days is not None: async_track_time_interval( self.hass, self._purge_old_data, timedelta(days=2)) while True: event = self.queue.get() if event is None: self._close_run() self._close_connection() self.queue.task_done() return if event.event_type == EVENT_TIME_CHANGED: self.queue.task_done() continue if ATTR_ENTITY_ID in event.data: entity_id = event.data[ATTR_ENTITY_ID] domain = split_entity_id(entity_id)[0] # Exclude entities OR # Exclude domains, but include specific entities if (entity_id in self.exclude) or \ (domain in self.exclude and entity_id not in self.include_e): self.queue.task_done() continue # Included domains only (excluded entities above) OR # Include entities only, but only if no excludes if (self.include_d and domain not in self.include_d) or \ (self.include_e and entity_id not in self.include_e and not self.exclude): self.queue.task_done() continue with session_scope() as session: dbevent = Events.from_event(event) self._commit(session, dbevent) if event.event_type != EVENT_STATE_CHANGED: self.queue.task_done() continue dbstate = States.from_event(event) dbstate.event_id = dbevent.event_id self._commit(session, dbstate) self.queue.task_done()
async def update_forecast(self, checktime: bool = True):
    """Update forecast state.

    When *checktime* is True the API call is only made at midnight or
    strictly between the configured start/finish hours; otherwise the
    update is forced. Fetched forecast periods are upserted into the
    recorder's Events table, keyed by this config entry id and the
    period end time. All failures are logged, never raised.
    """
    try:
        if self._forecast_entity_id is None:
            _LOGGER.warning(
                "Solcast entities not yet registered, try again next day")
            return
        else:
            _doUpdate = True
            if checktime:
                _LOGGER.debug(
                    "Update forecast by api call has been called.. checking if it is within sun rise/set times to proceed or not"
                )
                self._debugData[
                    "update_forecast_by_time_last_called"] = dt_util.now(
                    ).astimezone().isoformat()
                _hournow = dt_util.now().hour
                # Allowed at midnight, or strictly inside the daylight window.
                if _hournow == 0 or (_hournow > self._starthour
                                     and _hournow < self._finishhour):
                    _doUpdate = True
                else:
                    _doUpdate = False
            else:
                self._debugData[
                    "forced_update_forecast_not_by_time_last_called"] = dt_util.now(
                    ).astimezone().isoformat()

            if _doUpdate:
                if not await self._fetch_forecasts():
                    _LOGGER.warning(
                        "Could not fetch data from Solcast, try again next day"
                    )
                else:
                    # Process data in case the forecast entity is already registered
                    _LOGGER.debug(
                        "Forecast successfully fetched for rooftop id %s",
                        self._resource_id)

                    _insert_items = []
                    with session_scope(hass=self._hass) as session:
                        eventdata = session.query(Events).filter(
                            Events.event_type == self._entry_id)

                        # Upsert each forecast period: insert when missing,
                        # update in place when exactly one row exists.
                        for forecasts in self._forecasts:
                            if "pv_estimate10" in forecasts:
                                d = {
                                    "period_end":
                                    forecasts["period_end"].isoformat(),
                                    "pv_estimate": forecasts["pv_estimate"],
                                    "pv_estimate10":
                                    forecasts.get("pv_estimate10"),
                                    "pv_estimate90":
                                    forecasts.get("pv_estimate90")
                                }
                            else:
                                # Older payloads lack the percentile bands.
                                d = {
                                    "period_end":
                                    forecasts["period_end"].isoformat(),
                                    "pv_estimate": forecasts["pv_estimate"],
                                    "pv_estimate10": 0.0,
                                    "pv_estimate90": 0.0
                                }

                            found = eventdata.filter(
                                Events.time_fired == forecasts["period_end"])
                            foundcount = len(found.all())

                            if foundcount == 0:
                                # we add a new record
                                e = Events(
                                    event_type=self._entry_id,
                                    event_data=json.dumps(d),
                                    origin="LOCAL",
                                    time_fired=forecasts["period_end"],
                                )
                                _insert_items.append(e)
                            elif foundcount == 1:
                                #update this one
                                found.update(
                                    {Events.event_data: json.dumps(d)},
                                    synchronize_session=False)
                            else:
                                # there are too many
                                # BUGFIX: log the computed row count instead of
                                # the unbound Query.count method object.
                                _LOGGER.error(
                                    "Too many records found for Solcast Forecast: %s count: %s !!This should not happen!!",
                                    self._resource_id, foundcount)

                        if len(_insert_items) > 0:
                            session.add_all(_insert_items)
                        session.commit()

                    _LOGGER.debug(
                        "Updated forecasts from Solcast API for roofop id %s",
                        self._resource_id)

                    self.set_state(SensorType.last_updated,
                                   dt_util.now().isoformat())
                    await self._notify_listeners(SensorType.last_updated)

                    self.set_state(SensorType.api_count, self._api_used)
                    await self._notify_listeners(SensorType.api_count)
            else:
                _LOGGER.debug(
                    "Forecast update called, but was not told to actually make the api call"
                )

            #update the ha view of the states every hour
            await self._notify_listeners(SensorType.forecast_today)
            await self._notify_listeners(SensorType.forecast_today_remaining)
            await self._notify_listeners(SensorType.forecast_tomorrow)
    except Exception:
        # Top-level guard: this runs from a timer; log full traceback
        # rather than letting the exception kill the scheduled task.
        _LOGGER.error("update_forecast: %s", traceback.format_exc())
def test_from_event(self):
    """Test converting event to db event."""
    native = ha.Event('test_event', {'some_data': 15})
    round_tripped = Events.from_event(native).to_native()
    assert native == round_tripped
def test_from_event_to_db_event():
    """Test converting event to db event."""
    native = ha.Event("test_event", {"some_data": 15})
    db_event = Events.from_event(native)
    # The payload lives in EventData; attach the shared blob before
    # converting back so the round trip carries the data.
    db_event.event_data = EventData.from_event(native).shared_data
    assert db_event.to_native() == native
def run(self): """Start processing events to save.""" from homeassistant.components.recorder.models import Events, States import sqlalchemy.exc while True: try: self._setup_connection() self._setup_run() break except sqlalchemy.exc.SQLAlchemyError as err: _LOGGER.error( "Error during connection setup: %s (retrying " "in %s seconds)", err, CONNECT_RETRY_WAIT) time.sleep(CONNECT_RETRY_WAIT) if self.purge_days is not None: async_track_time_interval(self.hass, self._purge_old_data, timedelta(days=2)) while True: event = self.queue.get() if event is None: self._close_run() self._close_connection() self.queue.task_done() return if event.event_type == EVENT_TIME_CHANGED: self.queue.task_done() continue if ATTR_ENTITY_ID in event.data: entity_id = event.data[ATTR_ENTITY_ID] domain = split_entity_id(entity_id)[0] # Exclude entities OR # Exclude domains, but include specific entities if (entity_id in self.exclude) or \ (domain in self.exclude and entity_id not in self.include_e): self.queue.task_done() continue # Included domains only (excluded entities above) OR # Include entities only, but only if no excludes if (self.include_d and domain not in self.include_d) or \ (self.include_e and entity_id not in self.include_e and not self.exclude): self.queue.task_done() continue with session_scope() as session: dbevent = Events.from_event(event) self._commit(session, dbevent) if event.event_type != EVENT_STATE_CHANGED: self.queue.task_done() continue dbstate = States.from_event(event) dbstate.event_id = dbevent.event_id self._commit(session, dbstate) self.queue.task_done()