def yield_rows(query: Query) -> Generator[Row, None, None]:
    """Yield Events that are not filtered away."""
    for row in query.yield_per(1000):
        # Always record the first row seen per context id, even for rows
        # that are filtered out below — the side effect must run for
        # every row.
        context_lookup.setdefault(row.context_id, row)
        # Service-call events are never yielded.
        if row.event_type == EVENT_CALL_SERVICE:
            continue
        # State changes always pass; everything else goes through the
        # entity filter.
        if row.event_type == EVENT_STATE_CHANGED or _keep_row(
            hass, row, entities_filter
        ):
            yield row
def yield_rows(query: Query) -> Generator[Row, None, None]:
    """Yield rows from the database."""
    # end_day - start_day intentionally checks .days and not .total_seconds()
    # since we don't want to switch over to buffered if they go
    # over one day by a few hours since the UI makes it so easy to do that.
    requested_span_days = (end_day - start_day).days
    if not self.limited_select and requested_span_days > 1:
        # Only buffer rows to reduce memory pressure
        # if we expect the result set is going to be very large.
        # What is considered very large is going to differ
        # based on the hardware Home Assistant is running on.
        #
        # sqlalchemy suggests that is at least 10k, but for
        # even and RPi3 that number seems higher in testing
        # so we don't switch over until we request > 1 day+ of data.
        #
        return query.yield_per(1024)  # type: ignore[no-any-return]
    return query.all()  # type: ignore[no-any-return]