def _async_update_entity(self, entity_id, *, name=_UNDEF,
                         config_entry_id=_UNDEF, new_entity_id=_UNDEF,
                         device_id=_UNDEF):
    """Private facing update properties method.

    Apply the requested property changes to the registry entry for
    *entity_id*, notify update listeners, schedule a save and return
    the new entry. Returns the old entry unchanged when nothing differs.

    Raises ValueError when a requested new_entity_id is already
    registered, invalid, or changes the entity's domain.
    """
    old = self.entities[entity_id]

    changes = {}

    if name is not _UNDEF and name != old.name:
        changes['name'] = name

    if (config_entry_id is not _UNDEF and
            config_entry_id != old.config_entry_id):
        changes['config_entry_id'] = config_entry_id

    if (device_id is not _UNDEF and device_id != old.device_id):
        changes['device_id'] = device_id

    if new_entity_id is not _UNDEF and new_entity_id != old.entity_id:
        if self.async_is_registered(new_entity_id):
            raise ValueError('Entity is already registered')

        if not valid_entity_id(new_entity_id):
            raise ValueError('Invalid entity ID')

        if (split_entity_id(new_entity_id)[0] !=
                split_entity_id(entity_id)[0]):
            raise ValueError('New entity ID should be same domain')

        # Re-key the registry under the new entity id.
        self.entities.pop(entity_id)
        entity_id = changes['entity_id'] = new_entity_id

    if not changes:
        return old

    new = self.entities[entity_id] = attr.evolve(old, **changes)

    to_remove = []
    for listener_ref in new.update_listeners:
        listener = listener_ref()
        if listener is None:
            # Listener was garbage collected; remember the dead weakref
            # for pruning below. Bug fix: previously this appended the
            # dereferenced value `listener` (i.e. None) instead of the
            # weakref, making `update_listeners.remove(ref)` below raise
            # ValueError and leaving dead refs in the list.
            to_remove.append(listener_ref)
        else:
            try:
                listener.async_registry_updated(old, new)
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception('Error calling update listener')

    for ref in to_remove:
        new.update_listeners.remove(ref)

    self.async_schedule_save()

    return new
def validate_entity_config(values):
    """Validate config entry for CONF_ENTITY.

    Returns a dict mapping validated entity ids to their per-domain
    validated configuration. Raises vol.Invalid on malformed input.
    """
    # Robustness (consistent with the modern variant of this validator):
    # fail with a schema error instead of AttributeError on non-dicts.
    if not isinstance(values, dict):
        raise vol.Invalid('expected a dictionary')

    entities = {}
    for entity_id, config in values.items():
        entity = cv.entity_id(entity_id)
        domain, _ = split_entity_id(entity)

        if not isinstance(config, dict):
            # Bug fix: the adjacent string literals previously rendered
            # "must be  a dictionary." with a double space.
            raise vol.Invalid('The configuration for {} must be '
                              'a dictionary.'.format(entity))

        if domain in ('alarm_control_panel', 'lock'):
            config = CODE_SCHEMA(config)

        elif domain == media_player.DOMAIN:
            config = FEATURE_SCHEMA(config)
            # Re-key the feature list by feature name, rejecting dupes.
            feature_list = {}
            for feature in config[CONF_FEATURE_LIST]:
                params = MEDIA_PLAYER_SCHEMA(feature)
                key = params.pop(CONF_FEATURE)
                if key in feature_list:
                    raise vol.Invalid('A feature can be added only once for {}'
                                      .format(entity))
                feature_list[key] = params
            config[CONF_FEATURE_LIST] = feature_list

        elif domain == 'switch':
            config = SWITCH_TYPE_SCHEMA(config)

        else:
            config = BASIC_INFO_SCHEMA(config)

        entities[entity] = config

    return entities
def __init__(self, hass, driver, name, entity_id, aid, config,
             category=CATEGORY_OTHER):
    """Initialize a Accessory object."""
    super().__init__(driver, name, aid=aid)
    # Derive the HomeKit model string from the entity's domain,
    # e.g. "binary_sensor" -> "Binary Sensor".
    model = split_entity_id(entity_id)[0].replace("_", " ").title()
    self.set_info_service(
        firmware_revision=__version__, manufacturer=MANUFACTURER,
        model=model, serial_number=entity_id)
    self.category = category
    self.config = config
    self.entity_id = entity_id
    self.hass = hass
    self.debounce = {}
    self._support_battery_level = False
    # NOTE(review): charging support defaults to True while level support
    # defaults to False -- confirm this asymmetry is intentional.
    self._support_battery_charging = True

    """Add battery service if available"""
    battery_level = self.hass.states.get(self.entity_id).attributes \
        .get(ATTR_BATTERY_LEVEL)
    if battery_level is None:
        # No battery attribute on the entity: skip the battery service.
        return
    _LOGGER.debug('%s: Found battery level attribute', self.entity_id)
    self._support_battery_level = True
    serv_battery = self.add_preload_service(SERV_BATTERY_SERVICE)
    self._char_battery = serv_battery.configure_char(
        CHAR_BATTERY_LEVEL, value=0)
    # presumably 2 maps to "not chargeable" in HomeKit -- TODO confirm.
    self._char_charging = serv_battery.configure_char(
        CHAR_CHARGING_STATE, value=2)
    self._char_low_battery = serv_battery.configure_char(
        CHAR_STATUS_LOW_BATTERY, value=0)
def expand_entity_ids(hass, entity_ids):
    """Return entity_ids with group entity ids replaced by their members.

    Async friendly.
    """
    found_ids = []

    for entity_id in entity_ids:
        if not isinstance(entity_id, str):
            # Silently skip anything that is not an entity id string.
            continue

        entity_id = entity_id.lower()

        try:
            # If entity_id points at a group, expand it
            domain, _ = ha.split_entity_id(entity_id)

            if domain == DOMAIN:
                child_entities = get_entity_ids(hass, entity_id)
                if entity_id in child_entities:
                    # Guard against a group containing itself.
                    child_entities = list(child_entities)
                    child_entities.remove(entity_id)
                # Recurse so nested groups are expanded too,
                # de-duplicating against what was already found.
                found_ids.extend(
                    ent_id for ent_id
                    in expand_entity_ids(hass, child_entities)
                    if ent_id not in found_ids)
            else:
                if entity_id not in found_ids:
                    found_ids.append(entity_id)

        except AttributeError:
            # Raised by split_entity_id if entity_id is not a string
            pass

    return found_ids
def _add_data_in_last_run(entities): """Add test data in the last recorder_run.""" # pylint: disable=protected-access t_now = dt_util.utcnow() - timedelta(minutes=10) t_min_1 = t_now - timedelta(minutes=20) t_min_2 = t_now - timedelta(minutes=30) recorder_runs = recorder.get_model('RecorderRuns') states = recorder.get_model('States') with recorder.session_scope() as session: run = recorder_runs( start=t_min_2, end=t_now, created=t_min_2 ) recorder._INSTANCE._commit(session, run) for entity_id, state in entities.items(): dbstate = states( entity_id=entity_id, domain=split_entity_id(entity_id)[0], state=state, attributes='{}', last_changed=t_min_1, last_updated=t_min_1, created=t_min_1) recorder._INSTANCE._commit(session, dbstate)
def group_together(entity_id, associated_group_ids):
    """Group entities into one group and return the group's entity id.

    NOTE(review): `hass` and `group` are free names here -- presumably
    resolved from an enclosing scope; verify before reuse.
    """
    # group all slave components together into energy_entity group
    obj_id = split_entity_id(entity_id)[-1]
    group_name = f"group.{obj_id}"
    # NOTE: mutates the caller's list in place.
    associated_group_ids.append(entity_id)  # add self to energy_entity group
    group.set_group(hass, obj_id, entity_ids=associated_group_ids)
    # TODO: cleanup
    return group_name  # group.xxxxx
def is_on(hass, entity_id=None):
    """Load up the module to call the is_on method.

    If there is no entity id given we will check all.
    """
    if entity_id:
        # Expand groups so their members are checked individually.
        group = get_component('group')
        entity_ids = group.expand_entity_ids(hass, [entity_id])
    else:
        entity_ids = hass.states.entity_ids()

    # NOTE: the loop variable deliberately shadows the parameter.
    for entity_id in entity_ids:
        domain = ha.split_entity_id(entity_id)[0]
        module = get_component(domain)

        try:
            if module.is_on(hass, entity_id):
                return True
        except AttributeError:
            # module is None or method is_on does not exist
            _LOGGER.exception("Failed to call %s.is_on for %s",
                              module, entity_id)

    return False
def is_on(hass, entity_id=None):
    """Load up the module to call the is_on method.

    If there is no entity id given we will check all.
    """
    if entity_id:
        entity_ids = hass.components.group.expand_entity_ids([entity_id])
    else:
        entity_ids = hass.states.entity_ids()

    for ent_id in entity_ids:
        domain = ha.split_entity_id(ent_id)[0]

        try:
            # hass.components raises ImportError for unknown components.
            component = getattr(hass.components, domain)
        except ImportError:
            _LOGGER.error('Failed to call %s.is_on: component not found',
                          domain)
            continue

        if not hasattr(component, 'is_on'):
            _LOGGER.warning("Component %s has no is_on method.", domain)
            continue

        if component.is_on(ent_id):
            return True

    return False
def handle_turn_service(service):
    """Method to handle calls to homeassistant.turn_on/off."""
    entity_ids = extract_entity_ids(hass, service)

    # Generic turn on/off method requires entity id
    if not entity_ids:
        _LOGGER.error(
            "homeassistant/%s cannot be called without entity_id",
            service.service)
        return

    # Group entity_ids by domain. groupby requires sorted data.
    by_domain = it.groupby(sorted(entity_ids),
                           lambda item: ha.split_entity_id(item)[0])

    for domain, ent_ids in by_domain:
        # We want to block for all calls and only return when all calls
        # have been processed. If a service does not exist it causes a 10
        # second delay while we're blocking waiting for a response.
        # But services can be registered on other HA instances that are
        # listening to the bus too. So as a in between solution, we'll
        # block only if the service is defined in the current HA instance.
        blocking = hass.services.has_service(domain, service.service)

        # Create a new dict for this call
        data = dict(service.data)

        # ent_ids is a generator, convert it to a list.
        data[ATTR_ENTITY_ID] = list(ent_ids)

        hass.services.call(domain, service.service, data, blocking)
def entity_filter_4b(entity_id):
    """Return filter function for case 4b."""
    entity_domain = split_entity_id(entity_id)[0]
    if entity_domain not in exclude_d:
        # Domain is not excluded: pass unless the entity itself is.
        return entity_id not in exclude_e
    # Excluded domain: only explicitly included entities pass.
    return entity_id in include_e
def test_switch_set_state(self):
    """Test if accessory and HA are updated accordingly."""
    entity_id = 'switch.test'
    domain = split_entity_id(entity_id)[0]

    acc = Switch(self.hass, entity_id, 'Switch', aid=2)
    acc.run()

    self.assertEqual(acc.aid, 2)
    self.assertEqual(acc.category, 8)  # Switch
    self.assertEqual(acc.char_on.value, False)

    # HA state changes must be reflected on the HomeKit characteristic.
    self.hass.states.set(entity_id, STATE_ON)
    self.hass.block_till_done()
    self.assertEqual(acc.char_on.value, True)

    self.hass.states.set(entity_id, STATE_OFF)
    self.hass.block_till_done()
    self.assertEqual(acc.char_on.value, False)

    # Set from HomeKit
    acc.char_on.set_value(True)
    self.hass.block_till_done()
    self.assertEqual(
        self.events[0].data[ATTR_DOMAIN], domain)
    self.assertEqual(
        self.events[0].data[ATTR_SERVICE], SERVICE_TURN_ON)

    acc.char_on.set_value(False)
    self.hass.block_till_done()
    self.assertEqual(
        self.events[1].data[ATTR_DOMAIN], domain)
    self.assertEqual(
        self.events[1].data[ATTR_SERVICE], SERVICE_TURN_OFF)
async def test_reset_switch(hass, hk_driver, entity_id, attrs, events):
    """Test if switch accessory is reset correctly."""
    domain = split_entity_id(entity_id)[0]

    hass.states.async_set(entity_id, None, attrs)
    await hass.async_block_till_done()

    acc = Switch(hass, hk_driver, 'Switch', entity_id, 2, None)
    await hass.async_add_job(acc.run)
    await hass.async_block_till_done()
    # Activate-only switches should reset shortly after activation.
    assert acc.activate_only is True
    assert acc.char_on.value is False

    call_turn_on = async_mock_service(hass, domain, 'turn_on')
    call_turn_off = async_mock_service(hass, domain, 'turn_off')

    await hass.async_add_job(acc.char_on.client_update_value, True)
    await hass.async_block_till_done()
    assert acc.char_on.value is True
    assert call_turn_on
    assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] is None

    # One second later the characteristic resets on its own,
    # without calling turn_off.
    future = dt_util.utcnow() + timedelta(seconds=1)
    async_fire_time_changed(hass, future)
    await hass.async_block_till_done()
    assert acc.char_on.value is False
    assert len(events) == 1
    assert not call_turn_off

    # Setting False from HomeKit is a no-op for activate-only switches.
    await hass.async_add_job(acc.char_on.client_update_value, False)
    await hass.async_block_till_done()
    assert acc.char_on.value is False
    assert len(events) == 1
def from_event(event):
    """Create object from a state_changed event."""
    entity_id = event.data['entity_id']
    state = event.data.get('new_state')

    dbstate = States(
        entity_id=entity_id,
        context_id=event.context.id,
        context_user_id=event.context.user_id,
    )

    # State got deleted
    if state is None:
        dbstate.state = ''
        dbstate.domain = split_entity_id(entity_id)[0]
        dbstate.attributes = '{}'
        dbstate.last_changed = event.time_fired
        dbstate.last_updated = event.time_fired
    else:
        dbstate.domain = state.domain
        dbstate.state = state.state
        # JSONEncoder handles HA-specific attribute types.
        dbstate.attributes = json.dumps(dict(state.attributes),
                                        cls=JSONEncoder)
        dbstate.last_changed = state.last_changed
        dbstate.last_updated = state.last_updated

    return dbstate
def turn_on(hass, entity_id, variables=None, context=None):
    """Turn script on.

    This is a legacy helper method. Do not use it for new tests.
    """
    object_id = split_entity_id(entity_id)[1]
    hass.services.call(DOMAIN, object_id, variables, context=context)
def assert_domain(e: Event, target_domain: str) -> bool:
    """Return True if the event's entity id belongs to target_domain."""
    # make sure event target is correct
    domain = split_entity_id(e.data['entity_id'])[0]
    return domain == target_domain
def _exclude_events(events, entities_filter):
    """Return the subset of events that should appear in the logbook."""
    filtered_events = []
    for event in events:
        domain, entity_id = None, None

        if event.event_type == EVENT_STATE_CHANGED:
            entity_id = event.data.get('entity_id')
            if entity_id is None:
                continue

            # Do not report on new entities
            if event.data.get('old_state') is None:
                continue

            new_state = event.data.get('new_state')

            # Do not report on entity removal
            if not new_state:
                continue

            attributes = new_state.get('attributes', {})

            # If last_changed != last_updated only attributes have changed
            # we do not report on that yet.
            last_changed = new_state.get('last_changed')
            last_updated = new_state.get('last_updated')
            if last_changed != last_updated:
                continue

            domain = split_entity_id(entity_id)[0]

            # Also filter auto groups.
            if domain == 'group' and attributes.get('auto', False):
                continue

            # exclude entities which are customized hidden
            hidden = attributes.get(ATTR_HIDDEN, False)
            if hidden:
                continue

        elif event.event_type == EVENT_LOGBOOK_ENTRY:
            domain = event.data.get(ATTR_DOMAIN)
            entity_id = event.data.get(ATTR_ENTITY_ID)

        elif event.event_type == EVENT_ALEXA_SMART_HOME:
            domain = 'alexa'

        elif event.event_type == EVENT_HOMEKIT_CHANGED:
            domain = DOMAIN_HOMEKIT

        if not entity_id and domain:
            # Synthesize a pseudo entity id ("<domain>.") so domain-level
            # filters still apply to domain-only events.
            entity_id = "%s." % (domain, )

        # Events with no identifiable entity always pass through.
        if not entity_id or entities_filter(entity_id):
            filtered_events.append(event)

    return filtered_events
def __init__(self, *args):
    """Initialize a Switch accessory object."""
    super().__init__(*args, category=CATEGORY_SWITCH)
    # The entity's domain determines which services handle state changes.
    self._domain = split_entity_id(self.entity_id)[0]
    # presumably guards HA<->HomeKit update feedback loops -- verify
    # against set_state.
    self.flag_target_state = False

    serv_switch = self.add_preload_service(SERV_SWITCH)
    self.char_on = serv_switch.configure_char(
        CHAR_ON, value=False, setter_callback=self.set_state)
def __init__(self, *args, config):
    """Initialize a Switch accessory object to represent a remote."""
    # NOTE(review): `config` is accepted but unused in this body --
    # possibly consumed by the superclass or kept for API symmetry.
    super().__init__(*args, category=CATEGORY_SWITCH)
    self._domain = split_entity_id(self.entity_id)[0]
    # presumably guards HA<->HomeKit update feedback loops -- verify
    # against set_state.
    self.flag_target_state = False

    serv_switch = add_preload_service(self, SERV_SWITCH)
    self.char_on = setup_char(
        CHAR_ON, serv_switch, value=False, callback=self.set_state)
def to_python(self, value):
    """Validate entity id and return it unchanged."""
    # When existence is required, the entity must be in the state machine.
    if self._exist and hass.states.get(value) is None:
        raise ValidationError()
    # When a domain is configured, the entity must belong to it.
    if self._domain is not None:
        if split_entity_id(value)[0] != self._domain:
            raise ValidationError()
    return value
def validate(values: Union[str, Sequence]) -> Sequence[str]:
    """Test if entity domain is domain."""
    values = entity_ids(values)
    for ent_id in values:
        if split_entity_id(ent_id)[0] == domain:
            continue
        raise vol.Invalid(
            "Entity ID '{}' does not belong to domain '{}'"
            .format(ent_id, domain))
    return values
def _create_processor_from_config(hass, camera_entity, config):
    """Create an OpenCV processor from configuration."""
    classifier_config = config.get(CONF_CLASSIFIER)
    # Processor name is "<configured name> <camera object id with spaces>".
    object_id = split_entity_id(camera_entity)[1]
    name = '{} {}'.format(config[CONF_NAME], object_id.replace('_', ' '))
    return OpenCVImageProcessor(hass, camera_entity, name, classifier_config)
def __init__(self, camera_entity, name=None):
    """Initialize Dlib face entity."""
    super().__init__()
    self._camera = camera_entity
    # Fall back to a name derived from the camera's object id.
    self._name = name or "Dlib Face {0}".format(
        split_entity_id(camera_entity)[1])
def __init__(self, hass, camera_entity, name, session, detection_graph,
             category_index, config):
    """Initialize the TensorFlow entity."""
    model_config = config.get(CONF_MODEL)
    self.hass = hass
    self._camera_entity = camera_entity
    if name:
        self._name = name
    else:
        # Default name derives from the camera's object id.
        self._name = "TensorFlow {0}".format(
            split_entity_id(camera_entity)[1])
    self._session = session
    self._graph = detection_graph
    self._category_index = category_index
    self._min_confidence = config.get(CONF_CONFIDENCE)
    self._file_out = config.get(CONF_FILE_OUT)

    # handle categories and specific detection areas
    categories = model_config.get(CONF_CATEGORIES)
    self._include_categories = []
    self._category_areas = {}
    for category in categories:
        if isinstance(category, dict):
            # Dict form: a category with an optional restricted area.
            category_name = category.get(CONF_CATEGORY)
            category_area = category.get(CONF_AREA)
            self._include_categories.append(category_name)
            # Area format is [top, left, bottom, right] in relative
            # coordinates; default covers the whole frame.
            self._category_areas[category_name] = [0, 0, 1, 1]
            if category_area:
                self._category_areas[category_name] = [
                    category_area.get(CONF_TOP),
                    category_area.get(CONF_LEFT),
                    category_area.get(CONF_BOTTOM),
                    category_area.get(CONF_RIGHT)
                ]
        else:
            # Plain string form: detect the category anywhere in frame.
            self._include_categories.append(category)
            self._category_areas[category] = [0, 0, 1, 1]

    # Handle global detection area
    self._area = [0, 0, 1, 1]
    area_config = model_config.get(CONF_AREA)
    if area_config:
        self._area = [
            area_config.get(CONF_TOP),
            area_config.get(CONF_LEFT),
            area_config.get(CONF_BOTTOM),
            area_config.get(CONF_RIGHT)
        ]

    template.attach(hass, self._file_out)

    self._matches = {}
    self._total_matches = 0
    self._last_image = None
def __init__(self, camera_entity, name):
    """Initialize QR image processing entity."""
    super().__init__()
    self._camera = camera_entity
    # Fall back to a name derived from the camera's object id.
    self._name = name or "QR {0}".format(split_entity_id(camera_entity)[1])
    self._state = None
def __init__(self, hass, camera_entity, name, classifiers):
    """Initialize the OpenCV entity."""
    self.hass = hass
    self._camera_entity = camera_entity
    if name:
        self._name = name
    else:
        # Default name derives from the camera's object id.
        self._name = "OpenCV {0}".format(split_entity_id(camera_entity)[1])
    self._classifiers = classifiers
    self._matches = {}
    self._total_matches = 0
    self._last_image = None
def __init__(self, ip, port, camera_entity, name=None):
    """Init with the API key and model id."""
    super().__init__()
    # Endpoint of the classifier's "check" API.
    self._url = "http://{}:{}/{}/check".format(ip, port, CLASSIFIER)
    self._camera = camera_entity
    # Fall back to "<classifier> <camera object id>" when unnamed.
    self._name = name or "{} {}".format(
        CLASSIFIER, split_entity_id(camera_entity)[1])
    self._matched = {}
def add_entity_to_entity_state(hass, entity_state, entity_id):
    """add entity_id to entity_ids of group"""
    state = hass.states.get(entity_state)
    ids = state.attributes['entity_id']
    # NOTE(review): nesting reconstructed from a flattened source --
    # the else is taken to pair with the outer None check; confirm.
    if ids is not None:
        if type(ids) is tuple:
            # Attribute is stored as a tuple; mutate a list copy.
            ids = list(ids)
        if entity_id not in ids:
            ids.append(entity_id)
    else:
        ids = [entity_id]
    # NOTE(review): warning-level log looks like a debug leftover.
    _LOGGER.warning(f'{dbg()}:twxxx: {entity_state}:{entity_id}:{ids}')
    set_group(hass, object_id=split_entity_id(entity_state)[-1],
              entity_ids=ids)  # creates new group
def __init__(self, camera_entity, api, attributes, name=None):
    """Initialize Microsoft Face."""
    super().__init__()

    self._api = api
    self._camera = camera_entity
    self._attributes = attributes

    if name:
        self._name = name
    else:
        # Default name derives from the camera's object id.
        self._name = "MicrosoftFace {0}".format(
            split_entity_id(camera_entity)[1])
def __init__(self, camera_entity, command, confidence, name=None):
    """Initialize OpenALPR local API."""
    super().__init__()

    # Shell command used to invoke the local alpr binary.
    self._cmd = command
    self._camera = camera_entity
    self._confidence = confidence

    if name:
        self._name = name
    else:
        # Default name derives from the camera's object id.
        self._name = "OpenAlpr {0}".format(
            split_entity_id(camera_entity)[1])
def rm_entity_from_entity_state(hass, entity_state, entity_id):
    """remove single entity_id from entity_ids of group

    e.g. group.all_switches
    """
    state = hass.states.get(entity_state)
    ids = state.attributes['entity_id']
    if ids is not None:
        if type(ids) is tuple:
            # Attribute is stored as a tuple; mutate a list copy.
            ids = list(ids)
        if entity_id in ids:
            ids.remove(entity_id)
    # NOTE(review): nesting reconstructed from a flattened source --
    # set_group appears to run even when ids is None; confirm intended.
    set_group(hass, object_id=split_entity_id(entity_state)[-1],
              entity_ids=ids)  # creates new group
def async_handle_turn_service(service):
    """Handle calls to homeassistant.turn_on/off."""
    # NOTE: old-style coroutine (uses `yield from`); presumably decorated
    # with @asyncio.coroutine at the definition site -- confirm.
    entity_ids = extract_entity_ids(hass, service)

    # Generic turn on/off method requires entity id
    if not entity_ids:
        _LOGGER.error(
            "homeassistant/%s cannot be called without entity_id",
            service.service)
        return

    # Group entity_ids by domain. groupby requires sorted data.
    by_domain = it.groupby(sorted(entity_ids),
                           lambda item: ha.split_entity_id(item)[0])

    tasks = []

    for domain, ent_ids in by_domain:
        # We want to block for all calls and only return when all calls
        # have been processed. If a service does not exist it causes a 10
        # second delay while we're blocking waiting for a response.
        # But services can be registered on other HA instances that are
        # listening to the bus too. So as a in between solution, we'll
        # block only if the service is defined in the current HA instance.
        blocking = hass.services.has_service(domain, service.service)

        # Create a new dict for this call
        data = dict(service.data)

        # ent_ids is a generator, convert it to a list.
        data[ATTR_ENTITY_ID] = list(ent_ids)

        tasks.append(
            hass.services.async_call(domain, service.service, data,
                                     blocking))

    yield from asyncio.wait(tasks, loop=hass.loop)
def validate_entity_config(values):
    """Validate config entry for CONF_ENTITY.

    Returns a dict mapping validated entity ids to their per-domain
    validated configuration.
    """
    if not isinstance(values, dict):
        raise vol.Invalid("expected a dictionary")

    entities = {}
    for entity_id, config in values.items():
        entity = cv.entity_id(entity_id)
        domain, _ = split_entity_id(entity)

        if not isinstance(config, dict):
            raise vol.Invalid(
                f"The configuration for {entity} must be a dictionary.")

        if domain in ("alarm_control_panel", "lock"):
            config = CODE_SCHEMA(config)

        elif domain == media_player.const.DOMAIN:
            config = FEATURE_SCHEMA(config)
            # Re-key the feature list by feature name, rejecting dupes.
            feature_list = {}
            for feature in config[CONF_FEATURE_LIST]:
                params = MEDIA_PLAYER_SCHEMA(feature)
                key = params.pop(CONF_FEATURE)
                if key in feature_list:
                    raise vol.Invalid(
                        f"A feature can be added only once for {entity}")
                feature_list[key] = params
            config[CONF_FEATURE_LIST] = feature_list

        elif domain == "switch":
            config = SWITCH_TYPE_SCHEMA(config)

        else:
            config = BASIC_INFO_SCHEMA(config)

        entities[entity] = config

    return entities
def __init__(
    self,
    ip_address,
    port,
    api_key,
    timeout,
    detect_only,
    save_file_folder,
    save_timestamped_file,
    save_faces_folder,
    save_faces,
    show_boxes,
    camera_entity,
    name=None,
):
    """Init with the API key and model id."""
    super().__init__()
    # Deepstack face-recognition API client.
    self._dsface = ds.DeepstackFace(
        ip=ip_address, port=port, api_key=api_key, timeout=timeout
    )
    self._detect_only = detect_only
    self._show_boxes = show_boxes
    self._last_detection = None
    self._save_file_folder = save_file_folder
    self._save_timestamped_file = save_timestamped_file
    self._save_faces_folder = save_faces_folder
    self._save_faces = save_faces
    self._camera = camera_entity
    if name:
        self._name = name
    else:
        # Default name: "<classifier> <camera object id>".
        camera_name = split_entity_id(camera_entity)[1]
        self._name = "{} {}".format(CLASSIFIER, camera_name)
    self._predictions = []
    self._matched = {}
    self.total_faces = None
def handle_event(self, event):
    """Listen for new messages on the bus, and add them to Prometheus."""
    state = event.data.get("new_state")
    if state is None:
        return

    entity_id = state.entity_id
    _LOGGER.debug("Handling state update for %s", entity_id)
    domain, _ = hacore.split_entity_id(entity_id)

    if not self._filter(state.entity_id):
        return

    # Dispatch to a per-domain handler (e.g. _handle_sensor) when one
    # exists, but skip unavailable states.
    handler = f"_handle_{domain}"
    if hasattr(self, handler) and state.state != STATE_UNAVAILABLE:
        getattr(self, handler)(state)

    labels = self._labels(state)
    state_change = self._metric(
        "state_change", self.prometheus_cli.Counter,
        "The number of state changes"
    )
    state_change.labels(**labels).inc()

    entity_available = self._metric(
        "entity_available",
        self.prometheus_cli.Gauge,
        "Entity is available (not in the unavailable state)",
    )
    entity_available.labels(**labels).set(
        float(state.state != STATE_UNAVAILABLE))

    last_updated_time_seconds = self._metric(
        "last_updated_time_seconds",
        self.prometheus_cli.Gauge,
        "The last_updated timestamp",
    )
    last_updated_time_seconds.labels(**labels).set(
        state.last_updated.timestamp())
def __init__(self, hass, camera_entity, name, options, crop):
    """Initialize the OpenCV entity."""
    self.hass = hass
    self._camera_entity = camera_entity
    if name:
        self._name = name
    else:
        # Default name derives from the camera's object id.
        self._name = "OpenCV {0}".format(split_entity_id(camera_entity)[1])
    self.confThreshold = options["threshold"]  # Confidence threshold
    self.nmsThreshold = 0.4  # Non-maximum suppression threshold
    self.inpWidth = 416  # Width of network's input image
    self.inpHeight = 416  # Height of network's input image
    # One class label per line in the labels file.
    self.classes = None
    with open(options["labels"], 'rt') as f:
        self.classes = f.read().rstrip('\n').split('\n')
    # Give the configuration and weight files for the model and load
    # the network using them.
    self.modelConfiguration = options["model"]
    self.modelWeights = options["weights"]
    self._crop = crop
    self._matches = {}
    self._total_matches = 0
    self._last_image = None
def __init__(self, ip_address, port, username, password, hostname,
             camera_entity, name=None):
    """Init with the API key and model id."""
    super().__init__()
    # Endpoints of the classifier's check/teach APIs.
    self._url_check = "http://{}:{}/{}/check".format(
        ip_address, port, CLASSIFIER)
    self._url_teach = "http://{}:{}/{}/teach".format(
        ip_address, port, CLASSIFIER)
    self._username = username
    self._password = password
    self._hostname = hostname
    self._camera = camera_entity
    if name:
        self._name = name
    else:
        # Default name: "<classifier> <camera object id>".
        camera_name = split_entity_id(camera_entity)[1]
        self._name = "{} {}".format(
            CLASSIFIER, camera_name)
    self._matched = {}
def expand_entity_ids(hass: HomeAssistantType,
                      entity_ids: Iterable[Any]) -> List[str]:
    """Return entity_ids with group entity ids replaced by their members.

    Async friendly.
    """
    found_ids: List[str] = []
    for entity_id in entity_ids:
        if not isinstance(entity_id, str):
            # Silently skip anything that is not an entity id string.
            continue

        entity_id = entity_id.lower()

        try:
            # If entity_id points at a group, expand it
            domain, _ = ha.split_entity_id(entity_id)

            if domain == DOMAIN:
                child_entities = get_entity_ids(hass, entity_id)
                if entity_id in child_entities:
                    # Guard against a group containing itself.
                    child_entities = list(child_entities)
                    child_entities.remove(entity_id)
                # Recurse so nested groups are expanded too,
                # de-duplicating against what was already found.
                found_ids.extend(
                    ent_id
                    for ent_id in expand_entity_ids(hass, child_entities)
                    if ent_id not in found_ids
                )
            else:
                if entity_id not in found_ids:
                    found_ids.append(entity_id)

        except AttributeError:
            # Raised by split_entity_id if entity_id is not a string
            pass

    return found_ids
async def test_switch_set_state(hass, hk_driver, entity_id):
    """Test if accessory and HA are updated accordingly."""
    domain = split_entity_id(entity_id)[0]

    hass.states.async_set(entity_id, None)
    await hass.async_block_till_done()

    acc = Switch(hass, hk_driver, 'Switch', entity_id, 2, None)
    await hass.async_add_job(acc.run)
    await hass.async_block_till_done()

    assert acc.aid == 2
    assert acc.category == 8  # Switch
    assert acc.char_on.value is False

    # HA -> HomeKit direction.
    hass.states.async_set(entity_id, STATE_ON)
    await hass.async_block_till_done()
    assert acc.char_on.value is True

    hass.states.async_set(entity_id, STATE_OFF)
    await hass.async_block_till_done()
    assert acc.char_on.value is False

    # Set from HomeKit
    call_turn_on = async_mock_service(hass, domain, 'turn_on')
    call_turn_off = async_mock_service(hass, domain, 'turn_off')

    await hass.async_add_job(acc.char_on.client_update_value, True)
    await hass.async_block_till_done()
    assert call_turn_on
    assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id

    await hass.async_add_job(acc.char_on.client_update_value, False)
    await hass.async_block_till_done()
    assert call_turn_off
    assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
def __init__(self, hass, driver, name, entity_id, aid, config,
             category=CATEGORY_OTHER):
    """Initialize a Accessory object."""
    super().__init__(driver, name, aid=aid)
    # Derive the HomeKit model string from the entity's domain,
    # e.g. "binary_sensor" -> "Binary Sensor".
    model = split_entity_id(entity_id)[0].replace("_", " ").title()
    self.set_info_service(firmware_revision=__version__,
                          manufacturer=MANUFACTURER,
                          model=model, serial_number=entity_id)
    self.category = category
    self.config = config
    self.entity_id = entity_id
    self.hass = hass
    self.debounce = {}
    self._support_battery_level = False
    # NOTE(review): charging support defaults to True while level support
    # defaults to False -- confirm this asymmetry is intentional.
    self._support_battery_charging = True

    """Add battery service if available"""
    battery_level = self.hass.states.get(self.entity_id).attributes \
        .get(ATTR_BATTERY_LEVEL)
    if battery_level is None:
        # No battery attribute on the entity: skip the battery service.
        return
    _LOGGER.debug('%s: Found battery level attribute', self.entity_id)
    self._support_battery_level = True
    serv_battery = self.add_preload_service(SERV_BATTERY_SERVICE)
    self._char_battery = serv_battery.configure_char(CHAR_BATTERY_LEVEL,
                                                     value=0)
    # presumably 2 maps to "not chargeable" in HomeKit -- TODO confirm.
    self._char_charging = serv_battery.configure_char(CHAR_CHARGING_STATE,
                                                      value=2)
    self._char_low_battery = serv_battery.configure_char(
        CHAR_STATUS_LOW_BATTERY, value=0)
def __init__(self, *args):
    """Initialize a Switch accessory object."""
    super().__init__(*args, category=CATEGORY_SWITCH)
    self.domain = split_entity_id(self.entity_id)[0]
    state = self.hass.states.get(self.entity_id)
    self.select_chars = {}
    options = state.attributes[ATTR_OPTIONS]
    # One HomeKit outlet service per selectable option; switching an
    # outlet "on" triggers select_option for that option.
    for option in options:
        serv_option = self.add_preload_service(
            SERV_OUTLET, [CHAR_NAME, CHAR_IN_USE]
        )
        serv_option.configure_char(
            CHAR_NAME, value=cleanup_name_for_homekit(option)
        )
        serv_option.configure_char(CHAR_IN_USE, value=False)
        # `option=option` binds the loop variable as a default argument,
        # avoiding the late-binding closure pitfall.
        self.select_chars[option] = serv_option.configure_char(
            CHAR_ON,
            value=False,
            setter_callback=lambda value, option=option:
                self.select_option(option),
        )
    self.set_primary_service(self.select_chars[options[0]])
    # Set the state so it is in sync on initial
    # GET to avoid an event storm after homekit startup
    self.async_update_state(state)
def handle_event(self, event):
    """Listen for new messages on the bus, and add them to Prometheus."""
    state = event.data.get('new_state')
    if state is None:
        return

    entity_id = state.entity_id
    _LOGGER.debug("Handling state update for %s", entity_id)
    domain, _ = hacore.split_entity_id(entity_id)

    if not self._filter(state.entity_id):
        return

    # Dispatch to a per-domain handler (e.g. _handle_sensor) if defined.
    handler = '_handle_{}'.format(domain)

    if hasattr(self, handler):
        getattr(self, handler)(state)

    metric = self._metric(
        'state_change',
        self.prometheus_client.Counter,
        'The number of state changes',
    )
    metric.labels(**self._labels(state)).inc()
def _get_temperature(self, state: LazyState) -> Optional[float]:
    """Get temperature value from entity.

    Weather and climate/water_heater entities expose temperature as an
    attribute (already in HA's configured unit); any other entity is
    treated as a sensor whose state is the temperature in the entity's
    own unit of measurement. Returns the value converted to HA's unit,
    or None when no usable value is present.
    """
    ha_unit = self.hass.config.units.temperature_unit
    domain = split_entity_id(state.entity_id)[0]
    if domain == WEATHER_DOMAIN:
        temperature = state.attributes.get("temperature")
        entity_unit = ha_unit
    elif domain in (CLIMATE_DOMAIN, WATER_HEATER_DOMAIN):
        temperature = state.attributes.get("current_temperature")
        entity_unit = ha_unit
    else:
        temperature = state.state
        entity_unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)

    if not self._has_state(temperature):
        return None

    try:
        temperature = convert_temperature(float(temperature), entity_unit,
                                          ha_unit)
    except ValueError as exc:
        # Bug fix: log the offending raw value and the entity id rather
        # than the whole state object, which produced a misleading
        # 'value "<state ...>"' message.
        _LOGGER.error('Could not convert value "%s" of %s to float: %s',
                      temperature, state.entity_id, exc)
        return None
    return temperature
def handle_event(self, event):
    """Listen for new messages on the bus, and add them to Prometheus."""
    state = event.data.get('new_state')
    if state is None:
        return

    entity_id = state.entity_id
    _LOGGER.debug("Handling state update for %s", entity_id)
    domain, _ = hacore.split_entity_id(entity_id)

    # Filter order: explicit entity excludes win, then domain excludes
    # (overridable by explicit entity includes), then domain includes,
    # then entity includes (only consulted when no excludes are set).
    if entity_id in self.exclude:
        return
    if domain in self.exclude and entity_id not in self.include_entities:
        return
    if self.include_domains and domain not in self.include_domains:
        return
    if not self.exclude and (self.include_entities and
                             entity_id not in self.include_entities):
        return

    # Dispatch to a per-domain handler (e.g. _handle_sensor) if defined.
    handler = '_handle_{}'.format(domain)

    if hasattr(self, handler):
        getattr(self, handler)(state)
def __init__(self, camera_entity, faces, name, tolerance): """Initialize Dlib face identify entry.""" # pylint: disable=import-error import face_recognition super().__init__() self._camera = camera_entity if name: self._name = name else: self._name = "Dlib Face {0}".format( split_entity_id(camera_entity)[1]) self._faces = {} for face_name, face_file in faces.items(): try: image = face_recognition.load_image_file(face_file) self._faces[face_name] = \ face_recognition.face_encodings(image)[0] except IndexError as err: _LOGGER.error("Failed to parse %s. Error: %s", face_file, err) self._tolerance = tolerance
async def _entity_registry_updated(self, event) -> None:
    """Handle entity registry updated."""
    if event.data["action"] != "remove":
        return

    entity_id = event.data[ATTR_ENTITY_ID]

    # Only device_tracker removals are relevant to persons.
    if split_entity_id(entity_id)[0] != "device_tracker":
        return

    # Iterate a copy: async_update_item mutates self.data.
    for person in list(self.data.values()):
        if entity_id not in person[CONF_DEVICE_TRACKERS]:
            continue

        # Drop the removed tracker from this person's tracker list.
        await self.async_update_item(
            person[collection.CONF_ID],
            {
                CONF_DEVICE_TRACKERS: [
                    devt
                    for devt in person[CONF_DEVICE_TRACKERS]
                    if devt != entity_id
                ]
            },
        )
def from_event(event):
    """Create object from a state_changed event."""
    entity_id = event.data["entity_id"]
    state = event.data.get("new_state")

    dbstate = States(entity_id=entity_id)

    # State got deleted
    if state is None:
        dbstate.state = ""
        dbstate.domain = split_entity_id(entity_id)[0]
        dbstate.attributes = "{}"
        dbstate.last_changed = event.time_fired
        dbstate.last_updated = event.time_fired
    else:
        dbstate.domain = state.domain
        dbstate.state = state.state
        # Compact separators keep the stored JSON payload small.
        dbstate.attributes = json.dumps(dict(state.attributes),
                                        cls=JSONEncoder,
                                        separators=(",", ":"))
        dbstate.last_changed = state.last_changed
        dbstate.last_updated = state.last_updated

    return dbstate
async def _async_add_entity(
    self, entity, update_before_add, entity_registry, device_registry
):
    """Add an entity to the platform.

    Wires the entity to hass/platform, configures parallel updates,
    optionally refreshes it, resolves its entity_id (via the registry
    when it has a unique_id), and finally registers it in
    self.entities and adds it to hass.

    Raises ValueError when entity is None and HomeAssistantError when
    the resulting entity_id is invalid or already in use.
    """
    if entity is None:
        raise ValueError("Entity cannot be None")

    entity.hass = self.hass
    entity.platform = self

    # Async entity
    # PARALLEL_UPDATES == None: entity.parallel_updates = None
    # PARALLEL_UPDATES == 0: entity.parallel_updates = None
    # PARALLEL_UPDATES > 0: entity.parallel_updates = Semaphore(p)
    # Sync entity
    # PARALLEL_UPDATES == None: entity.parallel_updates = Semaphore(1)
    # PARALLEL_UPDATES == 0: entity.parallel_updates = None
    # PARALLEL_UPDATES > 0: entity.parallel_updates = Semaphore(p)
    if hasattr(entity, "async_update") and not self.parallel_updates:
        entity.parallel_updates = None
    elif not hasattr(entity, "async_update") and self.parallel_updates == 0:
        entity.parallel_updates = None
    else:
        entity.parallel_updates = self._get_parallel_updates_semaphore()

    # Update properties before we generate the entity_id
    if update_before_add:
        try:
            await entity.async_device_update(warning=False)
        except Exception:  # pylint: disable=broad-except
            self.logger.exception("%s: Error on device update!", self.platform_name)
            return

    suggested_object_id = None

    # Get entity_id from unique ID registration
    if entity.unique_id is not None:
        if entity.entity_id is not None:
            suggested_object_id = split_entity_id(entity.entity_id)[1]
        else:
            suggested_object_id = entity.name

        if self.entity_namespace is not None:
            suggested_object_id = f"{self.entity_namespace} {suggested_object_id}"

        if self.config_entry is not None:
            config_entry_id = self.config_entry.entry_id
        else:
            config_entry_id = None

        device_info = entity.device_info
        device_id = None

        # Register (or look up) the device first so the entity registry
        # entry can be linked to it.
        if config_entry_id is not None and device_info is not None:
            processed_dev_info = {"config_entry_id": config_entry_id}
            for key in (
                "connections",
                "identifiers",
                "manufacturer",
                "model",
                "name",
                "sw_version",
                "via_device",
            ):
                if key in device_info:
                    processed_dev_info[key] = device_info[key]

            device = device_registry.async_get_or_create(**processed_dev_info)
            if device:
                device_id = device.id

        disabled_by: Optional[str] = None
        if not entity.entity_registry_enabled_default:
            disabled_by = DISABLED_INTEGRATION

        entry = entity_registry.async_get_or_create(
            self.domain,
            self.platform_name,
            entity.unique_id,
            suggested_object_id=suggested_object_id,
            config_entry=self.config_entry,
            device_id=device_id,
            known_object_ids=self.entities.keys(),
            disabled_by=disabled_by,
            capabilities=entity.capability_attributes,
            supported_features=entity.supported_features,
            device_class=entity.device_class,
            unit_of_measurement=entity.unit_of_measurement,
        )

        entity.registry_entry = entry
        entity.entity_id = entry.entity_id

        # Disabled entities are registered but never added to hass.
        if entry.disabled:
            self.logger.info(
                "Not adding entity %s because it's disabled",
                entry.name or entity.name
                or f'"{self.platform_name} {entity.unique_id}"',
            )
            return

    # We won't generate an entity ID if the platform has already set one
    # We will however make sure that platform cannot pick a registered ID
    elif entity.entity_id is not None and entity_registry.async_is_registered(
        entity.entity_id
    ):
        # If entity already registered, convert entity id to suggestion
        suggested_object_id = split_entity_id(entity.entity_id)[1]
        entity.entity_id = None

    # Generate entity ID
    if entity.entity_id is None:
        suggested_object_id = (
            suggested_object_id or entity.name or DEVICE_DEFAULT_NAME
        )
        if self.entity_namespace is not None:
            suggested_object_id = f"{self.entity_namespace} {suggested_object_id}"
        entity.entity_id = entity_registry.async_generate_entity_id(
            self.domain, suggested_object_id, self.entities.keys()
        )

    # Make sure it is valid in case an entity set the value themselves
    if not valid_entity_id(entity.entity_id):
        raise HomeAssistantError(f"Invalid entity id: {entity.entity_id}")

    # An existing state that was "restored" does not count as a clash.
    already_exists = entity.entity_id in self.entities
    if not already_exists:
        existing = self.hass.states.get(entity.entity_id)
        if existing and not existing.attributes.get("restored"):
            already_exists = True

    if already_exists:
        msg = f"Entity id already exists: {entity.entity_id}"
        if entity.unique_id is not None:
            msg += f". Platform {self.platform_name} does not generate unique IDs"
        raise HomeAssistantError(msg)

    entity_id = entity.entity_id
    self.entities[entity_id] = entity
    # Self-cleanup: drop from the platform's entity map on removal.
    entity.async_on_remove(lambda: self.entities.pop(entity_id))

    await entity.async_internal_added_to_hass()
    await entity.async_added_to_hass()

    await entity.async_update_ha_state()
async def test_reload_service(hass, running):
    """Verify the reload service.

    `running` parametrizes the scenario: "no" (the script is never
    started before reloading), "same" (reload re-uses the same object
    id) or any other value (reload replaces the script with a new
    object id "test2").
    """
    event = "test_event"
    event_flag = asyncio.Event()

    @callback
    def event_handler(event):
        # Signals that the script fired its event step.
        event_flag.set()

    hass.bus.async_listen_once(event, event_handler)
    hass.states.async_set("test.script", "off")

    # Initial setup: a script that fires an event, then blocks on a
    # wait_template so it stays "running".
    assert await async_setup_component(
        hass,
        "script",
        {
            "script": {
                "test": {
                    "sequence": [
                        {"event": event},
                        {
                            "wait_template":
                                "{{ is_state('test.script', 'on') }}"
                        },
                    ]
                }
            }
        },
    )

    assert hass.states.get(ENTITY_ID) is not None
    assert hass.services.has_service(script.DOMAIN, "test")

    if running != "no":
        # Start the script and wait until it reaches the event step.
        _, object_id = split_entity_id(ENTITY_ID)
        await hass.services.async_call(DOMAIN, object_id)
        await asyncio.wait_for(event_flag.wait(), 1)

        assert script.is_on(hass, ENTITY_ID)

    object_id = "test" if running == "same" else "test2"
    # Reload with a replacement configuration.
    with patch(
        "homeassistant.config.load_yaml_config_file",
        return_value={
            "script": {
                object_id: {
                    "sequence": [{"delay": {"seconds": 5}}]
                }
            }
        },
    ):
        await hass.services.async_call(DOMAIN, SERVICE_RELOAD, blocking=True)
        await hass.async_block_till_done()

    if running != "same":
        # The original script must be gone and the new one registered.
        assert hass.states.get(ENTITY_ID) is None
        assert not hass.services.has_service(script.DOMAIN, "test")

        assert hass.states.get("script.test2") is not None
        assert hass.services.has_service(script.DOMAIN, "test2")

    else:
        assert hass.states.get(ENTITY_ID) is not None
        assert hass.services.has_service(script.DOMAIN, "test")
def _domain_default(self) -> str:
    """Compute domain value."""
    domain, _ = split_entity_id(self.entity_id)
    return domain
def _async_update_entity(
    self,
    entity_id,
    *,
    name=_UNDEF,
    icon=_UNDEF,
    config_entry_id=_UNDEF,
    new_entity_id=_UNDEF,
    device_id=_UNDEF,
    new_unique_id=_UNDEF,
    disabled_by=_UNDEF,
    capabilities=_UNDEF,
    supported_features=_UNDEF,
    device_class=_UNDEF,
    unit_of_measurement=_UNDEF,
    original_name=_UNDEF,
    original_icon=_UNDEF,
):
    """Private facing update properties method.

    Applies the given (non-_UNDEF) properties to the registry entry for
    entity_id and returns the resulting entry; returns the old entry
    unchanged when nothing differs. Fires EVENT_ENTITY_REGISTRY_UPDATED
    and schedules a save when a change was made.

    Raises ValueError when new_entity_id is already registered,
    invalid, or in a different domain, or when new_unique_id collides
    with another entry of the same domain/platform.
    """
    old = self.entities[entity_id]

    changes = {}

    # Collect simple attribute changes; the _UNDEF sentinel means
    # "leave this property untouched".
    for attr_name, value in (
        ("name", name),
        ("icon", icon),
        ("config_entry_id", config_entry_id),
        ("device_id", device_id),
        ("disabled_by", disabled_by),
        ("capabilities", capabilities),
        ("supported_features", supported_features),
        ("device_class", device_class),
        ("unit_of_measurement", unit_of_measurement),
        ("original_name", original_name),
        ("original_icon", original_icon),
    ):
        if value is not _UNDEF and value != getattr(old, attr_name):
            changes[attr_name] = value

    if new_entity_id is not _UNDEF and new_entity_id != old.entity_id:
        if self.async_is_registered(new_entity_id):
            raise ValueError("Entity is already registered")

        if not valid_entity_id(new_entity_id):
            raise ValueError("Invalid entity ID")

        if split_entity_id(new_entity_id)[0] != split_entity_id(entity_id)[0]:
            raise ValueError("New entity ID should be same domain")

        # Re-key the registry under the new entity id.
        self.entities.pop(entity_id)
        entity_id = changes["entity_id"] = new_entity_id

    if new_unique_id is not _UNDEF:
        # A unique_id must be unique per (domain, platform).
        conflict = next(
            (
                entity
                for entity in self.entities.values()
                if entity.unique_id == new_unique_id
                and entity.domain == old.domain
                and entity.platform == old.platform
            ),
            None,
        )
        if conflict:
            raise ValueError(
                f"Unique id '{new_unique_id}' is already in use by "
                f"'{conflict.entity_id}'"
            )
        changes["unique_id"] = new_unique_id

    if not changes:
        return old

    # RegistryEntry is immutable; evolve creates the updated copy.
    new = self.entities[entity_id] = attr.evolve(old, **changes)

    self.async_schedule_save()

    data = {"action": "update", "entity_id": entity_id, "changes": list(changes)}

    if old.entity_id != entity_id:
        data["old_entity_id"] = old.entity_id

    self.hass.bus.async_fire(EVENT_ENTITY_REGISTRY_UPDATED, data)

    return new
def test_split_entity_id():
    """Test split_entity_id."""
    expected = ["domain", "object_id"]
    assert ha.split_entity_id("domain.object_id") == expected
def async_update_entity(
    self,
    entity_id: str,
    *,
    area_id: str | None | UndefinedType = UNDEFINED,
    capabilities: Mapping[str, Any] | None | UndefinedType = UNDEFINED,
    config_entry_id: str | None | UndefinedType = UNDEFINED,
    device_class: str | None | UndefinedType = UNDEFINED,
    device_id: str | None | UndefinedType = UNDEFINED,
    disabled_by: RegistryEntryDisabler | None | UndefinedType = UNDEFINED,
    # Type str (ENTITY_CATEG*) is deprecated as of 2021.12, use EntityCategory
    entity_category: EntityCategory | str | None | UndefinedType = UNDEFINED,
    icon: str | None | UndefinedType = UNDEFINED,
    name: str | None | UndefinedType = UNDEFINED,
    new_entity_id: str | UndefinedType = UNDEFINED,
    new_unique_id: str | UndefinedType = UNDEFINED,
    original_device_class: str | None | UndefinedType = UNDEFINED,
    original_icon: str | None | UndefinedType = UNDEFINED,
    original_name: str | None | UndefinedType = UNDEFINED,
    supported_features: int | UndefinedType = UNDEFINED,
    unit_of_measurement: str | None | UndefinedType = UNDEFINED,
) -> RegistryEntry:
    """Private facing update properties method.

    Applies the given (non-UNDEFINED) properties to the entry for
    entity_id and returns the result; returns the old entry when
    nothing differs. Fires EVENT_ENTITY_REGISTRY_UPDATED with the OLD
    values of the changed keys and schedules a save.

    Raises ValueError for an already-registered/invalid/cross-domain
    new_entity_id or a conflicting new_unique_id.
    """
    old = self.entities[entity_id]

    new_values: dict[str, Any] = {}  # Dict with new key/value pairs
    old_values: dict[str, Any] = {}  # Dict with old key/value pairs

    # Accept a plain string for backward compatibility but warn; it is
    # coerced to RegistryEntryDisabler below.
    if isinstance(disabled_by, str) and not isinstance(
        disabled_by, RegistryEntryDisabler
    ):
        report(  # type: ignore[unreachable]
            "uses str for entity registry disabled_by. This is deprecated and will "
            "stop working in Home Assistant 2022.3, it should be updated to use "
            "RegistryEntryDisabler instead",
            error_if_core=False,
        )
        disabled_by = RegistryEntryDisabler(disabled_by)

    for attr_name, value in (
        ("area_id", area_id),
        ("capabilities", capabilities),
        ("config_entry_id", config_entry_id),
        ("device_class", device_class),
        ("device_id", device_id),
        ("disabled_by", disabled_by),
        ("entity_category", entity_category),
        ("icon", icon),
        ("name", name),
        ("original_device_class", original_device_class),
        ("original_icon", original_icon),
        ("original_name", original_name),
        ("supported_features", supported_features),
        ("unit_of_measurement", unit_of_measurement),
    ):
        if value is not UNDEFINED and value != getattr(old, attr_name):
            new_values[attr_name] = value
            old_values[attr_name] = getattr(old, attr_name)

    if new_entity_id is not UNDEFINED and new_entity_id != old.entity_id:
        if self.async_is_registered(new_entity_id):
            raise ValueError("Entity with this ID is already registered")

        if not valid_entity_id(new_entity_id):
            raise ValueError("Invalid entity ID")

        if split_entity_id(new_entity_id)[0] != split_entity_id(entity_id)[0]:
            raise ValueError("New entity ID should be same domain")

        # Re-key the registry under the new entity id.
        self.entities.pop(entity_id)
        entity_id = new_values["entity_id"] = new_entity_id
        old_values["entity_id"] = old.entity_id

    if new_unique_id is not UNDEFINED:
        # A unique_id must be unique per (domain, platform).
        conflict_entity_id = self.async_get_entity_id(
            old.domain, old.platform, new_unique_id
        )
        if conflict_entity_id:
            raise ValueError(
                f"Unique id '{new_unique_id}' is already in use by "
                f"'{conflict_entity_id}'"
            )
        new_values["unique_id"] = new_unique_id
        old_values["unique_id"] = old.unique_id

    if not new_values:
        return old

    # RegistryEntry is immutable; evolve creates the updated copy.
    new = self.entities[entity_id] = attr.evolve(old, **new_values)

    self.async_schedule_save()

    data: dict[str, str | dict[str, Any]] = {
        "action": "update",
        "entity_id": entity_id,
        "changes": old_values,
    }

    if old.entity_id != entity_id:
        data["old_entity_id"] = old.entity_id

    self.hass.bus.async_fire(EVENT_ENTITY_REGISTRY_UPDATED, data)

    return new
def _sorted_states_to_json(
    hass,
    session,
    states,
    start_time,
    entity_ids,
    filters=None,
    include_start_time_state=True,
    minimal_response=False,
):
    """Convert SQL results into JSON friendly data structure.

    This takes our state list and turns it into a JSON friendly data
    structure {'entity_id': [list of states], 'entity_id2': [list of states]}

    States must be sorted by entity_id and last_updated

    We also need to go back and create a synthetic zero data point for
    each list of states, otherwise our graphs won't start on the Y
    axis correctly.

    With minimal_response, intermediate states are reduced to
    {state, last_changed} dicts; only the first and last state of each
    entity are full (lazy) State objects.
    """
    result = defaultdict(list)
    # Set all entity IDs to empty lists in result set to maintain the order
    if entity_ids is not None:
        for ent_id in entity_ids:
            result[ent_id] = []

    # Get the states at the start time
    timer_start = time.perf_counter()
    if include_start_time_state:
        run = recorder.run_information_from_instance(hass, start_time)
        for state in _get_states_with_session(
            session, start_time, entity_ids, run=run, filters=filters
        ):
            # Synthetic data point pinned to the window start.
            state.last_changed = start_time
            state.last_updated = start_time
            result[state.entity_id].append(state)

    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug("getting %d first datapoints took %fs", len(result), elapsed)

    # Called in a tight loop so cache the function
    # here
    _process_timestamp_to_utc_isoformat = process_timestamp_to_utc_isoformat

    # Append all changes to it
    # (groupby relies on `states` being pre-sorted by entity_id)
    for ent_id, group in groupby(states, lambda state: state.entity_id):
        domain = split_entity_id(ent_id)[0]
        ent_results = result[ent_id]
        if not minimal_response or domain in NEED_ATTRIBUTE_DOMAINS:
            ent_results.extend(
                [
                    native_state
                    for native_state in (LazyState(db_state) for db_state in group)
                    if (
                        domain != SCRIPT_DOMAIN
                        or native_state.attributes.get(ATTR_CAN_CANCEL)
                    )
                ]
            )
            continue

        # With minimal response we only provide a native
        # State for the first and last response. All the states
        # in-between only provide the "state" and the
        # "last_changed".
        if not ent_results:
            ent_results.append(LazyState(next(group)))

        prev_state = ent_results[-1]
        initial_state_count = len(ent_results)

        for db_state in group:
            # With minimal response we do not care about attribute
            # changes so we can filter out duplicate states
            if db_state.state == prev_state.state:
                continue

            ent_results.append(
                {
                    STATE_KEY: db_state.state,
                    LAST_CHANGED_KEY: _process_timestamp_to_utc_isoformat(
                        db_state.last_changed
                    ),
                }
            )
            prev_state = db_state

        if prev_state and len(ent_results) != initial_state_count:
            # There was at least one state change
            # replace the last minimal state with
            # a full state
            ent_results[-1] = LazyState(prev_state)

    # Filter out the empty lists if some states had 0 results.
    return {key: val for key, val in result.items() if val}
def _update_state(self):  # pylint: disable=r0914,r0912,r0915
    """Update the sensor state.

    Computes the average over all source entities. Without a period the
    current states are averaged; with a period each source contributes
    its time-weighted average over [start, end] taken from recorder
    history. Also detects temperature sources on first run and switches
    the sensor into temperature mode.
    """
    _LOGGER.debug('Updating sensor "%s"', self.name)
    start = end = start_ts = end_ts = None
    p_period = self._period

    # Parse templates
    self._update_period()

    if self._period is not None:
        now = datetime.datetime.now()
        start, end = self._period
        if p_period is None:
            p_start = p_end = now
        else:
            p_start, p_end = p_period

        # Convert times to UTC
        start = dt_util.as_utc(start)
        end = dt_util.as_utc(end)
        p_start = dt_util.as_utc(p_start)
        p_end = dt_util.as_utc(p_end)

        # Compute integer timestamps
        now_ts = math.floor(dt_util.as_timestamp(now))
        start_ts = math.floor(dt_util.as_timestamp(start))
        end_ts = math.floor(dt_util.as_timestamp(end))
        p_start_ts = math.floor(dt_util.as_timestamp(p_start))
        p_end_ts = math.floor(dt_util.as_timestamp(p_end))

        # If period has not changed and current time after the period end..
        if start_ts == p_start_ts and end_ts == p_end_ts and end_ts <= now_ts:
            # Don't compute anything as the value cannot have changed
            return

    self.available_sources = 0
    values = []
    self.count = 0
    self.min_value = self.max_value = None

    # pylint: disable=too-many-nested-blocks
    for entity_id in self.sources:
        _LOGGER.debug('Processing entity "%s"', entity_id)

        state = self._hass.states.get(entity_id)  # type: LazyState

        if state is None:
            _LOGGER.error('Unable to find an entity "%s"', entity_id)
            continue

        # First pass only: decide whether this is a temperature sensor
        # (by device class, domain, or unit) and adjust the sensor's
        # own device class/unit/icon accordingly.
        if self._temperature_mode is None:
            domain = split_entity_id(state.entity_id)[0]
            self._device_class = state.attributes.get(ATTR_DEVICE_CLASS)
            self._unit_of_measurement = state.attributes.get(
                ATTR_UNIT_OF_MEASUREMENT)
            self._temperature_mode = (
                self._device_class == DEVICE_CLASS_TEMPERATURE
                or domain in (WEATHER_DOMAIN, CLIMATE_DOMAIN,
                              WATER_HEATER_DOMAIN)
                or self._unit_of_measurement in TEMPERATURE_UNITS)
            if self._temperature_mode:
                _LOGGER.debug("%s is a temperature entity.", entity_id)
                self._device_class = DEVICE_CLASS_TEMPERATURE
                self._unit_of_measurement = \
                    self._hass.config.units.temperature_unit
            else:
                _LOGGER.debug("%s is NOT a temperature entity.", entity_id)
                self._icon = state.attributes.get(ATTR_ICON)

        value = 0
        elapsed = 0

        if self._period is None:
            # Get current state
            value = self._get_state_value(state)
            _LOGGER.debug("Current state: %s", value)

        else:
            # Get history between start and now
            history_list = history.state_changes_during_period(
                self.hass, start, end, str(entity_id))

            if entity_id not in history_list.keys():
                # No history in the window: fall back to current state.
                value = self._get_state_value(state)
                _LOGGER.warning(
                    'Historical data not found for entity "%s". '
                    "Current state used: %s",
                    entity_id,
                    value,
                )
            else:
                # Get the first state
                item = history.get_state(self.hass, start, entity_id)
                _LOGGER.debug("Initial historical state: %s", item)
                last_state = None
                last_time = start_ts
                if item is not None and self._has_state(item.state):
                    last_state = self._get_state_value(item)

                # Get the other states
                # (time-weighted sum: each state weighted by how long
                # it was active)
                for item in history_list.get(entity_id):
                    _LOGGER.debug("Historical state: %s", item)
                    current_state = self._get_state_value(item)
                    current_time = item.last_changed.timestamp()

                    if last_state is not None:
                        last_elapsed = current_time - last_time
                        value += last_state * last_elapsed
                        elapsed += last_elapsed

                    last_state = current_state
                    last_time = current_time

                # Count time elapsed between last history state and now
                if last_state is not None:
                    last_elapsed = end_ts - last_time
                    value += last_state * last_elapsed
                    elapsed += last_elapsed

                if elapsed:
                    value /= elapsed
                _LOGGER.debug("Historical average state: %s", value)

        if isinstance(value, numbers.Number):
            values.append(value)
            self.available_sources += 1

    if values:
        self._state = round(sum(values) / len(values), self._precision)
    else:
        self._state = None
    _LOGGER.debug("Total average state: %s", self._state)
def _filter_lifecycle(self, entity_id: str) -> bool:
    """Template should re-render if the state changes."""
    if entity_id in self._entities:
        return True
    return split_entity_id(entity_id)[0] in self._domains
def test_split_entity_id():
    """Test split_entity_id."""
    result = ha.split_entity_id('domain.object_id')
    assert result == ['domain', 'object_id']
def entity_filter_3(entity_id: str) -> bool:
    """Return filter function for case 3."""
    domain, _ = split_entity_id(entity_id)
    return not entity_excluded(domain, entity_id)
def entity_filter_2(entity_id: str) -> bool:
    """Return filter function for case 2."""
    domain, _ = split_entity_id(entity_id)
    return entity_included(domain, entity_id)
def _exclude_events(events, config):
    """Get list of filtered events.

    Applies the logbook include/exclude configuration (domains and
    entities) to `events` and returns the events that pass. State
    change events are additionally filtered for new entities, entity
    removal, attribute-only updates, auto groups, and hidden entities.
    """
    excluded_entities = []
    excluded_domains = []
    included_entities = []
    included_domains = []
    exclude = config.get(CONF_EXCLUDE)
    if exclude:
        excluded_entities = exclude[CONF_ENTITIES]
        excluded_domains = exclude[CONF_DOMAINS]
    include = config.get(CONF_INCLUDE)
    if include:
        included_entities = include[CONF_ENTITIES]
        included_domains = include[CONF_DOMAINS]

    filtered_events = []
    for event in events:
        domain, entity_id = None, None

        if event.event_type == EVENT_STATE_CHANGED:
            entity_id = event.data.get('entity_id')
            if entity_id is None:
                continue

            # Do not report on new entities
            if event.data.get('old_state') is None:
                continue

            new_state = event.data.get('new_state')

            # Do not report on entity removal
            if not new_state:
                continue

            attributes = new_state.get('attributes', {})

            # If last_changed != last_updated only attributes have changed
            # we do not report on that yet.
            last_changed = new_state.get('last_changed')
            last_updated = new_state.get('last_updated')
            if last_changed != last_updated:
                continue

            domain = split_entity_id(entity_id)[0]

            # Also filter auto groups.
            if domain == 'group' and attributes.get('auto', False):
                continue

            # exclude entities which are customized hidden
            hidden = attributes.get(ATTR_HIDDEN, False)
            if hidden:
                continue

        elif event.event_type == EVENT_LOGBOOK_ENTRY:
            domain = event.data.get(ATTR_DOMAIN)
            entity_id = event.data.get(ATTR_ENTITY_ID)

        if domain or entity_id:
            # Each branch drops the event unless it is explicitly
            # whitelisted by included_entities.
            # filter if only excluded is configured for this domain
            if excluded_domains and domain in excluded_domains and \
                    not included_domains:
                if (included_entities and
                        entity_id not in included_entities) \
                        or not included_entities:
                    continue
            # filter if only included is configured for this domain
            elif not excluded_domains and included_domains and \
                    domain not in included_domains:
                if (included_entities and
                        entity_id not in included_entities) \
                        or not included_entities:
                    continue
            # filter if included and excluded is configured for this domain
            elif excluded_domains and included_domains and \
                    (domain not in included_domains or
                     domain in excluded_domains):
                if (included_entities and
                        entity_id not in included_entities) \
                        or not included_entities \
                        or domain in excluded_domains:
                    continue
            # filter if only included is configured for this entity
            elif not excluded_domains and not included_domains and \
                    included_entities and \
                    entity_id not in included_entities:
                continue
            # check if logbook entry is excluded for this entity
            if entity_id in excluded_entities:
                continue
        filtered_events.append(event)
    return filtered_events
def humanify(events):
    """Generate a converted list of events into Entry objects.

    Will try to group events if possible:
    - if 2+ sensor updates in GROUP_BY_MINUTES, show last
    - if home assistant stop and start happen in same minute call it
      restarted

    Works in two passes per batch: first collect last-sensor and
    start/stop bookkeeping, then yield Entry objects.
    """
    domain_prefixes = tuple('{}.'.format(dom) for dom in CONTINUOUS_DOMAINS)

    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
            events,
            lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # Group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data.get('entity_id')
                # NOTE(review): entity_id may be None for malformed
                # events; .startswith would raise AttributeError —
                # confirm upstream guarantees entity_id is present.
                if entity_id.startswith(domain_prefixes):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired.minute not in start_stop_events:
                    continue

                # Stop followed by start in the same minute: restart.
                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:

                to_state = State.from_dict(event.data.get('new_state'))

                domain = to_state.domain

                # Skip all but the last sensor state
                if domain in CONTINUOUS_DOMAINS and \
                        event != last_sensor_event[to_state.entity_id]:
                    continue

                # Don't show continuous sensor value changes in the logbook
                if domain in CONTINUOUS_DOMAINS and \
                        to_state.attributes.get('unit_of_measurement'):
                    continue

                yield Entry(
                    event.time_fired,
                    name=to_state.name,
                    message=_entry_message_from_state(
                        domain, to_state),
                    domain=domain,
                    entity_id=to_state.entity_id)

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                # Suppressed when paired with a stop (restart case).
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield Entry(
                    event.time_fired, "Home Assistant", "started",
                    domain=HA_DOMAIN)

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield Entry(
                    event.time_fired, "Home Assistant", action,
                    domain=HA_DOMAIN)

            elif event.event_type == EVENT_LOGBOOK_ENTRY:
                domain = event.data.get(ATTR_DOMAIN)
                entity_id = event.data.get(ATTR_ENTITY_ID)
                if domain is None and entity_id is not None:
                    # Derive the domain from the entity id when absent.
                    try:
                        domain = split_entity_id(str(entity_id))[0]
                    except IndexError:
                        pass

                yield Entry(
                    event.time_fired, event.data.get(ATTR_NAME),
                    event.data.get(ATTR_MESSAGE), domain,
                    entity_id)