def make_event(entity_id):
    """Build a mock state_changed event payload for tests."""
    domain = split_entity_id(entity_id)[0]
    # A minimal stand-in for a real State object.
    fake_state = mock.MagicMock(
        state="not blank",
        domain=domain,
        entity_id=entity_id,
        object_id="entity",
        attributes={},
    )
    return mock.MagicMock(data={"new_state": fake_state}, time_fired=12345)
def __init__(self, *args):
    """Initialize a Switch accessory object."""
    super().__init__(*args, category=CATEGORY_SWITCH)
    self._domain = split_entity_id(self.entity_id)[0]
    self._flag_state = False
    current = self.opp.states.get(self.entity_id)
    self.activate_only = self.is_activate(current)
    switch_service = self.add_preload_service(SERV_SWITCH)
    self.char_on = switch_service.configure_char(
        CHAR_ON, value=False, setter_callback=self.set_state
    )
def __init__(
    self, opp, driver, name, entity_id, aid, config, category=CATEGORY_OTHER
):
    """Initialize an Accessory object.

    Sets up the HomeKit accessory info service and, when a battery level
    is available (on the entity itself or via a linked battery sensor),
    preloads the battery service characteristics.
    """
    super().__init__(driver, name, aid=aid)
    model = split_entity_id(entity_id)[0].replace("_", " ").title()
    self.set_info_service(
        firmware_revision=__version__,
        manufacturer=MANUFACTURER,
        model=model,
        serial_number=entity_id,
    )
    self.category = category
    self.config = config or {}
    self.entity_id = entity_id
    self.opp = opp
    self.debounce = {}
    self._support_battery_level = False
    self._support_battery_charging = True
    self.linked_battery_sensor = self.config.get(CONF_LINKED_BATTERY_SENSOR)
    self.low_battery_threshold = self.config.get(
        CONF_LOW_BATTERY_THRESHOLD, DEFAULT_LOW_BATTERY_THRESHOLD
    )

    # Add battery service if available.  (This used to be a bare string
    # literal, which is a no-op statement rather than a comment.)
    battery_found = self.opp.states.get(self.entity_id).attributes.get(
        ATTR_BATTERY_LEVEL
    )
    if self.linked_battery_sensor:
        state = self.opp.states.get(self.linked_battery_sensor)
        if state is not None:
            battery_found = state.state
        else:
            # BUGFIX: log the sensor id *before* clearing it; previously the
            # attribute was set to None first, so the warning always printed
            # "None" instead of the missing sensor's entity id.
            _LOGGER.warning(
                "%s: Battery sensor state missing: %s",
                self.entity_id,
                self.linked_battery_sensor,
            )
            self.linked_battery_sensor = None
    if battery_found is None:
        return
    _LOGGER.debug("%s: Found battery level", self.entity_id)
    self._support_battery_level = True
    serv_battery = self.add_preload_service(SERV_BATTERY_SERVICE)
    self._char_battery = serv_battery.configure_char(CHAR_BATTERY_LEVEL, value=0)
    self._char_charging = serv_battery.configure_char(CHAR_CHARGING_STATE, value=2)
    self._char_low_battery = serv_battery.configure_char(
        CHAR_STATUS_LOW_BATTERY, value=0
    )
def __init__(self, *args):
    """Initialize a Switch accessory object."""
    super().__init__(*args, category=CATEGORY_SWITCH)
    self._domain = split_entity_id(self.entity_id)[0]
    state = self.opp.states.get(self.entity_id)
    # Reuse the state fetched above instead of querying the state machine
    # a second time (the original called self.opp.states.get twice).
    self.activate_only = self.is_activate(state)
    serv_switch = self.add_preload_service(SERV_SWITCH)
    self.char_on = serv_switch.configure_char(
        CHAR_ON, value=False, setter_callback=self.set_state
    )

    # Set the state so it is in sync on initial
    # GET to avoid an event storm after homekit startup
    self.async_update_state(state)
async def async_handle_turn_service(service):
    """Handle calls to openpeerpower.turn_on/off.

    Expands the requested entity ids, groups them per domain and
    forwards one service call per domain.  NOTE(review): this is a
    closure — `opp` is captured from the enclosing scope.
    """
    entity_ids = await async_extract_entity_ids(opp, service)

    # Generic turn on/off method requires entity id
    if not entity_ids:
        _LOGGER.error(
            "openpeerpower/%s cannot be called without entity_id", service.service
        )
        return

    # Group entity_ids by domain. groupby requires sorted data.
    by_domain = it.groupby(
        sorted(entity_ids), lambda item: op.split_entity_id(item)[0]
    )

    tasks = []

    for domain, ent_ids in by_domain:
        # Calling our own domain again would recurse forever.
        # This leads to endless loop.
        if domain == DOMAIN:
            _LOGGER.warning(
                "Called service openpeerpower.%s with invalid entity IDs %s",
                service.service,
                ", ".join(ent_ids),
            )
            continue

        # We want to block for all calls and only return when all calls
        # have been processed. If a service does not exist it causes a 10
        # second delay while we're blocking waiting for a response.
        # But services can be registered on other HA instances that are
        # listening to the bus too. So as an in between solution, we'll
        # block only if the service is defined in the current HA instance.
        blocking = opp.services.has_service(domain, service.service)

        # Create a new dict for this call
        data = dict(service.data)

        # ent_ids is a generator, convert it to a list.
        data[ATTR_ENTITY_ID] = list(ent_ids)

        tasks.append(
            opp.services.async_call(domain, service.service, data, blocking)
        )

    if tasks:
        await asyncio.gather(*tasks)
def validate_entity_config(values):
    """Validate config entry for CONF_ENTITY."""
    if not isinstance(values, dict):
        raise vol.Invalid("expected a dictionary")

    # Domains whose config maps straight to a schema with no extra
    # post-processing; everything else is handled explicitly below.
    plain_schemas = {
        "camera": CAMERA_SCHEMA,
        "switch": SWITCH_TYPE_SCHEMA,
        "humidifier": HUMIDIFIER_SCHEMA,
        "cover": COVER_SCHEMA,
    }

    entities = {}
    for entity_id, config in values.items():
        entity = cv.entity_id(entity_id)
        domain, _ = split_entity_id(entity)

        if not isinstance(config, dict):
            raise vol.Invalid(f"The configuration for {entity} must be a dictionary.")

        if domain in ("alarm_control_panel", "lock"):
            config = CODE_SCHEMA(config)
        elif domain == media_player.const.DOMAIN:
            config = FEATURE_SCHEMA(config)
            feature_list = {}
            for feature in config[CONF_FEATURE_LIST]:
                params = MEDIA_PLAYER_SCHEMA(feature)
                key = params.pop(CONF_FEATURE)
                if key in feature_list:
                    raise vol.Invalid(f"A feature can be added only once for {entity}")
                feature_list[key] = params
            config[CONF_FEATURE_LIST] = feature_list
        else:
            config = plain_schemas.get(domain, BASIC_INFO_SCHEMA)(config)

        entities[entity] = config
    return entities
def _async_dispatch_domain_event(
    opp: OpenPeerPower, event: Event, callbacks: dict[str, list]
) -> None:
    """Run every job registered for the event's domain (or MATCH_ALL)."""
    domain = split_entity_id(event.data["entity_id"])[0]
    if domain not in callbacks and MATCH_ALL not in callbacks:
        # Nobody is listening for this domain.
        return
    jobs = callbacks.get(domain, []) + callbacks.get(MATCH_ALL, [])
    for job in jobs:
        try:
            opp.async_run_opp_job(job, event)
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                "Error while processing event %s for domain %s", event, domain
            )
def __init__(
    self, api, camera_entity, name, save_file_folder, save_timestamped_file
):
    """Init."""
    self._api = api
    self._camera = camera_entity
    # Fall back to a name derived from the camera's object id.
    self._name = (
        name if name else f"sighthound_{split_entity_id(camera_entity)[1]}"
    )
    self._state = None
    self._last_detection = None
    self._image_width = None
    self._image_height = None
    self._save_file_folder = save_file_folder
    self._save_timestamped_file = save_timestamped_file
def _create_state_changed_event_from_old_new(
    entity_id, event_time_fired, old_state, new_state
):
    """Create a state changed event from an old and new state."""
    attributes = {}
    if new_state is not None:
        attributes = new_state.get("attributes")
    attributes_json = json.dumps(attributes, cls=JSONEncoder)
    if attributes_json == "null":
        attributes_json = "{}"
    # BUGFIX: the field list was missing commas, so adjacent string
    # literals concatenated into one bogus field name.  It happened to
    # work because the values are assigned as class attributes below,
    # but the field list is now spelled correctly.
    row = collections.namedtuple(
        "Row",
        [
            "event_type",
            "event_data",
            "time_fired",
            "context_id",
            "context_user_id",
            "state",
            "entity_id",
            "domain",
            "attributes",
            "state_id",
            "old_state_id",
        ],
    )

    row.event_type = EVENT_STATE_CHANGED
    row.event_data = "{}"
    row.attributes = attributes_json
    row.time_fired = event_time_fired
    row.state = new_state and new_state.get("state")
    row.entity_id = entity_id
    row.domain = entity_id and core.split_entity_id(entity_id)[0]
    row.context_id = None
    row.context_user_id = None
    row.old_state_id = old_state and 1
    row.state_id = new_state and 1

    # pylint: disable=import-outside-toplevel
    from openpeerpower.components import logbook

    return logbook.LazyEventPartialState(row)
def get(self, entity_id: str) -> Dict:
    """Return the merged config for an entity id, caching the result."""
    if entity_id in self._cache:
        return self._cache[entity_id]

    domain = split_entity_id(entity_id)[0]
    merged: Dict = {}
    self._cache[entity_id] = merged

    # Precedence: domain defaults < glob patterns < exact entity match.
    if self._domain is not None and domain in self._domain:
        merged.update(self._domain[domain])
    if self._glob is not None:
        for pattern, values in self._glob.items():
            if pattern.match(entity_id):
                merged.update(values)
    if self._exact is not None and entity_id in self._exact:
        merged.update(self._exact[entity_id])

    return merged
def handle_event(self, event):
    """Listen for new messages on the bus, and add them to Prometheus."""
    state = event.data.get("new_state")
    if state is None:
        return

    entity_id = state.entity_id
    _LOGGER.debug("Handling state update for %s", entity_id)
    domain = hacore.split_entity_id(entity_id)[0]

    if not self._filter(state.entity_id):
        return

    ignored_states = (STATE_UNAVAILABLE, STATE_UNKNOWN)

    # Dispatch to the per-domain handler if one exists.
    handler_name = f"_handle_{domain}"
    handler = getattr(self, handler_name, None)
    if handler is not None and state.state not in ignored_states:
        handler(state)

    labels = self._labels(state)

    state_change = self._metric(
        "state_change", self.prometheus_cli.Counter, "The number of state changes"
    )
    state_change.labels(**labels).inc()

    entity_available = self._metric(
        "entity_available",
        self.prometheus_cli.Gauge,
        "Entity is available (not in the unavailable or unknown state)",
    )
    entity_available.labels(**labels).set(float(state.state not in ignored_states))

    last_updated_time_seconds = self._metric(
        "last_updated_time_seconds",
        self.prometheus_cli.Gauge,
        "The last_updated timestamp",
    )
    last_updated_time_seconds.labels(**labels).set(state.last_updated.timestamp())
async def test_switch_set_state(opp, hk_driver, entity_id, attrs, events):
    """Test if accessory and OPP are updated accordingly."""
    domain = split_entity_id(entity_id)[0]

    opp.states.async_set(entity_id, None, attrs)
    await opp.async_block_till_done()
    acc = Switch(opp, hk_driver, "Switch", entity_id, 2, None)
    await acc.run()
    await opp.async_block_till_done()

    assert acc.aid == 2
    assert acc.category == 8  # Switch
    assert acc.activate_only is False
    assert acc.char_on.value is False

    # The HomeKit characteristic must follow OPP state changes.
    opp.states.async_set(entity_id, STATE_ON, attrs)
    await opp.async_block_till_done()
    assert acc.char_on.value is True

    opp.states.async_set(entity_id, STATE_OFF, attrs)
    await opp.async_block_till_done()
    assert acc.char_on.value is False

    # Set from HomeKit
    call_turn_on = async_mock_service(opp, domain, "turn_on")
    call_turn_off = async_mock_service(opp, domain, "turn_off")

    await opp.async_add_executor_job(acc.char_on.client_update_value, True)
    await opp.async_block_till_done()
    assert call_turn_on
    assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] is None

    await opp.async_add_executor_job(acc.char_on.client_update_value, False)
    await opp.async_block_till_done()
    assert call_turn_off
    assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] is None
def _should_expose_entity_id(self, entity_id):
    """If an entity ID should be exposed."""
    if entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
        return False

    # A user-configured filter takes precedence over per-entity prefs.
    if not self._config["filter"].empty_filter:
        return self._config["filter"](entity_id)

    entity_config = self._prefs.google_entity_configs.get(entity_id, {})
    entity_expose = entity_config.get(PREF_SHOULD_EXPOSE)
    if entity_expose is not None:
        return entity_expose

    default_expose = self._prefs.google_default_expose

    # Backwards compat
    if default_expose is None:
        return True

    return split_entity_id(entity_id)[0] in default_expose
def __init__(self, ip_address, port, username, password, hostname,
             camera_entity, name=None):
    """Init with the API key and model id."""
    super().__init__()
    self._url_check = f"http://{ip_address}:{port}/{CLASSIFIER}/check"
    self._url_teach = f"http://{ip_address}:{port}/{CLASSIFIER}/teach"
    self._username = username
    self._password = password
    self._hostname = hostname
    self._camera = camera_entity
    # Default the name to "<classifier> <camera object id>".
    self._name = name or f"{CLASSIFIER} {split_entity_id(camera_entity)[1]}"
    self._matched = {}
def expand_entity_ids(opp: OpenPeerPower, entity_ids: Iterable[Any]) -> list[str]:
    """Return entity_ids with group entity ids replaced by their members.

    Async friendly.
    """
    found_ids: list[str] = []
    for entity_id in entity_ids:
        # Skip non-strings and the special "all"/"none" matchers.
        if not isinstance(entity_id, str) or entity_id in (
            ENTITY_MATCH_NONE,
            ENTITY_MATCH_ALL,
        ):
            continue

        entity_id = entity_id.lower()

        try:
            # If entity_id points at a group, expand it
            domain, _ = ha.split_entity_id(entity_id)

            if domain == DOMAIN:
                child_entities = get_entity_ids(opp, entity_id)
                if entity_id in child_entities:
                    # Guard against a group that contains itself.
                    child_entities = list(child_entities)
                    child_entities.remove(entity_id)
                # Recurse: groups may contain other groups.
                found_ids.extend(
                    ent_id
                    for ent_id in expand_entity_ids(opp, child_entities)
                    if ent_id not in found_ids
                )
            else:
                if entity_id not in found_ids:
                    found_ids.append(entity_id)

        except AttributeError:
            # Raised by split_entity_id if entity_id is not a string
            pass

    return found_ids
async def _entity_registry_updated(self, event) -> None:
    """Handle entity registry updated."""
    if event.data["action"] != "remove":
        return

    entity_id = event.data[ATTR_ENTITY_ID]
    # Only removed device trackers are relevant to persons.
    if split_entity_id(entity_id)[0] != "device_tracker":
        return

    for person in list(self.data.values()):
        if entity_id not in person[CONF_DEVICE_TRACKERS]:
            continue
        remaining = [
            tracker
            for tracker in person[CONF_DEVICE_TRACKERS]
            if tracker != entity_id
        ]
        await self.async_update_item(
            person[collection.CONF_ID], {CONF_DEVICE_TRACKERS: remaining}
        )
def _set_tracked(self, entity_ids):
    """Tuple of entities to be tracked."""
    # tracking: everything we were asked to watch.
    # trackable: the subset whose domain is not excluded.
    if not entity_ids:
        self.tracking = ()
        self.trackable = ()
        return

    excluded_domains = self.opp.data[REG_KEY].exclude_domains

    tracking = []
    trackable = []
    for ent_id in entity_ids:
        lowered = ent_id.lower()
        tracking.append(lowered)
        if split_entity_id(lowered)[0] not in excluded_domains:
            trackable.append(lowered)

    self.trackable = tuple(trackable)
    self.tracking = tuple(tracking)
def from_event(event):
    """Create object from a state_changed event."""
    entity_id = event.data["entity_id"]
    state = event.data.get("new_state")
    dbstate = States(entity_id=entity_id)

    if state is None:
        # State got deleted: store an empty row stamped with the event time.
        dbstate.state = ""
        dbstate.domain = split_entity_id(entity_id)[0]
        dbstate.attributes = "{}"
        dbstate.last_changed = event.time_fired
        dbstate.last_updated = event.time_fired
        return dbstate

    dbstate.domain = state.domain
    dbstate.state = state.state
    dbstate.attributes = json.dumps(dict(state.attributes), cls=JSONEncoder)
    dbstate.last_changed = state.last_changed
    dbstate.last_updated = state.last_updated
    return dbstate
def test_split_entity_id():
    """Test split_entity_id."""
    parts = ha.split_entity_id("domain.object_id")
    assert parts == ["domain", "object_id"]
def _async_update_entity(
    self,
    entity_id: str,
    *,
    name: str | None | UndefinedType = UNDEFINED,
    icon: str | None | UndefinedType = UNDEFINED,
    config_entry_id: str | None | UndefinedType = UNDEFINED,
    new_entity_id: str | UndefinedType = UNDEFINED,
    device_id: str | None | UndefinedType = UNDEFINED,
    area_id: str | None | UndefinedType = UNDEFINED,
    new_unique_id: str | UndefinedType = UNDEFINED,
    disabled_by: str | None | UndefinedType = UNDEFINED,
    capabilities: Mapping[str, Any] | None | UndefinedType = UNDEFINED,
    supported_features: int | UndefinedType = UNDEFINED,
    device_class: str | None | UndefinedType = UNDEFINED,
    unit_of_measurement: str | None | UndefinedType = UNDEFINED,
    original_name: str | None | UndefinedType = UNDEFINED,
    original_icon: str | None | UndefinedType = UNDEFINED,
) -> RegistryEntry:
    """Private facing update properties method.

    Applies only the keyword arguments that were explicitly passed
    (UNDEFINED sentinel means "leave unchanged"), re-registers the
    entry, schedules a save and fires an update event on the bus.
    """
    old = self.entities[entity_id]

    new_values = {}  # Dict with new key/value pairs
    old_values = {}  # Dict with old key/value pairs

    # Collect only the attributes that actually changed.
    for attr_name, value in (
        ("name", name),
        ("icon", icon),
        ("config_entry_id", config_entry_id),
        ("device_id", device_id),
        ("area_id", area_id),
        ("disabled_by", disabled_by),
        ("capabilities", capabilities),
        ("supported_features", supported_features),
        ("device_class", device_class),
        ("unit_of_measurement", unit_of_measurement),
        ("original_name", original_name),
        ("original_icon", original_icon),
    ):
        if value is not UNDEFINED and value != getattr(old, attr_name):
            new_values[attr_name] = value
            old_values[attr_name] = getattr(old, attr_name)

    if new_entity_id is not UNDEFINED and new_entity_id != old.entity_id:
        if self.async_is_registered(new_entity_id):
            raise ValueError("Entity with this ID is already registered")

        if not valid_entity_id(new_entity_id):
            raise ValueError("Invalid entity ID")

        # Renaming must not move the entity to another domain.
        if split_entity_id(new_entity_id)[0] != split_entity_id(entity_id)[0]:
            raise ValueError("New entity ID should be same domain")

        self.entities.pop(entity_id)
        entity_id = new_values["entity_id"] = new_entity_id
        old_values["entity_id"] = old.entity_id

    if new_unique_id is not UNDEFINED:
        conflict_entity_id = self.async_get_entity_id(
            old.domain, old.platform, new_unique_id
        )
        if conflict_entity_id:
            raise ValueError(
                f"Unique id '{new_unique_id}' is already in use by "
                f"'{conflict_entity_id}'"
            )
        new_values["unique_id"] = new_unique_id
        old_values["unique_id"] = old.unique_id

    if not new_values:
        # Nothing changed — return the existing entry untouched.
        return old

    self._remove_index(old)
    new = attr.evolve(old, **new_values)
    self._register_entry(new)

    self.async_schedule_save()

    data = {"action": "update", "entity_id": entity_id, "changes": old_values}

    if old.entity_id != entity_id:
        data["old_entity_id"] = old.entity_id

    self.opp.bus.async_fire(EVENT_ENTITY_REGISTRY_UPDATED, data)

    return new
def entity_filter_3(entity_id: str) -> bool:
    """Return filter function for case 3."""
    return not entity_excluded(split_entity_id(entity_id)[0], entity_id)
def entity_filter_2(entity_id: str) -> bool:
    """Return filter function for case 2."""
    return entity_included(split_entity_id(entity_id)[0], entity_id)
def _keep_event(event, entities_filter):
    """Return True if the event should appear in the logbook.

    Derives a (domain, entity_id) pair per event type, then applies
    the caller-supplied entities_filter.
    """
    domain, entity_id = None, None

    if event.event_type == EVENT_STATE_CHANGED:
        entity_id = event.data.get("entity_id")

        if entity_id is None:
            return False

        # Do not report on new entities
        if event.data.get("old_state") is None:
            return False

        new_state = event.data.get("new_state")

        # Do not report on entity removal
        if not new_state:
            return False

        attributes = new_state.get("attributes", {})

        # If last_changed != last_updated only attributes have changed
        # we do not report on that yet.
        last_changed = new_state.get("last_changed")
        last_updated = new_state.get("last_updated")
        if last_changed != last_updated:
            return False

        domain = split_entity_id(entity_id)[0]

        # Also filter auto groups.
        if domain == "group" and attributes.get("auto", False):
            return False

        # exclude entities which are customized hidden
        hidden = attributes.get(ATTR_HIDDEN, False)
        if hidden:
            return False

    elif event.event_type == EVENT_LOGBOOK_ENTRY:
        domain = event.data.get(ATTR_DOMAIN)
        entity_id = event.data.get(ATTR_ENTITY_ID)

    elif event.event_type == EVENT_AUTOMATION_TRIGGERED:
        domain = "automation"
        entity_id = event.data.get(ATTR_ENTITY_ID)

    elif event.event_type == EVENT_SCRIPT_STARTED:
        domain = "script"
        entity_id = event.data.get(ATTR_ENTITY_ID)

    elif event.event_type == EVENT_ALEXA_SMART_HOME:
        domain = "alexa"

    elif event.event_type == EVENT_HOMEKIT_CHANGED:
        domain = DOMAIN_HOMEKIT

    # Domain-only events get a synthetic "<domain>." id so domain-level
    # filters can still match them.
    if not entity_id and domain:
        entity_id = f"{domain}."

    return not entity_id or entities_filter(entity_id)
def _sorted_states_to_dict(
    opp,
    session,
    states,
    start_time,
    entity_ids,
    filters=None,
    include_start_time_state=True,
    minimal_response=False,
):
    """Convert SQL results into JSON friendly data structure.

    This takes our state list and turns it into a JSON friendly data
    structure {'entity_id': [list of states], 'entity_id2': [list of states]}

    States must be sorted by entity_id and last_updated

    We also need to go back and create a synthetic zero data point for
    each list of states, otherwise our graphs won't start on the Y
    axis correctly.
    """
    result = defaultdict(list)
    # Set all entity IDs to empty lists in result set to maintain the order
    if entity_ids is not None:
        for ent_id in entity_ids:
            result[ent_id] = []

    # Get the states at the start time
    timer_start = time.perf_counter()
    if include_start_time_state:
        run = recorder.run_information_from_instance(opp, start_time)
        for state in _get_states_with_session(
            opp, session, start_time, entity_ids, run=run, filters=filters
        ):
            state.last_changed = start_time
            state.last_updated = start_time
            result[state.entity_id].append(state)

    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug("getting %d first datapoints took %fs", len(result), elapsed)

    # Called in a tight loop so cache the function
    # here
    _process_timestamp_to_utc_isoformat = process_timestamp_to_utc_isoformat

    # Append all changes to it
    for ent_id, group in groupby(states, lambda state: state.entity_id):
        domain = split_entity_id(ent_id)[0]
        ent_results = result[ent_id]
        if not minimal_response or domain in NEED_ATTRIBUTE_DOMAINS:
            ent_results.extend(LazyState(db_state) for db_state in group)
            # FIX: skip the minimal-response post-processing below.  The
            # group iterator is already exhausted at this point, so the
            # code below was dead work for these entities.
            continue

        # With minimal response we only provide a native
        # State for the first and last response. All the states
        # in-between only provide the "state" and the
        # "last_changed".
        if not ent_results:
            ent_results.append(LazyState(next(group)))

        prev_state = ent_results[-1]
        initial_state_count = len(ent_results)

        for db_state in group:
            # With minimal response we do not care about attribute
            # changes so we can filter out duplicate states
            if db_state.state == prev_state.state:
                continue

            ent_results.append(
                {
                    STATE_KEY: db_state.state,
                    LAST_CHANGED_KEY: _process_timestamp_to_utc_isoformat(
                        db_state.last_changed
                    ),
                }
            )
            prev_state = db_state

        if prev_state and len(ent_results) != initial_state_count:
            # There was at least one state change
            # replace the last minimal state with
            # a full state
            ent_results[-1] = LazyState(prev_state)

    # Filter out the empty lists if some states had 0 results.
    return {key: val for key, val in result.items() if val}
def humanify(opp, events):
    """Generate a converted list of events into Entry objects.

    Will try to group events if possible:
    - if 2+ sensor updates in GROUP_BY_MINUTES, show last
    - if Open Peer Power stop and start happen in same minute call it restarted
    """
    domain_prefixes = tuple(f"{dom}." for dom in CONTINUOUS_DOMAINS)

    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
        events, lambda event: event.time_fired.minute // GROUP_BY_MINUTES
    ):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # Group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                # NOTE(review): assumes state_changed events always carry a
                # non-None entity_id; .startswith would raise otherwise.
                entity_id = event.data.get("entity_id")
                if entity_id.startswith(domain_prefixes):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_OPENPEERPOWER_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue
                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_OPENPEERPOWER_START:
                # Only upgrade to "restarted" if a stop was seen this minute.
                if event.time_fired.minute not in start_stop_events:
                    continue
                start_stop_events[event.time_fired.minute] = 2

        # Yield entries
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                to_state = State.from_dict(event.data.get("new_state"))
                domain = to_state.domain

                # Skip all but the last sensor state
                if (
                    domain in CONTINUOUS_DOMAINS
                    and event != last_sensor_event[to_state.entity_id]
                ):
                    continue

                # Don't show continuous sensor value changes in the logbook
                if domain in CONTINUOUS_DOMAINS and to_state.attributes.get(
                    "unit_of_measurement"
                ):
                    continue

                yield {
                    "when": event.time_fired,
                    "name": to_state.name,
                    "message": _entry_message_from_state(domain, to_state),
                    "domain": domain,
                    "entity_id": to_state.entity_id,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_OPENPEERPOWER_START:
                # Part of a restart — reported by the STOP branch instead.
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield {
                    "when": event.time_fired,
                    "name": "Open Peer Power",
                    "message": "started",
                    "domain": HA_DOMAIN,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_OPENPEERPOWER_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield {
                    "when": event.time_fired,
                    "name": "Open Peer Power",
                    "message": action,
                    "domain": HA_DOMAIN,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_LOGBOOK_ENTRY:
                domain = event.data.get(ATTR_DOMAIN)
                entity_id = event.data.get(ATTR_ENTITY_ID)
                if domain is None and entity_id is not None:
                    # Derive the domain from the entity id when not supplied.
                    # NOTE(review): catches IndexError from split_entity_id —
                    # confirm that is the exception it actually raises here.
                    try:
                        domain = split_entity_id(str(entity_id))[0]
                    except IndexError:
                        pass

                yield {
                    "when": event.time_fired,
                    "name": event.data.get(ATTR_NAME),
                    "message": event.data.get(ATTR_MESSAGE),
                    "domain": domain,
                    "entity_id": entity_id,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_ALEXA_SMART_HOME:
                data = event.data
                entity_id = data["request"].get("entity_id")

                if entity_id:
                    state = opp.states.get(entity_id)
                    name = state.name if state else entity_id
                    message = "send command {}/{} for {}".format(
                        data["request"]["namespace"], data["request"]["name"], name
                    )
                else:
                    message = "send command {}/{}".format(
                        data["request"]["namespace"], data["request"]["name"]
                    )

                yield {
                    "when": event.time_fired,
                    "name": "Amazon Alexa",
                    "message": message,
                    "domain": "alexa",
                    "entity_id": entity_id,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_HOMEKIT_CHANGED:
                data = event.data
                entity_id = data.get(ATTR_ENTITY_ID)
                value = data.get(ATTR_VALUE)

                value_msg = f" to {value}" if value else ""
                message = "send command {}{} for {}".format(
                    data[ATTR_SERVICE], value_msg, data[ATTR_DISPLAY_NAME]
                )

                yield {
                    "when": event.time_fired,
                    "name": "HomeKit",
                    "message": message,
                    "domain": DOMAIN_HOMEKIT,
                    "entity_id": entity_id,
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_AUTOMATION_TRIGGERED:
                yield {
                    "when": event.time_fired,
                    "name": event.data.get(ATTR_NAME),
                    "message": "has been triggered",
                    "domain": "automation",
                    "entity_id": event.data.get(ATTR_ENTITY_ID),
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }

            elif event.event_type == EVENT_SCRIPT_STARTED:
                yield {
                    "when": event.time_fired,
                    "name": event.data.get(ATTR_NAME),
                    "message": "started",
                    "domain": "script",
                    "entity_id": event.data.get(ATTR_ENTITY_ID),
                    "context_id": event.context.id,
                    "context_user_id": event.context.user_id,
                }
async def _async_add_entity(  # noqa: C901
    self,
    entity: Entity,
    update_before_add: bool,
    entity_registry: EntityRegistry,
    device_registry: DeviceRegistry,
) -> None:
    """Add an entity to the platform.

    Resolves the entity's registry entry and entity_id, rejects
    duplicates/disabled entries, reserves the state-machine slot and
    finishes the platform add.
    """
    if entity is None:
        raise ValueError("Entity cannot be None")

    entity.add_to_platform_start(
        self.opp,
        self,
        self._get_parallel_updates_semaphore(hasattr(entity, "async_update")),
    )

    # Update properties before we generate the entity_id
    if update_before_add:
        try:
            await entity.async_device_update(warning=False)
        except Exception:  # pylint: disable=broad-except
            self.logger.exception("%s: Error on device update!", self.platform_name)
            entity.add_to_platform_abort()
            return

    requested_entity_id = None
    suggested_object_id: str | None = None
    generate_new_entity_id = False

    # Get entity_id from unique ID registration
    if entity.unique_id is not None:
        if entity.entity_id is not None:
            requested_entity_id = entity.entity_id
            suggested_object_id = split_entity_id(entity.entity_id)[1]
        else:
            suggested_object_id = entity.name  # type: ignore[unreachable]

        if self.entity_namespace is not None:
            suggested_object_id = f"{self.entity_namespace} {suggested_object_id}"

        if self.config_entry is not None:
            config_entry_id: str | None = self.config_entry.entry_id
        else:
            config_entry_id = None

        device_info = entity.device_info
        device_id = None

        # Register/resolve the device when we have both a config entry
        # and device info.
        if config_entry_id is not None and device_info is not None:
            processed_dev_info = {"config_entry_id": config_entry_id}
            for key in (
                "connections",
                "identifiers",
                "manufacturer",
                "model",
                "name",
                "default_manufacturer",
                "default_model",
                "default_name",
                "sw_version",
                "entry_type",
                "via_device",
                "suggested_area",
            ):
                if key in device_info:
                    processed_dev_info[key] = device_info[key]  # type: ignore[misc]

            try:
                device = device_registry.async_get_or_create(
                    **processed_dev_info  # type: ignore[arg-type]
                )
                device_id = device.id
            except RequiredParameterMissing:
                pass

        disabled_by: str | None = None
        if not entity.entity_registry_enabled_default:
            disabled_by = DISABLED_INTEGRATION

        entry = entity_registry.async_get_or_create(
            self.domain,
            self.platform_name,
            entity.unique_id,
            suggested_object_id=suggested_object_id,
            config_entry=self.config_entry,
            device_id=device_id,
            known_object_ids=self.entities.keys(),
            disabled_by=disabled_by,
            capabilities=entity.capability_attributes,
            supported_features=entity.supported_features,
            device_class=entity.device_class,
            unit_of_measurement=entity.unit_of_measurement,
            original_name=entity.name,
            original_icon=entity.icon,
        )

        entity.registry_entry = entry
        entity.entity_id = entry.entity_id

        if entry.disabled:
            self.logger.info(
                "Not adding entity %s because it's disabled",
                entry.name
                or entity.name
                or f'"{self.platform_name} {entity.unique_id}"',
            )
            entity.add_to_platform_abort()
            return

    # We won't generate an entity ID if the platform has already set one
    # We will however make sure that platform cannot pick a registered ID
    elif entity.entity_id is not None and entity_registry.async_is_registered(
        entity.entity_id
    ):
        # If entity already registered, convert entity id to suggestion
        suggested_object_id = split_entity_id(entity.entity_id)[1]
        generate_new_entity_id = True

    # Generate entity ID
    if entity.entity_id is None or generate_new_entity_id:
        suggested_object_id = (
            suggested_object_id or entity.name or DEVICE_DEFAULT_NAME
        )

        if self.entity_namespace is not None:
            suggested_object_id = f"{self.entity_namespace} {suggested_object_id}"
        entity.entity_id = entity_registry.async_generate_entity_id(
            self.domain, suggested_object_id, self.entities.keys()
        )

    # Make sure it is valid in case an entity set the value themselves
    if not valid_entity_id(entity.entity_id):
        entity.add_to_platform_abort()
        raise OpenPeerPowerError(f"Invalid entity ID: {entity.entity_id}")

    already_exists = entity.entity_id in self.entities
    restored = False

    if not already_exists and not self.opp.states.async_available(entity.entity_id):
        existing = self.opp.states.get(entity.entity_id)
        if existing is not None and ATTR_RESTORED in existing.attributes:
            # A restored placeholder state is fine to take over.
            restored = True
        else:
            already_exists = True

    if already_exists:
        if entity.unique_id is not None:
            msg = f"Platform {self.platform_name} does not generate unique IDs. "
            if requested_entity_id:
                msg += f"ID {entity.unique_id} is already used by {entity.entity_id} - ignoring {requested_entity_id}"
            else:
                msg += f"ID {entity.unique_id} already exists - ignoring {entity.entity_id}"
        else:
            msg = f"Entity id already exists - ignoring: {entity.entity_id}"
        self.logger.error(msg)
        entity.add_to_platform_abort()
        return

    entity_id = entity.entity_id
    self.entities[entity_id] = entity

    if not restored:
        # Reserve the state in the state machine
        # because as soon as we return control to the event
        # loop below, another entity could be added
        # with the same id before `entity.add_to_platform_finish()`
        # has a chance to finish.
        self.opp.states.async_reserve(entity.entity_id)

    def remove_entity_cb() -> None:
        """Remove entity from entities list."""
        self.entities.pop(entity_id)

    entity.async_on_remove(remove_entity_cb)

    await entity.add_to_platform_finish()
def entity_filter_3(entity_id: str) -> bool:
    """Return filter function for case 3."""
    if entity_id in exclude_e:
        return False
    return split_entity_id(entity_id)[0] not in exclude_d
def entity_filter_4a(entity_id: str) -> bool:
    """Return filter function for case 4a."""
    if split_entity_id(entity_id)[0] in include_d:
        # Included domain: keep unless explicitly excluded.
        return entity_id not in exclude_e
    # Other domains: keep only when explicitly included.
    return entity_id in include_e
def _filter_domains_and_entities(self, entity_id: str) -> bool:
    """Template should re-render if the entity state changes when we match specific domains or entities."""
    if entity_id in self.entities:
        return True
    return split_entity_id(entity_id)[0] in self.domains
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
    """Template should re-render if the entity is added or removed with domains watched."""
    domain = split_entity_id(entity_id)[0]
    return domain in self.domains_lifecycle