async def _generate_entity(self, external_id):
    """Announce a new entity for *external_id* over the dispatcher."""
    signal = self.async_event_new_entity()
    async_dispatcher_send(
        self._opp, signal, self, self._config_entry.unique_id, external_id
    )
def add_to_platform_abort(self) -> None:
    """Abort adding an entity to a platform."""
    # Clear any pending MQTT discovery bookkeeping before delegating
    # the actual abort to the base class.
    if self._discovery_data:
        disc_hash = self._discovery_data[ATTR_DISCOVERY_HASH]
        clear_discovery_hash(self.opp, disc_hash)
        async_dispatcher_send(
            self.opp, MQTT_DISCOVERY_DONE.format(disc_hash), None
        )
    super().add_to_platform_abort()
async def async_remove_device(address):
    """Remove the device and all entities from opp."""
    # Tell entities tied to this address to remove themselves first.
    async_dispatcher_send(opp, f"{address.id}_{SIGNAL_REMOVE_ENTITY}")
    dev_registry = await opp.helpers.device_registry.async_get_registry()
    device = dev_registry.async_get_device(identifiers={(DOMAIN, str(address))})
    if device is not None:
        dev_registry.async_remove_device(device.id)
async def async_srv_load_aldb(service):
    """Load the device All-Link database."""
    entity_id = service.data[CONF_ENTITY_ID]
    reload = service.data[SRV_LOAD_DB_RELOAD]
    # A single entity is signalled directly; ENTITY_MATCH_ALL fans out
    # to every device via the bulk helper.
    if entity_id.lower() != ENTITY_MATCH_ALL:
        async_dispatcher_send(opp, f"{entity_id}_{SIGNAL_LOAD_ALDB}", reload)
    else:
        await async_srv_load_aldb_all(reload)
def attribute_updated(self, attrid, value):
    """Handle attribute update from window_covering cluster."""
    attr_name = self.cluster.attributes.get(attrid, [attrid])[0]
    self.debug(
        "Attribute report '%s'[%s] = %s", self.cluster.name, attr_name, value
    )
    # Only the tracked attribute is forwarded to listening entities.
    if attrid != self._value_attribute:
        return
    async_dispatcher_send(
        self._zha_device.opp, f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", value
    )
async def async_update(self):
    """Retrieve latest state."""
    # Force a fresh read of the cover position from the device.
    position = await self.get_attribute_value(
        "current_position_lift_percentage", from_cache=False
    )
    self.debug("read current position: %s", position)
    async_dispatcher_send(
        self._zha_device.opp, f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", position
    )
async def _async_handle_conversation_event(self, event):
    """Forward chat message events to the dispatcher; ignore other events."""
    if not isinstance(event, ChatMessageEvent):
        return
    dispatcher.async_dispatcher_send(
        self.opp,
        EVENT_HANGOUTS_MESSAGE_RECEIVED,
        event.conversation_id,
        event.user_id,
        event,
    )
async def async_update(self) -> None:
    """Get the latest state data from the gateway."""
    try:
        await self._heater.update()
    except (ClientResponseError, asyncio.TimeoutError) as err:
        # Best-effort poll: log and skip the broadcast on failure.
        _LOGGER.warning("Update failed, message is: %s", err)
        return
    async_dispatcher_send(self.opp, DOMAIN)
async def set_led_mode(call):
    """Set the OpenTherm Gateway LED modes."""
    gw_dev = opp.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
    led_id = call.data[ATTR_ID]
    # The gateway echoes back the mode it actually applied.
    mode = await gw_dev.gateway.set_led_mode(led_id, call.data[ATTR_MODE])
    led_var = getattr(gw_vars, f"OTGW_LED_{led_id}")
    gw_dev.status.update({led_var: mode})
    async_dispatcher_send(opp, gw_dev.update_signal, gw_dev.status)
async def set_gpio_mode(call):
    """Set the OpenTherm Gateway GPIO modes."""
    gw_dev = opp.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
    gpio_id = call.data[ATTR_ID]
    # The gateway echoes back the mode it actually applied.
    mode = await gw_dev.gateway.set_gpio_mode(gpio_id, call.data[ATTR_MODE])
    gpio_var = getattr(gw_vars, f"OTGW_GPIO_{gpio_id}")
    gw_dev.status.update({gpio_var: mode})
    async_dispatcher_send(opp, gw_dev.update_signal, gw_dev.status)
async def async_update(self):
    """Get the latest data from the Glances REST API."""
    try:
        await self.api.get_data()
    except exceptions.GlancesApiError:
        _LOGGER.error("Unable to fetch data from Glances")
        self.available = False
    else:
        self.available = True
    # Listeners are always notified so they can reflect availability.
    _LOGGER.debug("Glances data updated")
    async_dispatcher_send(self.opp, DATA_UPDATED)
def async_connection_status_callback(self, status):
    """Handle signals of device connection status.

    This is called on every RTSP keep-alive message.
    Only signal state change if state change is true.
    """
    is_playing = status == SIGNAL_PLAYING
    if self.available != is_playing:
        self.available = is_playing
        async_dispatcher_send(self.opp, self.signal_reachable, True)
def update_device(self, device: DynaliteBaseDevice | None = None) -> None:
    """Call when a device or all devices should be updated."""
    if device:
        async_dispatcher_send(self.opp, self.update_signal(device))
        return
    # No specific device means the connection state changed, so all
    # devices may have become available or unavailable.
    log_string = (
        "Connected" if self.dynalite_devices.connected else "Disconnected"
    )
    LOGGER.info("%s to dynalite host", log_string)
    async_dispatcher_send(self.opp, self.update_signal())
async def async_new_address_callback(opp, entry):
    """Handle signals of device getting new address.

    Called when config entry is updated.
    This is a static method because a class method (bound method),
    can not be used with weak references.
    """
    axis_device = opp.data[AXIS_DOMAIN][entry.unique_id]
    # Point the API client at the (possibly new) host before signalling.
    axis_device.api.config.host = axis_device.host
    async_dispatcher_send(opp, axis_device.signal_new_address)
async def async_load_entities(opp: OpenPeerPower) -> None:
    """Load entities after integration was setup."""
    zha_data = opp.data[DATA_ZHA]
    await zha_data[DATA_ZHA_GATEWAY].async_initialize_devices_and_entities()
    # Wait for every platform setup task; failures are logged, not fatal.
    results = await asyncio.gather(
        *zha_data[DATA_ZHA_PLATFORM_LOADED], return_exceptions=True
    )
    for result in results:
        if isinstance(result, Exception):
            _LOGGER.warning("Couldn't setup zha platform: %s", result)
    async_dispatcher_send(opp, SIGNAL_ADD_ENTITIES)
def async_update_from_websocket(self, state):
    """Update the entity based on new websocket data."""
    self.update_state(state)
    self.async_write_op_state()
    # Let the per-server sensor know fresh data arrived.
    signal = PLEX_UPDATE_SENSOR_SIGNAL.format(self.plex_server.machine_identifier)
    async_dispatcher_send(self.opp, signal)
def instance_remove(instance_num: int) -> None:
    """Remove entities for an old Hyperion instance."""
    assert server_id
    # One removal signal per light type registered for this instance.
    for light_type in LIGHT_TYPES:
        unique_id = get_hyperion_unique_id(server_id, instance_num, light_type)
        async_dispatcher_send(opp, SIGNAL_ENTITY_REMOVE.format(unique_id))
def instance_remove(instance_num: int) -> None:
    """Remove entities for an old Hyperion instance."""
    assert server_id
    # One removal signal per component switch registered for this instance.
    for component in COMPONENT_SWITCHES:
        unique_id = _component_to_unique_id(server_id, component, instance_num)
        async_dispatcher_send(opp, SIGNAL_ENTITY_REMOVE.format(unique_id))
async def async_update(self) -> None:
    """Get the data from Kaiterra API.

    Fetches the latest readings for all configured devices, derives a
    combined AQI (worst pollutant wins) per device, and notifies
    listeners via DISPATCHER_KAITERRA in every case — success, API
    failure, or parse failure.
    """
    try:
        # FIX: async_timeout.timeout is an async context manager; the
        # plain `with` form raises on async_timeout >= 4.0.
        async with async_timeout.timeout(10):
            data = await self._api.get_latest_sensor_readings(self._devices)
    except (ClientResponseError, asyncio.TimeoutError):
        _LOGGER.debug("Couldn't fetch data from Kaiterra API")
        self.data = {}
        async_dispatcher_send(self._opp, DISPATCHER_KAITERRA)
        return

    _LOGGER.debug("New data retrieved: %s", data)

    try:
        self.data = {}
        for i, device in enumerate(data):
            if not device:
                # Device returned nothing; keep an empty entry.
                self.data[self._devices_ids[i]] = {}
                continue

            aqi, main_pollutant = None, None
            for sensor_name, sensor in device.items():
                points = sensor.get("points")
                if not points:
                    continue

                # Only the most recent point is used.
                point = points[0]
                sensor["value"] = point.get("value")

                if "aqi" not in point:
                    continue

                sensor["aqi"] = point["aqi"]
                # Track the highest AQI and the pollutant driving it.
                if not aqi or aqi < point["aqi"]:
                    aqi = point["aqi"]
                    main_pollutant = POLLUTANTS.get(sensor_name)

            # FIX: guard the scale lookup — when no sensor reported an
            # AQI, `aqi` is None and comparing it would raise TypeError.
            level = None
            if aqi is not None:
                for j in range(1, len(self._scale)):
                    if aqi <= self._scale[j]:
                        level = self._level[j - 1]
                        break

            device["aqi"] = {"value": aqi}
            device["aqi_level"] = {"value": level}
            device["aqi_pollutant"] = {"value": main_pollutant}

            self.data[self._devices_ids[i]] = device
    except IndexError as err:
        # More devices in the response than configured ids.
        _LOGGER.error("Parsing error %s", err)

    async_dispatcher_send(self._opp, DISPATCHER_KAITERRA)
async def async_refresh_data(self, now):
    """Refresh the leaf data and update the datastore.

    Guarded by ``request_in_progress`` so overlapping refreshes are
    skipped. Battery data is fetched first; climate data is only
    requested when the battery request returned something.
    """
    if self.request_in_progress:
        _LOGGER.debug("Refresh currently in progress for %s", self.leaf.nickname)
        return

    _LOGGER.debug("Updating Nissan Leaf Data")

    self.last_check = datetime.today()
    self.request_in_progress = True

    server_response = await self.async_get_battery()

    if server_response is not None:
        _LOGGER.debug("Server Response: %s", server_response.__dict__)

        if server_response.answer["status"] == HTTP_OK:
            self.data[DATA_BATTERY] = server_response.battery_percent

            # pycarwings2 library doesn't always provide cruising ranges
            # so we have to check if they exist before we can use them.
            # Root cause: the nissan servers don't always send the data.
            if hasattr(server_response, "cruising_range_ac_on_km"):
                self.data[DATA_RANGE_AC] = server_response.cruising_range_ac_on_km
            else:
                self.data[DATA_RANGE_AC] = None

            if hasattr(server_response, "cruising_range_ac_off_km"):
                self.data[DATA_RANGE_AC_OFF] = server_response.cruising_range_ac_off_km
            else:
                self.data[DATA_RANGE_AC_OFF] = None

            self.data[DATA_PLUGGED_IN] = server_response.is_connected
            self.data[DATA_CHARGING] = server_response.is_charging

        # Notify even on non-OK status so entities refresh their state.
        async_dispatcher_send(self.opp, SIGNAL_UPDATE_LEAF)
        self.last_battery_response = utcnow()

    # Climate response only updated if battery data updated first.
    if server_response is not None:
        try:
            climate_response = await self.async_get_climate()
            if climate_response is not None:
                _LOGGER.debug("Got climate data for Leaf: %s", climate_response.__dict__)
                self.data[DATA_CLIMATE] = climate_response.is_hvac_running
                self.last_climate_response = utcnow()
        except CarwingsError:
            # Best-effort: climate failure must not abort the refresh.
            _LOGGER.error("Error fetching climate info")

    self.request_in_progress = False
    async_dispatcher_send(self.opp, SIGNAL_UPDATE_LEAF)
async def trace_action(opp, script_run, stop, variables):
    """Trace action execution.

    Async generator used as a context manager around a single script
    action: it records a trace element for the current trace path,
    pauses at a matching breakpoint until told to continue or stop, and
    attaches any raised exception to the trace element on the way out.
    """
    path = trace_path_get()
    trace_element = action_trace_append(variables, path)
    trace_stack_push(trace_stack_cv, trace_element)

    trace_id = trace_id_get()
    if trace_id:
        key = trace_id[0]
        run_id = trace_id[1]
        breakpoints = opp.data[DATA_SCRIPT_BREAKPOINTS]
        # A breakpoint matches when it targets this run (or any run) and
        # this node (or any node).
        if key in breakpoints and (
            (
                run_id in breakpoints[key]
                and (
                    path in breakpoints[key][run_id]
                    or NODE_ANY in breakpoints[key][run_id]
                )
            )
            or (
                RUN_ID_ANY in breakpoints[key]
                and (
                    path in breakpoints[key][RUN_ID_ANY]
                    or NODE_ANY in breakpoints[key][RUN_ID_ANY]
                )
            )
        ):
            async_dispatcher_send(opp, SCRIPT_BREAKPOINT_HIT, key, run_id, path)

            done = asyncio.Event()

            @callback
            def async_continue_stop(command=None):
                # "stop" aborts the run; anything else just resumes.
                if command == "stop":
                    stop.set()
                done.set()

            signal = SCRIPT_DEBUG_CONTINUE_STOP.format(key, run_id)
            remove_signal1 = async_dispatcher_connect(opp, signal, async_continue_stop)
            remove_signal2 = async_dispatcher_connect(
                opp, SCRIPT_DEBUG_CONTINUE_ALL, async_continue_stop
            )

            # Block until either the script is stopped externally or the
            # debugger sends continue/stop; then cancel the leftover waiter.
            tasks = [opp.async_create_task(flag.wait()) for flag in (stop, done)]
            await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
            for task in tasks:
                task.cancel()
            remove_signal1()
            remove_signal2()

    try:
        yield trace_element
    except _StopScript as ex:
        # Record the root cause of a deliberate script stop.
        trace_element.set_error(ex.__cause__ or ex)
        raise ex
    except Exception as ex:
        trace_element.set_error(ex)
        raise ex
    finally:
        # Always unwind the trace stack, even on error.
        trace_stack_pop(trace_stack_cv)
def group_member_removed(
    self, zigpy_group: ZigpyGroupType, endpoint: ZigpyEndpointType
) -> None:
    """Handle zigpy group member removed event."""
    # need to handle endpoint correctly on groups
    zha_group = self._async_get_or_create_group(zigpy_group)
    zha_group.info("group_member_removed - endpoint: %s", endpoint)
    self._send_group_gateway_message(zigpy_group, ZHA_GW_MSG_GROUP_MEMBER_REMOVED)
    signal = f"{SIGNAL_GROUP_MEMBERSHIP_CHANGE}_0x{zigpy_group.group_id:04x}"
    async_dispatcher_send(self._opp, signal)
def reconnect(exc=None):
    """Schedule reconnect after connection has been unexpectedly lost."""
    # Reset protocol binding before starting reconnect
    RflinkCommand.set_rflink_protocol(None)
    async_dispatcher_send(opp, SIGNAL_AVAILABILITY, False)

    # Skip reconnecting when OPP is shutting down.
    if opp.state == CoreState.stopping:
        return
    _LOGGER.warning("Disconnected from Rflink, reconnecting")
    opp.async_create_task(connect())
async def async_sense_update(_):
    """Retrieve latest state."""
    try:
        await gateway.update_realtime()
    except SenseAPITimeoutException:
        # On timeout, fall through and serve whatever is cached.
        _LOGGER.error("Timeout retrieving data")

    realtime = gateway.get_realtime()
    if "devices" in realtime:
        sense_devices_data.set_devices_data(realtime["devices"])
    async_dispatcher_send(
        opp, f"{SENSE_DEVICE_UPDATE}-{gateway.sense_monitor_id}"
    )
async def set_max_mod(call):
    """Set the max modulation level."""
    gw_dev = opp.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
    requested = call.data[ATTR_LEVEL]
    # Backend only clears setting on non-numeric values.
    level = "-" if requested == -1 else requested
    value = await gw_dev.gateway.set_max_relative_mod(level)
    gw_dev.status.update({gw_vars.DATA_SLAVE_MAX_RELATIVE_MOD: value})
    async_dispatcher_send(opp, gw_dev.update_signal, gw_dev.status)
async def async_step_add_x10(self, user_input=None):
    """Add an X10 device."""
    errors = {}
    if user_input is None:
        # First visit: show an empty form.
        return self.async_show_form(
            step_id=STEP_ADD_X10,
            data_schema=build_x10_schema(),
            errors=errors,
        )
    options = add_x10_device({**self.config_entry.options}, user_input)
    async_dispatcher_send(self.opp, SIGNAL_ADD_X10_DEVICE, user_input)
    return self.async_create_entry(title="", data=options)
async def handle_webhook(opp, webhook_id, request):
    """Handle webhook callback."""
    try:
        data = await request.json()
        _LOGGER.debug("Webhook %s: %s", webhook_id, data)
    except ValueError:
        # Non-JSON payloads are silently dropped.
        return None

    if isinstance(data, dict):
        data["webhook_id"] = webhook_id
    async_dispatcher_send(opp, SIGNAL_WEBHOOK, data, data.get("hook_id"))
    opp.bus.async_fire(EVENT_RECEIVED, data)
async def _async_update_data(*_: Any) -> None:
    """Update all sensors in one executor jump."""
    # A held lock means the previous refresh has not finished yet.
    if _update_lock.locked():
        _LOGGER.warning(
            "Updating systemmonitor took longer than the scheduled update interval %s",
            scan_interval,
        )
        return

    async with _update_lock:
        await opp.async_add_executor_job(_update_sensors)
        async_dispatcher_send(opp, SIGNAL_SYSTEMMONITOR_UPDATE)
async def async_handle_addr_update(opp, context, msg):
    """Handle an addressable sensor update."""
    _LOGGER.debug("[addr handler] context: %s msg: %s", context, msg)
    addr, temp = msg.get("addr"), msg.get("temp")
    entity_id = context.get(addr)

    if entity_id:
        # Known sensor: push the temperature straight to its entity.
        async_dispatcher_send(opp, f"konnected.{entity_id}.update", temp)
        return

    # Unknown address: announce a new DS18B20 sensor instead.
    msg["device_id"] = context.get("device_id")
    msg["temperature"] = temp
    msg["addr"] = addr
    async_dispatcher_send(opp, SIGNAL_DS18B20_NEW, msg)
def cluster_command(self, tsn, command_id, args):
    """Handle commands received to this cluster."""
    if command_id == 0:
        # Status change notification: mask to the low two bits.
        alarm_state = args[0] & 3
        async_dispatcher_send(
            self._zha_device.opp,
            f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
            alarm_state,
        )
        self.debug("Updated alarm state: %s", alarm_state)
    elif command_id == 1:
        self.debug("Enroll requested")
        self._zha_device.opp.async_create_task(self._cluster.enroll_response(0, 0))