async def async_added_to_opp(self):
    """Run when about to be added to Opp."""
    await super().async_added_to_opp()
    # Poll on a fixed cadence; the returned unsubscribe callback is not kept,
    # so the interval runs for the lifetime of the instance.
    async_track_time_interval(
        self.opp, self._async_update_time, self.update_time_interval
    )
    # Seed the state immediately instead of waiting for the first interval.
    self._async_update_time()
async def async_setup(opp: OpenPeerPower, config: ConfigType) -> bool:
    """Create a Genius Hub system."""
    opp.data[DOMAIN] = {}

    params = dict(config[DOMAIN])
    # A hub is addressed either by host or by token; exactly one is present
    # after config validation, and it is passed positionally to the client.
    key = CONF_HOST if CONF_HOST in params else CONF_TOKEN
    primary = (params.pop(key),)
    hub_uid = params.pop(CONF_MAC, None)

    client = GeniusHub(*primary, **params, session=async_get_clientsession(opp))
    broker = opp.data[DOMAIN]["broker"] = GeniusBroker(opp, client, hub_uid)

    try:
        await client.update()
    except aiohttp.ClientResponseError as err:
        _LOGGER.error("Setup failed, check your configuration, %s", err)
        return False
    broker.make_debug_log_entries()

    # Keep the hub data fresh on a fixed schedule.
    async_track_time_interval(opp, broker.async_update, SCAN_INTERVAL)

    for platform in ["climate", "water_heater", "sensor", "binary_sensor", "switch"]:
        opp.async_create_task(async_load_platform(opp, platform, DOMAIN, {}, config))

    setup_service_functions(opp, broker)
    return True
async def async_setup(opp, config):
    """Set up the Kaiterra integration."""
    conf = config[DOMAIN]
    scan_interval = conf[CONF_SCAN_INTERVAL]
    devices = conf[CONF_DEVICES]
    session = async_get_clientsession(opp)
    api = opp.data[DOMAIN] = KaiterraApiData(opp, conf, session)

    # Fetch once up front so platforms have data as soon as they load.
    await api.async_update()

    async def _poll(now=None):
        """Periodic update."""
        await api.async_update()

    async_track_time_interval(opp, _poll, scan_interval)

    # Load platforms for each configured device.
    for device in devices:
        device_name = device.get(CONF_NAME) or device[CONF_TYPE]
        device_id = device[CONF_DEVICE_ID]
        for platform in PLATFORMS:
            opp.async_create_task(
                async_load_platform(
                    opp,
                    platform,
                    DOMAIN,
                    {CONF_NAME: device_name, CONF_DEVICE_ID: device_id},
                    config,
                )
            )

    return True
async def async_setup(opp, config):
    """Set up the iperf3 component."""
    opp.data[DOMAIN] = {}
    conf = config[DOMAIN]

    for host in conf[CONF_HOSTS]:
        host_data = Iperf3Data(opp, host)
        opp.data[DOMAIN][host[CONF_HOST]] = host_data
        # Only poll automatically when manual mode is disabled.
        if not conf[CONF_MANUAL]:
            async_track_time_interval(opp, host_data.update, conf[CONF_SCAN_INTERVAL])

    def update(call):
        """Service call to manually update the data."""
        called_host = call.data[ATTR_HOST]
        # A known host updates just that host; anything else updates all.
        if called_host in opp.data[DOMAIN]:
            targets = [opp.data[DOMAIN][called_host]]
        else:
            targets = list(opp.data[DOMAIN].values())
        for target in targets:
            target.update()

    opp.services.async_register(DOMAIN, "speedtest", update, schema=SERVICE_SCHEMA)
    opp.async_create_task(
        async_load_platform(
            opp, SENSOR_DOMAIN, DOMAIN, conf[CONF_MONITORED_CONDITIONS], config
        )
    )
    return True
async def async_added_to_opp(self):
    """Start custom polling."""

    @callback
    def _refresh(event_time=None):
        """Force a state refresh on each tick."""
        self.async_schedule_update_op_state(True)

    # Entity polls itself on its own interval instead of the default poller.
    async_track_time_interval(self.opp, _refresh, self._interval)
async def async_added_to_opp(self):
    """Run when about to be added to opp."""
    await super().async_added_to_opp()
    # Update state when the on/off cluster reports an attribute change.
    await self.async_accept_signal(
        self._on_off_channel, SIGNAL_ATTR_UPDATED, self.async_set_state
    )
    # Only dimmable devices expose a level channel.
    if self._level_channel:
        await self.async_accept_signal(
            self._level_channel, SIGNAL_SET_LEVEL, self.set_level
        )
    # Fallback polling in case attribute reports are missed.
    async_track_time_interval(self.opp, self.refresh, SCAN_INTERVAL)
def __init__(self, opp: OpenPeerPower, config_entry: ConfigEntry, session):
    """Initialize the Minut data object."""
    self._opp = opp
    self._config_entry = config_entry
    self._client = session
    self._is_available = True
    # Track which homes/devices have been seen so new ones can be
    # discovered on later updates.
    self._known_homes = set()
    self._known_devices = set()
    # Begin periodic polling immediately; the unsubscribe handle is dropped,
    # so polling lasts for the lifetime of the instance.
    async_track_time_interval(self._opp, self.update, SCAN_INTERVAL)
async def async_init(self):
    """Further initialize connection to Traccar."""
    await self._api.test_connection()
    # Connected but not authenticated means the credentials are wrong;
    # abort setup rather than polling with a bad session.
    if self._api.connected and not self._api.authenticated:
        _LOGGER.error("Authentication for Traccar failed")
        return False

    # Fetch once immediately, then poll on the configured interval.
    await self._async_update()
    async_track_time_interval(self._opp, self._async_update, self._scan_interval)
    return True
async def async_setup_entry(opp, config_entry):
    """Set up EcoNet as config entry."""
    email = config_entry.data[CONF_EMAIL]
    password = config_entry.data[CONF_PASSWORD]

    try:
        api = await EcoNetApiInterface.login(email, password=password)
    except InvalidCredentialsError:
        # Bad credentials will not fix themselves; fail setup outright.
        _LOGGER.error("Invalid credentials provided")
        return False
    except PyeconetError as err:
        # Other API errors are treated as transient; retry setup later.
        _LOGGER.error("Config entry failed: %s", err)
        raise ConfigEntryNotReady from err

    try:
        equipment = await api.get_equipment_by_type(
            [EquipmentType.WATER_HEATER, EquipmentType.THERMOSTAT]
        )
    except (ClientError, GenericHTTPError, InvalidResponseFormat) as err:
        raise ConfigEntryNotReady from err
    opp.data[DOMAIN][API_CLIENT][config_entry.entry_id] = api
    opp.data[DOMAIN][EQUIPMENT][config_entry.entry_id] = equipment

    opp.config_entries.async_setup_platforms(config_entry, PLATFORMS)

    # Subscribe to push updates from the EcoNet cloud.
    api.subscribe()

    def update_published():
        """Handle a push update."""
        dispatcher_send(opp, PUSH_UPDATE)

    # Route push updates from every device through the dispatcher.
    for _eqip in equipment[EquipmentType.WATER_HEATER]:
        _eqip.set_update_callback(update_published)
    for _eqip in equipment[EquipmentType.THERMOSTAT]:
        _eqip.set_update_callback(update_published)

    async def resubscribe(now):
        """Resubscribe to the MQTT updates."""
        # unsubscribe is blocking, so run it in the executor.
        await opp.async_add_executor_job(api.unsubscribe)
        api.subscribe()

    async def fetch_update(now):
        """Fetch the latest changes from the API."""
        await api.refresh_equipment()

    # Stagger the REST refresh a minute after the MQTT resubscribe; both
    # timers are torn down automatically when the entry unloads.
    config_entry.async_on_unload(async_track_time_interval(opp, resubscribe, INTERVAL))
    config_entry.async_on_unload(
        async_track_time_interval(opp, fetch_update, INTERVAL + timedelta(minutes=1))
    )

    return True
async def async_setup(self):
    """Set up the Netatmo data handler."""
    # Poll the API on a fixed cadence.
    async_track_time_interval(
        self.opp, self.async_update, timedelta(seconds=SCAN_INTERVAL)
    )

    # Also react to webhook events dispatched without a specific signal name;
    # keep the unsubscribe handle so the listener can be torn down later.
    unsub = async_dispatcher_connect(
        self.opp,
        f"signal-{DOMAIN}-webhook-None",
        self.handle_event,
    )
    self.listeners.append(unsub)
async def async_setup_platform(opp, config, async_add_entities, discovery_info=None):
    """Set up the CityBikes platform."""
    if PLATFORM not in opp.data:
        opp.data[PLATFORM] = {MONITORED_NETWORKS: {}}

    latitude = config.get(CONF_LATITUDE, opp.config.latitude)
    longitude = config.get(CONF_LONGITUDE, opp.config.longitude)
    network_id = config.get(CONF_NETWORK)
    stations_list = set(config.get(CONF_STATIONS_LIST, []))
    radius = config.get(CONF_RADIUS, 0)
    name = config[CONF_NAME]
    # Radius is configured in feet on imperial installs; convert to meters.
    if not opp.config.units.is_metric:
        radius = distance.convert(radius, LENGTH_FEET, LENGTH_METERS)

    # Create a single instance of CityBikesNetworks.
    networks = opp.data.setdefault(CITYBIKES_NETWORKS, CityBikesNetworks(opp))

    if not network_id:
        network_id = await networks.get_closest_network_id(latitude, longitude)

    if network_id not in opp.data[PLATFORM][MONITORED_NETWORKS]:
        # First platform instance for this network: create it and start
        # its own refresh cycle.
        network = CityBikesNetwork(opp, network_id)
        opp.data[PLATFORM][MONITORED_NETWORKS][network_id] = network
        opp.async_create_task(network.async_refresh())
        async_track_time_interval(opp, network.async_refresh, SCAN_INTERVAL)
    else:
        network = opp.data[PLATFORM][MONITORED_NETWORKS][network_id]

    # Block until the network has fetched its station list at least once.
    await network.ready.wait()

    devices = []
    for station in network.stations:
        dist = location.distance(
            latitude, longitude, station[ATTR_LATITUDE], station[ATTR_LONGITUDE]
        )
        station_id = station[ATTR_ID]
        station_uid = str(station.get(ATTR_EXTRA, {}).get(ATTR_UID, ""))

        # Include stations inside the radius, or explicitly listed by
        # id/uid in the configuration.
        if radius > dist or stations_list.intersection((station_id, station_uid)):
            if name:
                uid = "_".join([network.network_id, name, station_id])
            else:
                uid = "_".join([network.network_id, station_id])
            entity_id = async_generate_entity_id(ENTITY_ID_FORMAT, uid, opp=opp)
            devices.append(CityBikesStation(network, station_id, entity_id))

    async_add_entities(devices, True)
async def async_setup_platform(opp, config, async_add_entities, discovery_info=None):
    """Set up the Fronius platform."""
    session = async_get_clientsession(opp)
    fronius = Fronius(session, config[CONF_RESOURCE])

    scan_interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
    adapters = []
    # Create an adapter for every monitored condition.
    for condition in config[CONF_MONITORED_CONDITIONS]:
        device = condition[CONF_DEVICE]
        sensor_type = condition[CONF_SENSOR_TYPE]
        scope = condition[CONF_SCOPE]
        name = "Fronius {} {} {}".format(
            condition[CONF_SENSOR_TYPE].replace("_", " ").capitalize(),
            device if scope == SCOPE_DEVICE else SCOPE_SYSTEM,
            config[CONF_RESOURCE],
        )
        # Pick the adapter class from the sensor type/scope combination.
        if sensor_type == TYPE_INVERTER:
            if scope == SCOPE_SYSTEM:
                adapter_cls = FroniusInverterSystem
            else:
                adapter_cls = FroniusInverterDevice
        elif sensor_type == TYPE_METER:
            if scope == SCOPE_SYSTEM:
                adapter_cls = FroniusMeterSystem
            else:
                adapter_cls = FroniusMeterDevice
        elif sensor_type == TYPE_POWER_FLOW:
            adapter_cls = FroniusPowerFlow
        else:
            adapter_cls = FroniusStorage

        adapters.append(adapter_cls(fronius, name, device, async_add_entities))

    # Creates a closure that fetches an update when called; binding the
    # adapter here avoids the late-binding loop-variable pitfall.
    def adapter_data_fetcher(data_adapter):
        async def fetch_data(*_):
            await data_adapter.async_update()

        return fetch_data

    # Set up the fetching in a fixed interval for each adapter
    for adapter in adapters:
        fetch = adapter_data_fetcher(adapter)
        # fetch data once at set-up
        await fetch()
        async_track_time_interval(opp, fetch, scan_interval)
async def async_setup(self):
    """Set up the heartbeat."""
    # Already scheduled; nothing to do.
    if self._unsubscribe is not None:
        return
    # Beat once immediately, then keep beating on the fixed interval and
    # retain the unsubscribe handle for teardown.
    await self.async_heartbeat(dt.datetime.now())
    self._unsubscribe = event.async_track_time_interval(
        self._opp, self.async_heartbeat, self.HEARTBEAT_INTERVAL
    )
async def _login(opp, modem_data, password):
    """Log in and complete setup."""
    await modem_data.modem.login(password=password)

    def fire_sms_event(sms):
        """Send an SMS event."""
        data = {
            ATTR_HOST: modem_data.host,
            ATTR_SMS_ID: sms.id,
            ATTR_FROM: sms.sender,
            ATTR_MESSAGE: sms.message,
        }
        opp.bus.async_fire(EVENT_SMS, data)

    # Forward incoming SMS messages onto the event bus.
    await modem_data.modem.add_sms_listener(fire_sms_event)

    await modem_data.async_update()
    opp.data[DATA_KEY].modem_data[modem_data.host] = modem_data

    async def _update(now):
        """Periodic update."""
        await modem_data.async_update()

    update_unsub = async_track_time_interval(opp, _update, SCAN_INTERVAL)

    async def cleanup(event):
        """Clean up resources."""
        # Stop polling before logging out so no update fires mid-teardown.
        update_unsub()
        await modem_data.modem.logout()
        del opp.data[DATA_KEY].modem_data[modem_data.host]

    opp.bus.async_listen_once(EVENT_OPENPEERPOWER_STOP, cleanup)
def __init__(
    self,
    opp: OpenPeerPower,
    zigpy_device: zha_typing.ZigpyDeviceType,
    zha_gateway: zha_typing.ZhaGatewayType,
) -> None:
    """Initialize the gateway."""
    self.opp = opp
    self._zigpy_device = zigpy_device
    self._zha_gateway = zha_gateway
    self._available = False
    self._available_signal = f"{self.name}_{self.ieee}_{SIGNAL_AVAILABLE}"
    self._checkins_missed_count = 0
    self.unsubs = []
    self.quirk_applied = isinstance(self._zigpy_device, zigpy.quirks.CustomDevice)
    self.quirk_class = (
        f"{self._zigpy_device.__class__.__module__}."
        f"{self._zigpy_device.__class__.__name__}"
    )

    # Battery devices check in far less often than mains-powered ones, so
    # they get a longer grace period before being marked unavailable.
    self._consider_unavailable_time = (
        CONSIDER_UNAVAILABLE_MAINS
        if self.is_mains_powered
        else CONSIDER_UNAVAILABLE_BATTERY
    )

    # Randomize the poll period to spread availability checks across
    # devices instead of hitting the network all at once.
    poll_seconds = random.randint(*_UPDATE_ALIVE_INTERVAL)
    self.unsubs.append(
        async_track_time_interval(
            self.opp, self._check_available, timedelta(seconds=poll_seconds)
        )
    )
    self._op_device_id = None
    self.status = DeviceStatus.CREATED
    self._channels = channels.Channels(self)
async def async_setup_entry(
    opp: OpenPeerPower,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up Kuler sky light devices."""

    async def discover(*args):
        """Attempt to discover new lights."""
        found = await pykulersky.discover()

        known = opp.data[DOMAIN][DATA_ADDRESSES]
        new_entities = []
        for light in found:
            # Skip lights we have already created entities for.
            if light.address in known:
                continue
            known.add(light.address)
            new_entities.append(KulerskyLight(light))

        async_add_entities(new_entities, update_before_add=True)

    # Start initial discovery
    opp.async_create_task(discover())

    # Perform recurring discovery of new devices
    opp.data[DOMAIN][DATA_DISCOVERY_SUBSCRIPTION] = async_track_time_interval(
        opp, discover, DISCOVERY_INTERVAL
    )
async def async_setup_entry(
    opp: OpenPeerPower,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up Zerproc light devices."""
    warned = False

    async def discover(*args):
        """Wrap discovery to include params."""
        nonlocal warned
        try:
            entities = await discover_entities(opp)
            async_add_entities(entities, update_before_add=True)
            # Successful discovery re-arms the one-shot warning.
            warned = False
        except pyzerproc.ZerprocException:
            # Warn only once per failure streak to avoid log spam.
            if not warned:
                _LOGGER.warning("Error discovering Zerproc lights", exc_info=True)
                warned = True

    # Initial discovery
    opp.async_create_task(discover())

    # Perform recurring discovery of new devices
    opp.data[DOMAIN][DATA_DISCOVERY_SUBSCRIPTION] = async_track_time_interval(
        opp, discover, DISCOVERY_INTERVAL
    )
async def async_start(self):
    """Start scanning for new devices on the network."""
    self._discover_hosts = DiscoverHosts()
    # Schedule recurring discovery; keep the unsubscribe handle for teardown.
    self._unsub = async_track_time_interval(
        self.opp, self.async_start_discover, SCAN_INTERVAL
    )
    # Run one discovery immediately rather than waiting for the first tick.
    self.async_start_discover()
async def async_track_device(self, device, update_callback):
    """Track a device."""
    # First tracked device: start the shared refresh interval.
    if not self.devices:
        self._unsub_interval = async_track_time_interval(
            self.opp, self.refresh_all, self.update_interval
        )

    if device.device_id not in self.devices:
        self.devices[device.device_id] = {
            "device": device,
            "update_callbacks": [update_callback],
            "data": None,
        }
        # Store task so that other concurrent requests can wait for us to finish and
        # data be available.
        self.devices[device.device_id]["task"] = asyncio.current_task()
        # update_method is blocking; run it off the event loop.
        self.devices[device.device_id][
            "data"
        ] = await self.opp.async_add_executor_job(self.update_method, device)
        self.devices[device.device_id].pop("task")
    else:
        self.devices[device.device_id]["update_callbacks"].append(update_callback)
        # If someone is currently fetching data as part of the initialization, wait for them
        if "task" in self.devices[device.device_id]:
            await self.devices[device.device_id]["task"]

    # Deliver the (now guaranteed present) data to the new listener.
    update_callback(self.devices[device.device_id]["data"])
def connect(self): """Connect handlers/listeners for device/lifecycle events.""" # Setup interval to regenerate the refresh token on a periodic basis. # Tokens expire in 30 days and once expired, cannot be recovered. async def regenerate_refresh_token(now): """Generate a new refresh token and update the config entry.""" await self._token.refresh( self._entry.data[CONF_CLIENT_ID], self._entry.data[CONF_CLIENT_SECRET], ) self._opp.config_entries.async_update_entry( self._entry, data={ **self._entry.data, CONF_REFRESH_TOKEN: self._token.refresh_token, }, ) _LOGGER.debug( "Regenerated refresh token for installed app: %s", self._installed_app_id, ) self._regenerate_token_remove = async_track_time_interval( self._opp, regenerate_refresh_token, TOKEN_REFRESH_INTERVAL) # Connect handler to incoming device events self._event_disconnect = self._smart_app.connect_event( self._event_handler)
def __init__(self, opp, zigpy_device, zha_gateway):
    """Initialize the gateway."""
    self.opp = opp
    self._zigpy_device = zigpy_device
    self._zha_gateway = zha_gateway
    self.cluster_channels = {}
    self._relay_channels = {}
    self._all_channels = []
    self._available = False
    self._available_signal = "{}_{}_{}".format(self.name, self.ieee, SIGNAL_AVAILABLE)
    self._checkins_missed_count = 2
    # Re-initialize whenever the device signals it has become available.
    self._unsub = async_dispatcher_connect(
        self.opp, self._available_signal, self.async_initialize
    )
    self.quirk_applied = isinstance(self._zigpy_device, zigpy.quirks.CustomDevice)
    self.quirk_class = "{}.{}".format(
        self._zigpy_device.__class__.__module__,
        self._zigpy_device.__class__.__name__,
    )
    # Periodic availability check; handle kept so it can be cancelled.
    self._available_check = async_track_time_interval(
        self.opp, self._check_available, _UPDATE_ALIVE_INTERVAL
    )
    self._op_device_id = None
    self.status = DeviceStatus.CREATED
def async_load_devices(self) -> None:
    """Restore ZHA devices from zigpy application state."""
    for zigpy_device in self.application_controller.devices.values():
        zha_device = self._async_get_or_create_device(zigpy_device, restored=True)
        # NWK 0x0000 is always the coordinator radio.
        if zha_device.nwk == 0x0000:
            self.coordinator_zha_device = zha_device
        zha_dev_entry = self.zha_storage.devices.get(str(zigpy_device.ieee))
        delta_msg = "not known"
        if zha_dev_entry and zha_dev_entry.last_seen is not None:
            # Decide initial availability from how recently the device was
            # last seen, with a longer grace period for battery devices.
            delta = round(time.time() - zha_dev_entry.last_seen)
            if zha_device.is_mains_powered:
                zha_device.available = delta < CONSIDER_UNAVAILABLE_MAINS
            else:
                zha_device.available = delta < CONSIDER_UNAVAILABLE_BATTERY
            delta_msg = f"{str(timedelta(seconds=delta))} ago"
        _LOGGER.debug(
            "[%s](%s) restored as '%s', last seen: %s",
            zha_device.nwk,
            zha_device.name,
            "available" if zha_device.available else "unavailable",
            delta_msg,
        )
    # update the last seen time for devices every 10 minutes to avoid thrashing
    # writes and shutdown issues where storage isn't updated
    self._unsubs.append(
        async_track_time_interval(
            self._opp, self.async_update_device_storage, timedelta(minutes=10)
        )
    )
def async_add_listener(self, update_callback): """Listen for data updates.""" # This is the first listener, set up interval. if not self.listeners: self._unsub_interval = async_track_time_interval( self.opp, self.async_refresh_all, self.update_interval) self.listeners.append(update_callback)
def async_setup_dump(self, *args: Any) -> None:
    """Set up the restore state listeners."""

    @callback
    def _dump_now(*_: Any) -> None:
        """Schedule a state dump on the event loop."""
        self.opp.async_create_task(self.async_dump_states())

    # Dump the initial states now. This helps minimize the risk of having
    # old states loaded by overwriting the last states once Open Peer Power
    # has started and the old states have been read.
    _dump_now()

    # Dump states periodically
    async_track_time_interval(self.opp, _dump_now, STATE_DUMP_INTERVAL)

    # Dump states when stopping opp
    self.opp.bus.async_listen_once(EVENT_OPENPEERPOWER_STOP, _dump_now)
async def async_setup(self):
    """Set up the device."""

    async def _refresh(_):
        """Run the blocking update in the executor."""
        await self._opp.async_add_executor_job(self.update)

    # Populate data once before scheduling the periodic refresh, and keep
    # the cancel handle so the tracker can be removed on teardown.
    await _refresh(None)
    self._remove_time_tracker = async_track_time_interval(
        self._opp, _refresh, self._opp.data[DOMAIN][DATA_SCAN_INTERVAL]
    )
async def async_start(self, options):
    """Start FritzHosts connection."""
    self._options = options
    self.fritz_hosts = FritzHosts(fc=self.connection)
    # The initial scan is blocking I/O, so run it in the executor.
    await self.opp.async_add_executor_job(self.scan_devices)
    # Schedule recurring scans; keep the handle so they can be cancelled.
    self._cancel_scan = async_track_time_interval(
        self.opp, self.scan_devices, timedelta(seconds=TRACKER_SCAN_INTERVAL)
    )
async def async_setup(opp, config):
    """Set up the Fast.com component."""
    conf = config[DOMAIN]
    data = opp.data[DOMAIN] = SpeedtestData(opp)

    # Skip automatic polling when the user opted for manual updates only.
    if not conf[CONF_MANUAL]:
        async_track_time_interval(opp, data.update, conf[CONF_SCAN_INTERVAL])

    def _handle_speedtest(call=None):
        """Service call to manually update the data."""
        data.update()

    opp.services.async_register(DOMAIN, "speedtest", _handle_speedtest)
    opp.async_create_task(async_load_platform(opp, "sensor", DOMAIN, {}, config))
    return True
async def async_added_to_opp(self) -> None:
    """Subscribe to BPUP and start polling."""
    await super().async_added_to_opp()
    self._update_lock = Lock()
    self._bpup_subs.subscribe(self._device_id, self._async_bpup_callback)
    # Fall back to polling if the push (BPUP) stream goes quiet; the
    # interval tracker is removed automatically with the entity.
    cancel_poll = async_track_time_interval(
        self.opp, self._async_update_if_bpup_not_alive, _FALLBACK_SCAN_INTERVAL
    )
    self.async_on_remove(cancel_poll)
def async_setup_sabnzbd(opp, sab_api, config, name):
    """Set up SABnzbd sensors and services."""
    sab_api_data = SabnzbdApiData(sab_api, name, config.get(CONF_SENSORS, {}))

    # Only load the sensor platform when sensors are configured.
    if config.get(CONF_SENSORS):
        opp.data[DATA_SABNZBD] = sab_api_data
        opp.async_create_task(
            discovery.async_load_platform(opp, "sensor", DOMAIN, {}, config)
        )

    async def async_service_handler(service):
        """Handle service calls."""
        # Dispatch on the service name; all three services share one handler.
        if service.service == SERVICE_PAUSE:
            await sab_api_data.async_pause_queue()
        elif service.service == SERVICE_RESUME:
            await sab_api_data.async_resume_queue()
        elif service.service == SERVICE_SET_SPEED:
            speed = service.data.get(ATTR_SPEED)
            await sab_api_data.async_set_queue_speed(speed)

    opp.services.async_register(
        DOMAIN, SERVICE_PAUSE, async_service_handler, schema=vol.Schema({})
    )
    opp.services.async_register(
        DOMAIN, SERVICE_RESUME, async_service_handler, schema=vol.Schema({})
    )
    opp.services.async_register(
        DOMAIN, SERVICE_SET_SPEED, async_service_handler, schema=SPEED_LIMIT_SCHEMA
    )

    async def async_update_sabnzbd(now):
        """Refresh SABnzbd queue data."""
        try:
            await sab_api.refresh_data()
            async_dispatcher_send(opp, SIGNAL_SABNZBD_UPDATED, None)
        except SabnzbdApiException as err:
            # Keep polling on transient API failures; just log the error.
            _LOGGER.error(err)

    async_track_time_interval(opp, async_update_sabnzbd, UPDATE_INTERVAL)
async def async_setup_sensor_registry_updates(
    opp: OpenPeerPower,
    sensor_registry: dict[tuple[str, str], SensorData],
    scan_interval: datetime.timedelta,
) -> None:
    """Update the registry and create polling."""
    _update_lock = asyncio.Lock()

    def _update_sensors() -> None:
        """Update sensors and store the result in the registry."""
        for (type_, argument), data in sensor_registry.items():
            try:
                state, value, update_time = _update(type_, data)
            except Exception as ex:  # pylint: disable=broad-except
                # One failing sensor must not abort the whole sweep; record
                # the exception on the entry instead.
                _LOGGER.exception("Error updating sensor: %s (%s)", type_, argument)
                data.last_exception = ex
            else:
                data.state = state
                data.value = value
                data.update_time = update_time
                data.last_exception = None

        # Only fetch these once per iteration as we use the same
        # data source multiple times in _update
        _disk_usage.cache_clear()
        _swap_memory.cache_clear()
        _virtual_memory.cache_clear()
        _net_io_counters.cache_clear()
        _net_if_addrs.cache_clear()
        _getloadavg.cache_clear()

    async def _async_update_data(*_: Any) -> None:
        """Update all sensors in one executor jump."""
        # Skip this tick instead of queueing behind a still-running sweep.
        if _update_lock.locked():
            _LOGGER.warning(
                "Updating systemmonitor took longer than the scheduled update interval %s",
                scan_interval,
            )
            return

        async with _update_lock:
            await opp.async_add_executor_job(_update_sensors)
            async_dispatcher_send(opp, SIGNAL_SYSTEMMONITOR_UPDATE)

    polling_remover = async_track_time_interval(opp, _async_update_data, scan_interval)

    @callback
    def _async_stop_polling(*_: Any) -> None:
        # Cancel the interval when Open Peer Power shuts down.
        polling_remover()

    opp.bus.async_listen_once(EVENT_OPENPEERPOWER_STOP, _async_stop_polling)

    # Prime all sensors immediately rather than waiting for the first tick.
    await _async_update_data()