def __init__(self, opp: OpenPeerPower, device: Device) -> None:
    """Initialize the data update coordinator.

    Args:
        opp: The Open Peer Power instance.
        device: The device this coordinator polls; its name is used in the
            coordinator's log name.
    """
    # Use the idiomatic super() call rather than naming the base class
    # explicitly; this stays correct if the inheritance chain changes.
    super().__init__(
        opp,
        _LOGGER,
        name=f"{DOMAIN}-{device.device_info.name}",
        update_interval=timedelta(seconds=60),
    )
    self.device = device
    # Error counter, starts at zero; incremented by update logic elsewhere
    # in the class (not visible in this chunk).
    self._error_count = 0
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool:
    """Set up FireServiceRota from a config entry."""
    opp.data.setdefault(DOMAIN, {})

    client = FireServiceRotaClient(opp, entry)
    await client.setup()

    # Abort setup when the token could not be refreshed.
    if client.token_refresh_failure:
        return False

    async def _async_fetch_availability():
        return await client.async_update()

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name="duty binary sensor",
        update_method=_async_fetch_availability,
        update_interval=MIN_TIME_BETWEEN_UPDATES,
    )
    await coordinator.async_config_entry_first_refresh()

    opp.data[DOMAIN][entry.entry_id] = {
        DATA_CLIENT: client,
        DATA_COORDINATOR: coordinator,
    }

    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):
    """Set up Atag integration from a config entry."""
    # Create the device handle first so the update closure can capture it.
    atag = AtagOne(
        session=async_get_clientsession(opp), **entry.data, device=entry.unique_id
    )

    async def _async_update_data():
        """Update data via library."""
        with async_timeout.timeout(20):
            try:
                await atag.update()
            except AtagException as err:
                raise UpdateFailed(err) from err
        return atag

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name=DOMAIN.title(),
        update_method=_async_update_data,
        update_interval=timedelta(seconds=60),
    )
    await coordinator.async_config_entry_first_refresh()

    opp.data.setdefault(DOMAIN, {})[entry.entry_id] = coordinator

    # Backfill the unique ID for entries created before it was recorded.
    if entry.unique_id is None:
        opp.config_entries.async_update_entry(entry, unique_id=atag.id)

    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_setup_entry(opp, entry):
    """Set up the Nuki entry."""
    opp.data.setdefault(DOMAIN, {})

    try:
        bridge = await opp.async_add_executor_job(
            NukiBridge,
            entry.data[CONF_HOST],
            entry.data[CONF_TOKEN],
            entry.data[CONF_PORT],
            True,
            DEFAULT_TIMEOUT,
        )
        locks, openers = await opp.async_add_executor_job(_get_bridge_devices, bridge)
    except InvalidCredentialsException as err:
        raise exceptions.ConfigEntryAuthFailed from err
    except RequestException as err:
        raise exceptions.ConfigEntryNotReady from err

    # The device list is fixed after setup; combine it once for the poller.
    devices = locks + openers

    async def _async_update():
        """Fetch data from Nuki bridge."""
        # asyncio.TimeoutError and aiohttp.ClientError are handled by the
        # data update coordinator itself.
        try:
            async with async_timeout.timeout(10):
                await opp.async_add_executor_job(_update_devices, devices)
        except InvalidCredentialsException as err:
            raise UpdateFailed(f"Invalid credentials for Bridge: {err}") from err
        except RequestException as err:
            raise UpdateFailed(f"Error communicating with Bridge: {err}") from err

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        # Name of the data. For logging purposes.
        name="nuki devices",
        update_method=_async_update,
        # Polling interval. Will only be polled if there are subscribers.
        update_interval=UPDATE_INTERVAL,
    )

    opp.data[DOMAIN][entry.entry_id] = {
        DATA_COORDINATOR: coordinator,
        DATA_BRIDGE: bridge,
        DATA_LOCKS: locks,
        DATA_OPENERS: openers,
    }

    # Fetch initial data so we have data when entities subscribe.
    await coordinator.async_refresh()

    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_setup_entry(opp, config_entry, async_add_entities):
    """Set up the binary sensors from a config entry."""
    board_api = opp.data[DOMAIN][config_entry.entry_id]
    input_count = config_entry.data["input_count"]

    async def _async_poll_inputs():
        """Fetch data from API endpoint of board."""
        async with async_timeout.timeout(5):
            return await board_api.get_inputs()

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name="binary_sensor",
        update_method=_async_poll_inputs,
        update_interval=timedelta(seconds=DEFAULT_POLLING_INTERVAL_SEC),
    )
    await coordinator.async_refresh()

    # One entity per configured board input (1-based numbering).
    binary_sensors = [
        ProgettihwswBinarySensor(
            coordinator,
            f"Input #{i}",
            setup_input(board_api, i),
        )
        for i in range(1, int(input_count) + 1)
    ]

    async_add_entities(binary_sensors)
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool:
    """Set up Enphase Envoy from a config entry."""
    config = entry.data
    name = config[CONF_NAME]

    envoy_reader = EnvoyReader(
        config[CONF_HOST],
        config[CONF_USERNAME],
        config[CONF_PASSWORD],
        inverters=True,
        async_client=get_async_client(opp),
    )

    async def _async_update_data():
        """Fetch data from API endpoint."""
        data = {}
        async with async_timeout.timeout(30):
            try:
                await envoy_reader.getData()
            except httpx.HTTPStatusError as err:
                raise ConfigEntryAuthFailed from err
            except httpx.HTTPError as err:
                raise UpdateFailed(f"Error communicating with API: {err}") from err

            for condition in SENSORS:
                if condition != "inverters":
                    data[condition] = await getattr(envoy_reader, condition)()
                else:
                    data[
                        "inverters_production"
                    ] = await envoy_reader.inverters_production()

            _LOGGER.debug("Retrieved data from API: %s", data)
            return data

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name=f"envoy {name}",
        update_method=_async_update_data,
        update_interval=SCAN_INTERVAL,
    )

    try:
        await coordinator.async_config_entry_first_refresh()
    except ConfigEntryAuthFailed:
        # Inverter data may require extra permissions; retry without it.
        envoy_reader.get_inverters = False
        await coordinator.async_config_entry_first_refresh()

    opp.data.setdefault(DOMAIN, {})[entry.entry_id] = {
        COORDINATOR: coordinator,
        NAME: name,
    }

    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_setup_entry(opp, config_entry, async_add_entities):
    """Set up the switches from a config entry."""
    board_api = opp.data[DOMAIN][config_entry.entry_id]
    relay_count = config_entry.data["relay_count"]

    async def _async_poll_switches():
        """Fetch data from API endpoint of board."""
        async with async_timeout.timeout(5):
            return await board_api.get_switches()

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name="switch",
        update_method=_async_poll_switches,
        update_interval=timedelta(seconds=DEFAULT_POLLING_INTERVAL_SEC),
    )
    await coordinator.async_refresh()

    # One entity per configured relay (1-based numbering).
    switches = [
        ProgettihwswSwitch(
            coordinator,
            f"Relay #{i}",
            setup_switch(board_api, i, config_entry.data[f"relay_{str(i)}"]),
        )
        for i in range(1, int(relay_count) + 1)
    ]

    async_add_entities(switches)
def _create_flume_device_coordinator(opp, flume_device):
    """Create a data coordinator for the flume device."""

    async def _async_refresh():
        """Get the latest data from the Flume."""
        _LOGGER.debug("Updating Flume data")
        try:
            await opp.async_add_executor_job(flume_device.update_force)
        except Exception as ex:
            raise UpdateFailed(f"Error communicating with flume API: {ex}") from ex
        _LOGGER.debug(
            "Flume update details: %s",
            {
                "values": flume_device.values,
                "query_payload": flume_device.query_payload,
            },
        )

    return DataUpdateCoordinator(
        opp,
        _LOGGER,
        # Name of the data. For logging purposes.
        name=flume_device.device_id,
        update_method=_async_refresh,
        # Polling interval. Will only be polled if there are subscribers.
        update_interval=SCAN_INTERVAL,
    )
def create_coordinator_container_vm(opp, proxmox, host_name, node_name, vm_id, vm_type):
    """Create and return a DataUpdateCoordinator for a vm/container."""

    async def _async_update_data():
        """Call the api and handle the response."""

        def _poll_api():
            """Call the api."""
            return call_api_container_vm(proxmox, node_name, vm_id, vm_type)

        vm_status = await opp.async_add_executor_job(_poll_api)

        if vm_status is None:
            _LOGGER.warning(
                "Vm/Container %s unable to be found in node %s", vm_id, node_name
            )
            return None

        return parse_api_container_vm(vm_status)

    return DataUpdateCoordinator(
        opp,
        _LOGGER,
        name=f"proxmox_coordinator_{host_name}_{node_name}_{vm_id}",
        update_method=_async_update_data,
        update_interval=timedelta(seconds=UPDATE_INTERVAL),
    )
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):
    """Set up Wolf SmartSet Service from a config entry."""
    username = entry.data[CONF_USERNAME]
    password = entry.data[CONF_PASSWORD]
    device_name = entry.data[DEVICE_NAME]
    device_id = entry.data[DEVICE_ID]
    gateway_id = entry.data[DEVICE_GATEWAY]
    _LOGGER.debug(
        "Setting up wolflink integration for device: %s (ID: %s, gateway: %s)",
        device_name,
        device_id,
        gateway_id,
    )

    wolf_client = WolfClient(username, password)

    try:
        parameters = await fetch_parameters(wolf_client, gateway_id, device_id)
    except InvalidAuth:
        _LOGGER.debug("Authentication failed")
        return False

    async def _async_update_data():
        """Update all stored entities for Wolf SmartSet."""
        try:
            values = await wolf_client.fetch_value(gateway_id, device_id, parameters)
        except ConnectError as exception:
            raise UpdateFailed(
                f"Error communicating with API: {exception}"
            ) from exception
        except FetchFailed as exception:
            raise UpdateFailed(
                f"Could not fetch values from server due to: {exception}"
            ) from exception
        except InvalidAuth as exception:
            raise UpdateFailed("Invalid authentication during update.") from exception
        return {v.value_id: v.value for v in values}

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name="wolflink",
        update_method=_async_update_data,
        update_interval=timedelta(minutes=1),
    )

    await coordinator.async_refresh()

    opp.data.setdefault(DOMAIN, {})
    opp.data[DOMAIN][entry.entry_id] = {
        PARAMETERS: parameters,
        COORDINATOR: coordinator,
        DEVICE_ID: device_id,
    }

    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
def __init__(
    self,
    opp: OpenPeerPower,
    profile: str,
    api: ConfigEntryWithingsApi,
    user_id: int,
    webhook_config: WebhookConfig,
) -> None:
    """Initialize the data manager."""
    self._opp = opp
    self._api = api
    self._user_id = user_id
    self._profile = profile
    self._webhook_config = webhook_config
    self._notify_subscribe_delay = datetime.timedelta(seconds=5)
    self._notify_unsubscribe_delay = datetime.timedelta(seconds=1)

    self._is_available = True
    self._cancel_interval_update_interval: CALLBACK_TYPE | None = None
    self._cancel_configure_webhook_subscribe_interval: CALLBACK_TYPE | None = None
    self._api_notification_id = f"withings_{self._user_id}"

    self.subscription_update_coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name="subscription_update_coordinator",
        update_interval=timedelta(minutes=120),
        update_method=self.async_subscribe_webhook,
    )

    # Poll less frequently when the webhook push channel is enabled.
    if self._webhook_config.enabled:
        poll_interval = timedelta(minutes=120)
    else:
        poll_interval = timedelta(minutes=10)

    self.poll_data_update_coordinator = DataUpdateCoordinator[
        Dict[MeasureType, Any]
    ](
        opp,
        _LOGGER,
        name="poll_data_update_coordinator",
        update_interval=poll_interval,
        update_method=self.async_get_all_data,
    )
    self.webhook_update_coordinator = WebhookUpdateCoordinator(
        self._opp, self._user_id
    )
    self._cancel_subscription_update: Callable[[], None] | None = None
    self._subscribe_webhook_run_count = 0
def async_setup(self) -> None:
    """Coordinator creation."""
    # Build the coordinator from the instance's own update hook and interval.
    self.coordinator = DataUpdateCoordinator(
        self.opp,
        LOGGER,
        name=str(self),
        update_interval=self.update_interval,
        update_method=self.async_update_data,
    )
async def async_setup_entry(opp, entry):
    """Set up Subaru from a config entry."""
    config = entry.data
    websession = aiohttp_client.async_get_clientsession(opp)

    try:
        controller = SubaruAPI(
            websession,
            config[CONF_USERNAME],
            config[CONF_PASSWORD],
            config[CONF_DEVICE_ID],
            config[CONF_PIN],
            None,
            config[CONF_COUNTRY],
            update_interval=UPDATE_INTERVAL,
            fetch_interval=FETCH_INTERVAL,
        )
        _LOGGER.debug("Using subarulink %s", controller.version)
        await controller.connect()
    except InvalidCredentials:
        _LOGGER.error("Invalid account")
        return False
    except SubaruException as err:
        raise ConfigEntryNotReady(err.message) from err

    # Map each VIN to its static vehicle information.
    vehicle_info = {
        vin: get_vehicle_info(controller, vin) for vin in controller.get_vehicles()
    }

    async def _async_update_data():
        """Fetch data from API endpoint."""
        try:
            return await refresh_subaru_data(entry, vehicle_info, controller)
        except SubaruException as err:
            raise UpdateFailed(err.message) from err

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name=COORDINATOR_NAME,
        update_method=_async_update_data,
        update_interval=timedelta(seconds=FETCH_INTERVAL),
    )
    await coordinator.async_refresh()

    opp.data.setdefault(DOMAIN, {})
    opp.data[DOMAIN][entry.entry_id] = {
        ENTRY_CONTROLLER: controller,
        ENTRY_COORDINATOR: coordinator,
        ENTRY_VEHICLES: vehicle_info,
    }

    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):
    """Configure the base Nexia device for Open Peer Power."""
    config = entry.data
    username = config[CONF_USERNAME]
    password = config[CONF_PASSWORD]
    brand = config.get(CONF_BRAND, BRAND_NEXIA)

    # Per-account state file keeps the Nexia session between restarts.
    state_file = opp.config.path(f"nexia_config_{username}.conf")

    try:
        nexia_home = await opp.async_add_executor_job(
            partial(
                NexiaHome,
                username=username,
                password=password,
                device_name=opp.config.location_name,
                state_file=state_file,
                brand=brand,
            )
        )
    except ConnectTimeout as ex:
        _LOGGER.error("Unable to connect to Nexia service: %s", ex)
        raise ConfigEntryNotReady from ex
    except HTTPError as http_ex:
        if is_invalid_auth_code(http_ex.response.status_code):
            _LOGGER.error(
                "Access error from Nexia service, please check credentials: %s",
                http_ex,
            )
            return False
        _LOGGER.error("HTTP error from Nexia service: %s", http_ex)
        raise ConfigEntryNotReady from http_ex

    async def _async_update_data():
        """Fetch data from API endpoint."""
        return await opp.async_add_executor_job(nexia_home.update)

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name="Nexia update",
        update_method=_async_update_data,
        update_interval=timedelta(seconds=DEFAULT_UPDATE_RATE),
    )

    opp.data.setdefault(DOMAIN, {})
    opp.data[DOMAIN][entry.entry_id] = {
        NEXIA_DEVICE: nexia_home,
        UPDATE_COORDINATOR: coordinator,
    }

    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
def __init__(self, bridge):
    """Initialize the sensor manager."""
    self.bridge = bridge
    self._component_add_entities = {}
    self.current = {}
    self.current_events = {}
    self._enabled_platforms = ("binary_sensor", "sensor")

    # Debounce refresh requests so bursts only hit the bridge once.
    refresh_debouncer = debounce.Debouncer(
        bridge.opp, _LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
    )
    self.coordinator = DataUpdateCoordinator(
        bridge.opp,
        _LOGGER,
        name="sensor",
        update_method=self.async_update_data,
        update_interval=self.SCAN_INTERVAL,
        request_refresh_debouncer=refresh_debouncer,
    )
async def async_setup_entry(opp, entry):
    """Set up Pi-hole entry."""
    name = entry.data[CONF_NAME]
    host = entry.data[CONF_HOST]
    use_tls = entry.data[CONF_SSL]
    verify_tls = entry.data[CONF_VERIFY_SSL]
    location = entry.data[CONF_LOCATION]
    api_key = entry.data.get(CONF_API_KEY)

    # For backward compatibility
    if CONF_STATISTICS_ONLY not in entry.data:
        opp.config_entries.async_update_entry(
            entry, data={**entry.data, CONF_STATISTICS_ONLY: not api_key}
        )

    _LOGGER.debug("Setting up %s integration with host %s", DOMAIN, host)

    try:
        session = async_get_clientsession(opp, verify_tls)
        api = Hole(
            host,
            opp.loop,
            session,
            location=location,
            tls=use_tls,
            api_token=api_key,
        )
        await api.get_data()
    except HoleError as ex:
        _LOGGER.warning("Failed to connect: %s", ex)
        raise ConfigEntryNotReady from ex

    async def _async_update_data():
        """Fetch data from API endpoint."""
        try:
            await api.get_data()
        except HoleError as err:
            raise UpdateFailed(f"Failed to communicate with API: {err}") from err

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name=name,
        update_method=_async_update_data,
        update_interval=MIN_TIME_BETWEEN_UPDATES,
    )

    opp.data[DOMAIN][entry.entry_id] = {
        DATA_KEY_API: api,
        DATA_KEY_COORDINATOR: coordinator,
    }

    opp.config_entries.async_setup_platforms(entry, _async_platforms(entry))
    return True
def __init__(self, device):
    """Initialize the update manager."""
    self.device = device
    self.available = None
    self.last_update = None
    self.coordinator = DataUpdateCoordinator(
        device.opp,
        _LOGGER,
        name=f"{device.name} ({device.api.model} at {device.api.host[0]})",
        update_method=self.async_update,
        update_interval=self.SCAN_INTERVAL,
    )
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool:
    """Set up OVO Energy from a config entry."""
    client = OVOEnergy()

    try:
        authenticated = await client.authenticate(
            entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD]
        )
    except aiohttp.ClientError as exception:
        _LOGGER.warning(exception)
        raise ConfigEntryNotReady from exception

    if not authenticated:
        raise ConfigEntryAuthFailed

    async def _async_update_data() -> OVODailyUsage:
        """Fetch data from OVO Energy."""
        async with async_timeout.timeout(10):
            # Re-authenticate on every poll; the session is short-lived.
            try:
                logged_in = await client.authenticate(
                    entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD]
                )
            except aiohttp.ClientError as exception:
                raise UpdateFailed(exception) from exception
            if not logged_in:
                raise ConfigEntryAuthFailed("Not authenticated with OVO Energy")
            return await client.get_daily_usage(datetime.utcnow().strftime("%Y-%m"))

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        # Name of the data. For logging purposes.
        name="sensor",
        update_method=_async_update_data,
        # Polling interval. Will only be polled if there are subscribers.
        update_interval=timedelta(seconds=3600),
    )

    opp.data.setdefault(DOMAIN, {})
    opp.data[DOMAIN][entry.entry_id] = {
        DATA_CLIENT: client,
        DATA_COORDINATOR: coordinator,
    }

    # Fetch initial data so we have data when entities subscribe
    await coordinator.async_config_entry_first_refresh()

    # Setup components
    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
def __init__(
    self,
    opp: OpenPeerPower,
    logger: logging.Logger,
    api: AbstractGateApi,
    *,
    name: str,
    update_interval: timedelta,
    update_method: Callable[[], Awaitable] | None = None,
    request_refresh_debouncer: Debouncer | None = None,
) -> None:
    """Initialize the data update coordinator.

    Args:
        opp: Open Peer Power instance.
        logger: Logger passed through to the base coordinator.
        api: Gate API handle kept for consumers of this coordinator.
        name: Coordinator name, used for logging.
        update_interval: Polling interval.
        update_method: Optional coroutine used to fetch data.
        request_refresh_debouncer: Optional debouncer for refresh requests.
    """
    # Use the idiomatic super() call rather than naming the base class
    # explicitly; this stays correct if the inheritance chain changes.
    super().__init__(
        opp,
        logger,
        name=name,
        update_interval=update_interval,
        update_method=update_method,
        request_refresh_debouncer=request_refresh_debouncer,
    )
    self.api = api
async def async_setup_entry(opp, entry):
    """Set up IQVIA as config entry."""
    opp.data.setdefault(DOMAIN, {})

    if not entry.unique_id:
        # If the config entry doesn't already have a unique ID, set one:
        opp.config_entries.async_update_entry(
            entry, **{"unique_id": entry.data[CONF_ZIP_CODE]}
        )

    websession = aiohttp_client.async_get_clientsession(opp)
    client = Client(entry.data[CONF_ZIP_CODE], session=websession)

    async def async_get_data_from_api(api_coro):
        """Get data from a particular API coroutine."""
        try:
            return await api_coro()
        except IQVIAError as err:
            raise UpdateFailed from err

    coordinators = {}
    refresh_tasks = []

    for sensor_type, api_coro in (
        (TYPE_ALLERGY_FORECAST, client.allergens.extended),
        (TYPE_ALLERGY_INDEX, client.allergens.current),
        (TYPE_ALLERGY_OUTLOOK, client.allergens.outlook),
        (TYPE_ASTHMA_FORECAST, client.asthma.extended),
        (TYPE_ASTHMA_INDEX, client.asthma.current),
        (TYPE_DISEASE_FORECAST, client.disease.extended),
        (TYPE_DISEASE_INDEX, client.disease.current),
    ):
        coordinator = DataUpdateCoordinator(
            opp,
            LOGGER,
            name=f"{entry.data[CONF_ZIP_CODE]} {sensor_type}",
            update_interval=DEFAULT_SCAN_INTERVAL,
            update_method=partial(async_get_data_from_api, api_coro),
        )
        coordinators[sensor_type] = coordinator
        refresh_tasks.append(coordinator.async_refresh())

    results = await asyncio.gather(*refresh_tasks, return_exceptions=True)
    if all(isinstance(result, Exception) for result in results):
        # The IQVIA API can be selectively flaky, meaning that any number of the setup
        # API calls could fail. We only retry integration setup if *all* of the initial
        # API calls fail:
        raise ConfigEntryNotReady()

    opp.data[DOMAIN].setdefault(DATA_COORDINATOR, {})[entry.entry_id] = coordinators

    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):
    """Set up JuiceNet from a config entry."""
    session = async_get_clientsession(opp)
    access_token = entry.data[CONF_ACCESS_TOKEN]
    api = Api(access_token, session)

    juicenet = JuiceNetApi(api)

    try:
        await juicenet.setup()
    except TokenError as error:
        _LOGGER.error("JuiceNet Error %s", error)
        return False
    except aiohttp.ClientError as error:
        _LOGGER.error("Could not reach the JuiceNet API %s", error)
        raise ConfigEntryNotReady from error

    if not juicenet.devices:
        _LOGGER.error("No JuiceNet devices found for this account")
        return False

    _LOGGER.info("%d JuiceNet device(s) found", len(juicenet.devices))

    async def _async_update_data():
        """Update all device states from the JuiceNet API."""
        for device in juicenet.devices:
            await device.update_state(True)
        return True

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name="JuiceNet",
        update_method=_async_update_data,
        update_interval=timedelta(seconds=30),
    )

    opp.data[DOMAIN][entry.entry_id] = {
        JUICENET_API: juicenet,
        JUICENET_COORDINATOR: coordinator,
    }

    await coordinator.async_config_entry_first_refresh()

    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_setup_entry(opp, entry):
    """Set up Tile as config entry."""
    opp.data[DOMAIN][DATA_COORDINATOR][entry.entry_id] = {}
    opp.data[DOMAIN][DATA_TILE][entry.entry_id] = {}

    websession = aiohttp_client.async_get_clientsession(opp)

    try:
        client = await async_login(
            entry.data[CONF_USERNAME],
            entry.data[CONF_PASSWORD],
            session=websession,
        )
        opp.data[DOMAIN][DATA_TILE][entry.entry_id] = await client.async_get_tiles()
    except InvalidAuthError:
        LOGGER.error("Invalid credentials provided")
        return False
    except TileError as err:
        raise ConfigEntryNotReady("Error during integration setup") from err

    async def async_update_tile(tile):
        """Update the Tile."""
        try:
            return await tile.async_update()
        except SessionExpiredError:
            LOGGER.info("Tile session expired; creating a new one")
            await client.async_init()
        except TileError as err:
            raise UpdateFailed(f"Error while retrieving data: {err}") from err

    init_tasks = []
    for tile_uuid, tile in opp.data[DOMAIN][DATA_TILE][entry.entry_id].items():
        coordinator = DataUpdateCoordinator(
            opp,
            LOGGER,
            name=tile.name,
            update_interval=DEFAULT_UPDATE_INTERVAL,
            update_method=partial(async_update_tile, tile),
        )
        opp.data[DOMAIN][DATA_COORDINATOR][entry.entry_id][tile_uuid] = coordinator
        init_tasks.append(coordinator.async_refresh())

    # Limit how many initial refreshes run at once.
    await gather_with_concurrency(DEFAULT_INIT_TASK_LIMIT, *init_tasks)

    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_setup_platform(opp, config, async_add_entities, discovery_info=None):
    """Set up the tankerkoenig sensors."""
    if discovery_info is None:
        return

    tankerkoenig = opp.data[DOMAIN]

    async def _async_update_data():
        """Fetch data from API endpoint."""
        try:
            return await tankerkoenig.fetch_data()
        except LookupError as err:
            raise UpdateFailed("Failed to fetch data") from err

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name=NAME,
        update_method=_async_update_data,
        update_interval=tankerkoenig.update_interval,
    )
    # Fetch initial data so we have data when entities subscribe
    await coordinator.async_refresh()

    entities = []
    for station in discovery_info.values():
        for fuel in tankerkoenig.fuel_types:
            if fuel not in station:
                _LOGGER.warning(
                    "Station %s does not offer %s fuel", station["id"], fuel
                )
                continue
            entities.append(
                FuelPriceSensor(
                    fuel,
                    station,
                    coordinator,
                    f"{NAME}_{station['name']}_{fuel}",
                    tankerkoenig.show_on_map,
                )
            )
    _LOGGER.debug("Added sensors %s", entities)

    async_add_entities(entities)
async def async_setup_entry(opp, entry, async_add_entities):
    """Set up the SRP Energy Usage sensor."""
    # API object stored here by __init__.py
    api = opp.data[SRP_ENERGY_DOMAIN]

    is_time_of_use = False
    if entry and entry.data:
        is_time_of_use = entry.data["is_tou"]

    async def _async_update_data():
        """Fetch data from API endpoint.

        This is the place to pre-process the data to lookup tables so
        entities can quickly look up their data.
        """
        try:
            # Fetch srp_energy data
            start_date = datetime.now() + timedelta(days=-1)
            end_date = datetime.now()
            with async_timeout.timeout(10):
                hourly_usage = await opp.async_add_executor_job(
                    api.usage,
                    start_date,
                    end_date,
                    is_time_of_use,
                )
                # Sum the kWh column over the fetched hourly rows.
                total_kwh = 0.0
                for _, _, _, kwh, _ in hourly_usage:
                    total_kwh += float(kwh)
                return total_kwh
        except TimeoutError as timeout_err:
            raise UpdateFailed("Timeout communicating with API") from timeout_err
        except (ConnectError, HTTPError, Timeout, ValueError, TypeError) as err:
            raise UpdateFailed(f"Error communicating with API: {err}") from err

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name="sensor",
        update_method=_async_update_data,
        update_interval=MIN_TIME_BETWEEN_UPDATES,
    )

    # Fetch initial data so we have data when entities subscribe
    await coordinator.async_refresh()

    async_add_entities([SrpEntity(coordinator)])
async def async_init(self):
    """Initialize the data class."""
    self.systems = await self._api.get_systems()
    for system in self.systems.values():
        self._system_notifications[system.system_id] = set()
        self._opp.async_create_task(
            async_register_base_station(
                self._opp, system, self.config_entry.entry_id
            )
        )

    self.coordinator = DataUpdateCoordinator(
        self._opp,
        LOGGER,
        name=self.config_entry.data[CONF_USERNAME],
        update_interval=DEFAULT_SCAN_INTERVAL,
        update_method=self.async_update,
    )
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):
    """Set up Dexcom from a config entry."""
    try:
        dexcom = await opp.async_add_executor_job(
            Dexcom,
            entry.data[CONF_USERNAME],
            entry.data[CONF_PASSWORD],
            entry.data[CONF_SERVER] == SERVER_OUS,
        )
    except AccountError:
        return False
    except SessionError as error:
        raise ConfigEntryNotReady from error

    if not entry.options:
        opp.config_entries.async_update_entry(
            entry, options={CONF_UNIT_OF_MEASUREMENT: MG_DL}
        )

    async def _async_update_data():
        try:
            return await opp.async_add_executor_job(
                dexcom.get_current_glucose_reading
            )
        except SessionError as error:
            raise UpdateFailed(error) from error

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name=DOMAIN,
        update_method=_async_update_data,
        update_interval=SCAN_INTERVAL,
    )

    opp.data.setdefault(DOMAIN, {})
    opp.data[DOMAIN][entry.entry_id] = {
        COORDINATOR: coordinator,
        UNDO_UPDATE_LISTENER: entry.add_update_listener(update_listener),
    }

    await coordinator.async_config_entry_first_refresh()

    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_setup_entry(opp, config_entry, async_add_entities):
    """Set up UK Flood Monitoring Sensors."""
    station_key = config_entry.data["station"]
    session = async_get_clientsession(opp=opp)

    # Measure IDs we have already created entities for.
    measurements = set()

    async def _async_update_data():
        # DataUpdateCoordinator will handle aiohttp ClientErrors and timeouts
        async with async_timeout.timeout(30):
            data = await get_station(session, station_key)

        measures = get_measures(data)

        # Look to see if payload contains new measures
        new_entities = []
        for measure in measures:
            if measure["@id"] in measurements:
                continue

            if "latestReading" not in measure:
                # Don't create a sensor entity for a gauge that isn't available
                continue

            new_entities.append(
                Measurement(opp.data[DOMAIN][station_key], measure["@id"])
            )
            measurements.add(measure["@id"])

        async_add_entities(new_entities)

        # Turn data.measures into a dict rather than a list so easier for entities to
        # find themselves.
        data["measures"] = {measure["@id"]: measure for measure in measures}

        return data

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name="sensor",
        update_method=_async_update_data,
        update_interval=timedelta(seconds=15 * 60),
    )
    opp.data[DOMAIN][station_key] = coordinator

    # Fetch initial data so we have data when entities subscribe
    await coordinator.async_refresh()
def __init__(self, opp: OpenPeerPower, data: dict) -> None:
    """Initialize the Litter-Robot hub."""
    self._data = data
    self.account = None
    self.logged_in = False

    async def _async_update_data() -> bool:
        """Update all device states from the Litter-Robot API."""
        await self.account.refresh_robots()
        return True

    self.coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name=DOMAIN,
        update_method=_async_update_data,
        update_interval=timedelta(seconds=UPDATE_INTERVAL_SECONDS),
    )
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):
    """Set up NuHeat from a config entry."""
    conf = entry.data
    username = conf[CONF_USERNAME]
    password = conf[CONF_PASSWORD]
    serial_number = conf[CONF_SERIAL_NUMBER]

    api = nuheat.NuHeat(username, password)

    try:
        thermostat = await opp.async_add_executor_job(
            _get_thermostat, api, serial_number
        )
    except requests.exceptions.Timeout as ex:
        raise ConfigEntryNotReady from ex
    except requests.exceptions.HTTPError as ex:
        # 4xx (other than 400) means bad credentials; anything else is retried.
        if HTTP_BAD_REQUEST < ex.response.status_code < HTTP_INTERNAL_SERVER_ERROR:
            _LOGGER.error("Failed to login to nuheat: %s", ex)
            return False
        raise ConfigEntryNotReady from ex
    except Exception as ex:  # pylint: disable=broad-except
        _LOGGER.error("Failed to login to nuheat: %s", ex)
        return False

    async def _async_update_data():
        """Fetch data from API endpoint."""
        await opp.async_add_executor_job(thermostat.get_data)

    coordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name=f"nuheat {serial_number}",
        update_method=_async_update_data,
        update_interval=timedelta(minutes=5),
    )

    opp.data.setdefault(DOMAIN, {})
    opp.data[DOMAIN][entry.entry_id] = (thermostat, coordinator)

    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool:
    """Set up config entry."""
    session = aiohttp_client.async_get_clientsession(opp)
    opp.data.setdefault(DOMAIN, {})

    printer = SyncThru(entry.data[CONF_URL], session)

    async def _async_update_data() -> SyncThru:
        """Fetch data from the printer."""
        try:
            async with async_timeout.timeout(10):
                await printer.update()
        except ValueError as value_error:
            # if an exception is thrown, printer does not support syncthru
            raise UpdateFailed(
                f"Configured printer at {printer.url} does not respond. "
                "Please make sure it supports SyncThru and check your configuration."
            ) from value_error

        if printer.is_unknown_state():
            raise ConfigEntryNotReady

        return printer

    coordinator: DataUpdateCoordinator = DataUpdateCoordinator(
        opp,
        _LOGGER,
        name=DOMAIN,
        update_method=_async_update_data,
        update_interval=timedelta(seconds=30),
    )
    opp.data[DOMAIN][entry.entry_id] = coordinator
    await coordinator.async_config_entry_first_refresh()

    device_registry = await dr.async_get_registry(opp)
    device_registry.async_get_or_create(
        config_entry_id=entry.entry_id,
        connections=device_connections(printer),
        identifiers=device_identifiers(printer),
        model=printer.model(),
        name=printer.hostname(),
    )

    opp.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True