def __init__(self, hass, config):
    self.hass = hass
    self._name = "gmproxy_player"
    self._media_player = "input_select." + config.get(
        CONF_SPEAKERS, DEFAULT_SPEAKERS)
    self._play_mode = "input_select." + config.get(CONF_PLAY_MODE,
                                                   DEFAULT_PLAY_MODE)
    self._gmproxyurl = config.get(CONF_GMPROXYURL, DEFAULT_GMPROXYURL)

    hass.bus.listen_once(EVENT_HOMEASSISTANT_START,
                         self._update_media_players)
    # Search for speakers; wait 10 seconds so they get discovered first.
    call_later(self.hass, 10, self._update_media_players)
    # After the first search, a 60-second interval is fine.
    SCAN_INTERVAL = timedelta(seconds=60)
    track_time_interval(self.hass, self._update_media_players, SCAN_INTERVAL)

    self._speaker = None
    self._attributes = {}
    self._unsub_speaker_tracker = None
    self._playing = False
    self._current_track = None
    self._state = STATE_OFF
    self._volume = 0.0
    self._is_mute = False
    self._track_name = None
    self._track_artist = None
    self._track_album_name = None
    self._track_album_cover = None
    self._track_artist_cover = None
    self._attributes['_player_state'] = STATE_OFF
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the available Netatmo weather sensors."""
    dev = []
    auth = hass.data[DATA_NETATMO_AUTH]

    if config.get(CONF_AREAS) is not None:
        for area in config[CONF_AREAS]:
            data = NetatmoPublicData(
                auth,
                lat_ne=area[CONF_LAT_NE],
                lon_ne=area[CONF_LON_NE],
                lat_sw=area[CONF_LAT_SW],
                lon_sw=area[CONF_LON_SW]
            )
            for sensor_type in area[CONF_MONITORED_CONDITIONS]:
                dev.append(NetatmoPublicSensor(
                    area[CONF_NAME], data, sensor_type, area[CONF_MODE]
                ))
    else:
        def _retry(_data):
            try:
                _dev = find_devices(_data)
            except requests.exceptions.Timeout:
                return call_later(hass, NETATMO_UPDATE_INTERVAL,
                                  lambda _: _retry(_data))
            if _dev:
                add_entities(_dev, True)

        import pyatmo
        for data_class in [pyatmo.WeatherStationData, pyatmo.HomeCoachData]:
            try:
                data = NetatmoData(auth, data_class, config.get(CONF_STATION))
            except pyatmo.NoDevice:
                _LOGGER.warning(
                    "No %s devices found",
                    NETATMO_DEVICE_TYPES[data_class.__name__]
                )
                continue
            # Test if manually configured
            if CONF_MODULES in config:
                module_items = config[CONF_MODULES].items()
                for module_name, monitored_conditions in module_items:
                    for condition in monitored_conditions:
                        dev.append(NetatmoSensor(
                            data, module_name, condition.lower(),
                            config.get(CONF_STATION)))
                continue
            # Otherwise add all modules and conditions
            try:
                dev.extend(find_devices(data))
            except requests.exceptions.Timeout:
                call_later(hass, NETATMO_UPDATE_INTERVAL,
                           lambda _: _retry(data))

    if dev:
        add_entities(dev, True)
def update(self):
    """Get the latest details on a media player.

    Because media players spend the majority of time idle, an adaptive
    update should be used to avoid flooding Amazon, focusing on known
    play states. An initial version included an update_devices call on
    every update. However, this quickly floods the network for every new
    device added. This should only call refresh() to call the AlexaAPI.
    """
    if self._device is None or self.entity_id is None:
        # Device has not initialized yet
        return
    self.refresh(no_throttle=True)
    if self.state in [STATE_PLAYING]:
        self._should_poll = False  # disable polling since manual update
        if (self._last_update == 0 or
                util.dt.as_timestamp(util.utcnow()) -
                util.dt.as_timestamp(self._last_update)
                > PLAY_SCAN_INTERVAL):
            _LOGGER.debug("%s playing; scheduling update in %s seconds",
                          self.name, PLAY_SCAN_INTERVAL)
            call_later(self.hass, PLAY_SCAN_INTERVAL,
                       lambda _:
                       self.schedule_update_ha_state(force_refresh=True))
    elif self._should_poll:  # Not playing; one last poll
        self._should_poll = False
        _LOGGER.debug("Disabling polling and scheduling last update in 300"
                      " seconds for %s", self.name)
        call_later(self.hass, 300,
                   lambda _:
                   self.schedule_update_ha_state(force_refresh=True))
    self._last_update = util.utcnow()
    self.schedule_update_ha_state()
def setup(hass, config):
    """Set up the InfluxDB component."""
    conf = config[DOMAIN]
    try:
        influx = get_influx_connection(conf, test_write=True)
    except ConnectionError as exc:
        _LOGGER.error(RETRY_MESSAGE, exc)
        event_helper.call_later(hass, RETRY_INTERVAL,
                                lambda _: setup(hass, config))
        return True

    event_to_json = _generate_event_to_json(conf)
    max_tries = conf.get(CONF_RETRY_COUNT)
    instance = hass.data[DOMAIN] = InfluxThread(hass, influx, event_to_json,
                                                max_tries)
    instance.start()

    def shutdown(event):
        """Shut down the thread."""
        instance.queue.put(None)
        instance.join()
        influx.close()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)

    return True
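# Note on the retry idiom above: on connection failure the component
# schedules its own `setup` again via call_later and still returns True,
# so Home Assistant treats the integration as loaded while retries run in
# the background. A minimal sketch of that skeleton (names other than
# call_later/event_helper are illustrative assumptions, not this
# component's API):
def _retrying_setup(hass, config):
    """Set up a component, retrying in the background on failure."""
    try:
        connection = _connect(config)  # hypothetical stand-in for real I/O
    except ConnectionError:
        # Re-run this same setup later; `lambda _` discards the fire time
        # that call_later passes to its callback.
        event_helper.call_later(hass, RETRY_INTERVAL,
                                lambda _: _retrying_setup(hass, config))
        return True  # report success so HA does not mark the setup failed
    _start_workers(hass, connection)  # hypothetical: resume normal setup
    return True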
def __init__(self, hass, username, password, webdriver, wait_time, tmpdir,
             scan_interval):
    """Initialise the VeoliaIDF account."""
    self._username = username
    self.__password = password
    self._webdriver = webdriver
    self._wait_time = wait_time
    self._tmpdir = tmpdir
    self._scan_interval = scan_interval
    self._data = None
    self.sensors = []

    call_later(hass, 5, self.update_veolia_data)

    self.sensors.append(
        VeoliaIDFSensor(HA_PERIOD_START_TIME, PropertyNameEnum.TIME.value,
                        None, BEFORE_LAST_INDEX, self))
    self.sensors.append(
        VeoliaIDFSensor(HA_PERIOD_END_TIME, PropertyNameEnum.TIME.value,
                        None, LAST_INDEX, self))
    self.sensors.append(
        VeoliaIDFSensor(HA_YESTERDAY_LITER,
                        PropertyNameEnum.DAILY_LITER.value,
                        VOLUME_LITERS, LAST_INDEX, self))
    self.sensors.append(
        VeoliaIDFSensor(HA_TOTAL_LITER, PropertyNameEnum.TOTAL_LITER.value,
                        VOLUME_LITERS, LAST_INDEX, self))

    track_time_interval(hass, self.update_veolia_data, self._scan_interval)
def play_media(self, media_type, media_id, **kwargs):
    if not self._update_entity_ids():
        return
    _player = self.hass.states.get(self._entity_ids)
    if media_type == "station":
        _source = {"option": "Station", "entity_id": self._source}
        _option = {"option": media_id, "entity_id": self._station}
        self.hass.services.call(input_select.DOMAIN,
                                input_select.SERVICE_SELECT_OPTION, _source)
        self.hass.services.call(input_select.DOMAIN,
                                input_select.SERVICE_SELECT_OPTION, _option)
    elif media_type == "playlist":
        _source = {"option": "Playlist", "entity_id": self._source}
        _option = {"option": media_id, "entity_id": self._playlist}
        self.hass.services.call(input_select.DOMAIN,
                                input_select.SERVICE_SELECT_OPTION, _source)
        self.hass.services.call(input_select.DOMAIN,
                                input_select.SERVICE_SELECT_OPTION, _option)
    else:
        _LOGGER.error("Invalid media_type (%s); valid values are "
                      "'station' or 'playlist'.", media_type)
        return
    if self._playing:
        self.media_stop()
        self.media_play()
    elif not self._playing and self._state == STATE_OFF:
        if _player.state == STATE_OFF:
            self.turn_on()
        else:
            data = {ATTR_ENTITY_ID: _player.entity_id}
            self._turn_off_media_player(data)
            call_later(self.hass, 1, self.turn_on)
    else:
        _LOGGER.error("self._state is: (%s).", self._state)
def accessory_setup(self):
    """Handle setup of a HomeKit accessory."""
    # pylint: disable=import-error
    from homekit.model.services import ServicesTypes

    self.pairing.pairing_data['AccessoryIP'] = self.host
    self.pairing.pairing_data['AccessoryPort'] = self.port

    try:
        data = self.pairing.list_accessories_and_characteristics()
    except HomeKitConnectionError:
        call_later(self.hass, RETRY_INTERVAL,
                   lambda _: self.accessory_setup())
        return
    for accessory in data:
        serial = get_serial(accessory)
        if serial in self.hass.data[KNOWN_ACCESSORIES]:
            continue
        self.hass.data[KNOWN_ACCESSORIES][serial] = self
        aid = accessory['aid']
        for service in accessory['services']:
            devtype = ServicesTypes.get_short(service['type'])
            _LOGGER.debug("Found %s", devtype)
            service_info = {
                'serial': serial,
                'aid': aid,
                'iid': service['iid'],
                'model': self.model,
                'device-type': devtype
            }
            component = HOMEKIT_ACCESSORY_DISPATCH.get(devtype, None)
            if component is not None:
                discovery.load_platform(self.hass, component, DOMAIN,
                                        service_info, self.config)
def accessory_setup(self):
    """Handle setup of a HomeKit accessory."""
    # pylint: disable=import-error
    from homekit.model.services import ServicesTypes
    from homekit.exceptions import AccessoryDisconnectedError

    self.pairing.pairing_data['AccessoryIP'] = self.host
    self.pairing.pairing_data['AccessoryPort'] = self.port

    try:
        data = self.pairing.list_accessories_and_characteristics()
    except AccessoryDisconnectedError:
        call_later(
            self.hass, RETRY_INTERVAL, lambda _: self.accessory_setup())
        return
    for accessory in data:
        aid = accessory['aid']
        for service in accessory['services']:
            iid = service['iid']
            if (aid, iid) in self.entities:
                # Don't add the same entity again
                continue
            devtype = ServicesTypes.get_short(service['type'])
            _LOGGER.debug("Found %s", devtype)
            service_info = {'serial': self.hkid,
                            'aid': aid,
                            'iid': service['iid'],
                            'model': self.model,
                            'device-type': devtype}
            component = HOMEKIT_ACCESSORY_DISPATCH.get(devtype, None)
            if component is not None:
                discovery.load_platform(self.hass, component, DOMAIN,
                                        service_info, self.config)
def accessory_setup(self):
    """Handle setup of a HomeKit accessory."""
    import homekit  # pylint: disable=import-error

    try:
        data = self.get_json('/accessories')
    except HomeKitConnectionError:
        call_later(
            self.hass, RETRY_INTERVAL, lambda _: self.accessory_setup())
        return
    for accessory in data['accessories']:
        serial = get_serial(accessory)
        if serial in self.hass.data[KNOWN_ACCESSORIES]:
            continue
        self.hass.data[KNOWN_ACCESSORIES][serial] = self
        aid = accessory['aid']
        for service in accessory['services']:
            service_info = {'serial': serial,
                            'aid': aid,
                            'iid': service['iid']}
            devtype = homekit.ServicesTypes.get_short(service['type'])
            _LOGGER.debug("Found %s", devtype)
            component = HOMEKIT_ACCESSORY_DISPATCH.get(devtype, None)
            if component is not None:
                discovery.load_platform(self.hass, component, DOMAIN,
                                        service_info, self.config)
def accessory_setup(self):
    """Handle setup of a HomeKit accessory."""
    # pylint: disable=import-error
    import homekit

    try:
        data = self.get_json('/accessories')
    except HomeKitConnectionError:
        call_later(self.hass, RETRY_INTERVAL,
                   lambda _: self.accessory_setup())
        return
    for accessory in data['accessories']:
        serial = get_serial(accessory)
        if serial in self.hass.data[KNOWN_ACCESSORIES]:
            continue
        self.hass.data[KNOWN_ACCESSORIES][serial] = self
        aid = accessory['aid']
        for service in accessory['services']:
            service_info = {
                'serial': serial,
                'aid': aid,
                'iid': service['iid']
            }
            devtype = homekit.ServicesTypes.get_short(service['type'])
            _LOGGER.debug("Found %s", devtype)
            component = HOMEKIT_ACCESSORY_DISPATCH.get(devtype, None)
            if component is not None:
                discovery.load_platform(self.hass, component, DOMAIN,
                                        service_info, self.config)
def _handle_event(self, event):
    """Handle websocket events.

    Used instead of polling.
    """
    if 'push_activity' in event.data:
        call_later(self.hass, 2,
                   lambda _: self.refresh(no_throttle=True))
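# A recurring detail in these snippets: call_later invokes its callback
# with the scheduled fire time as the single positional argument, which is
# why handlers that ignore it wrap the real work in `lambda _: ...`. A
# minimal sketch of that contract (`hass` is assumed to be a running
# HomeAssistant instance, as in the surrounding snippets):
from datetime import datetime

from homeassistant.helpers.event import call_later

def _delayed_refresh(now: datetime) -> None:
    """The callback receives the time at which it fired."""
    print(f"refresh triggered at {now}")

call_later(hass, 2, _delayed_refresh)  # schedule once, 2 seconds from now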
def _adapter_watchdog(now=None):
    _LOGGER.debug("Reached _adapter_watchdog")
    # Reschedule first, so the watchdog keeps running even if init() fails.
    event.call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog)
    if not adapter.initialized:
        _LOGGER.info("Adapter not initialized; Trying to restart")
        hass.bus.fire(EVENT_HDMI_CEC_UNAVAILABLE)
        adapter.init()
def play_media(self, media_type, media_id, _player=None, **kwargs):
    if not self._update_entity_ids():
        return
    # Should skip this if input_select does not exist
    if _player is not None:
        _option = {"option": _player, "entity_id": self._media_player}
        self.hass.services.call(input_select.DOMAIN,
                                input_select.SERVICE_SELECT_OPTION, _option)
    _source = {"option": "Playlist", "entity_id": self._source}
    _option = {"option": media_id, "entity_id": self._playlist}
    self.hass.services.call(input_select.DOMAIN,
                            input_select.SERVICE_SELECT_OPTION, _source)
    self.hass.services.call(input_select.DOMAIN,
                            input_select.SERVICE_SELECT_OPTION, _option)
    _player = self.hass.states.get(self._entity_ids)
    if self._playing:
        self.media_stop()
        self.media_play()
    elif not self._playing and self._state == STATE_OFF:
        if _player.state == STATE_OFF:
            self.turn_on()
        else:
            data = {ATTR_ENTITY_ID: _player.entity_id}
            self._turn_off_media_player(data)
            call_later(self.hass, 1, self.turn_on)
    else:
        _LOGGER.error("self._state is: (%s).", self._state)
def return_to_base(self, **kwargs):
    """Return dock to charging base."""
    if self.supported_features & SUPPORT_RETURN_HOME == 0:
        return

    self._state = STATE_RETURNING
    self.schedule_update_ha_state()

    event.call_later(self.hass, 30, self.__set_state_to_dock)
def return_to_base(self, **kwargs: Any) -> None:
    """Return dock to charging base."""
    if self.supported_features & VacuumEntityFeature.RETURN_HOME == 0:
        return

    self._state = STATE_RETURNING
    self.schedule_update_ha_state()

    event.call_later(self.hass, 30, self.__set_state_to_dock)
def set_state(self, value):
    """Move switch state to value if call came from HomeKit."""
    _LOGGER.debug("%s: Set switch state to %s", self.entity_id, value)

    if self.activate_only and not value:
        _LOGGER.debug("%s: Ignoring turn_off call", self.entity_id)
        return

    params = {ATTR_ENTITY_ID: self.entity_id}
    service = SERVICE_TURN_ON if value else SERVICE_TURN_OFF
    self.call_service(self._domain, service, params)

    if self.activate_only:
        call_later(self.hass, 1, self.reset_switch)
def setup_platform(hass, config, add_devices_callback,
                   discovery_info=None):
    """Set up the Alexa switch platform."""
    _LOGGER.debug("Loading switches")
    devices = []  # type: List[DNDSwitch]
    SWITCH_TYPES = [
        ('dnd', DNDSwitch),
        ('shuffle', ShuffleSwitch),
        ('repeat', RepeatSwitch)
    ]
    for account, account_dict in (hass.data[DATA_ALEXAMEDIA]
                                  ['accounts'].items()):
        for key, device in account_dict['devices']['media_player'].items():
            if 'switch' not in account_dict['entities']:
                (hass.data[DATA_ALEXAMEDIA]
                 ['accounts']
                 [account]
                 ['entities']
                 ['switch']) = {}
            if key not in account_dict['entities']['media_player']:
                _LOGGER.debug("Media Players not loaded yet; delaying load")
                call_later(hass, 5, lambda _:
                           setup_platform(hass, config,
                                          add_devices_callback,
                                          discovery_info))
                return True
            elif key not in account_dict['entities']['switch']:
                (hass.data[DATA_ALEXAMEDIA]
                 ['accounts']
                 [account]
                 ['entities']
                 ['switch'][key]) = {}
                for (switch_key, class_) in SWITCH_TYPES:
                    alexa_client = class_(account_dict['entities']
                                          ['media_player']
                                          [key],
                                          hass,
                                          account)  # type: AlexaMediaSwitch
                    (hass.data[DATA_ALEXAMEDIA]
                     ['accounts']
                     [account]
                     ['entities']
                     ['switch'][key][switch_key]) = alexa_client
                    _LOGGER.debug("%s: Found %s %s switch with status: %s",
                                  hide_email(account),
                                  hide_serial(key),
                                  switch_key,
                                  alexa_client.is_on)
                    devices.append(alexa_client)
    if devices:
        add_devices_callback(devices, True)
    return True
def turn_on(self, *args, **kwargs):
    """Turn on the selected media_player from the input_select."""
    self._playing = False
    if not self._update_entity_ids():
        return
    _player = self.hass.states.get(self._entity_ids)
    data = {ATTR_ENTITY_ID: _player.entity_id}
    if _player.state == STATE_OFF:
        self._unsub_tracker = track_state_change(
            self.hass, _player.entity_id, self._sync_player)
        self._turn_on_media_player(data)
    elif _player.state != STATE_OFF:
        self._turn_off_media_player(data)
        call_later(self.hass, 1, self.turn_on)
def _schedule_update(self):
    if not self.hass:
        return

    # Update the new state
    self.schedule_update_ha_state(False)

    # nuheat has a delay switching state, so we schedule a poll of the API
    # in the future to make sure the change actually took effect.
    event_helper.call_later(self.hass, NUHEAT_API_STATE_SHIFT_DELAY,
                            self._forced_refresh)
def set_state(self, value):
    """Move switch state to value if call came from HomeKit."""
    _LOGGER.debug('%s: Set switch state to %s', self.entity_id, value)

    if self.activate_only and value == 0:
        _LOGGER.debug('%s: Ignoring turn_off call', self.entity_id)
        return

    self._flag_state = True
    params = {ATTR_ENTITY_ID: self.entity_id}
    service = SERVICE_TURN_ON if value else SERVICE_TURN_OFF
    self.call_service(self._domain, service, params)

    if self.activate_only:
        call_later(self.hass, 1, self.reset_switch)
def test_call_later(self):
    """Test calling an action later."""
    def action():
        pass

    now = datetime(2017, 12, 19, 15, 40, 0, tzinfo=dt_util.UTC)

    with patch('homeassistant.helpers.event'
               '.async_track_point_in_utc_time') as mock, \
            patch('homeassistant.util.dt.utcnow', return_value=now):
        call_later(self.hass, 3, action)

    assert len(mock.mock_calls) == 1
    p_hass, p_action, p_point = mock.mock_calls[0][1]
    assert p_hass is self.hass
    assert p_action is action
    assert p_point == now + timedelta(seconds=3)
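# The test above pins down the helper's behavior: call_later computes
# `utcnow() + timedelta(seconds=delay)` and delegates to
# async_track_point_in_utc_time. Assuming only what the assertions show,
# the two calls below are equivalent (`hass` and `action` as in the test):
from datetime import timedelta

import homeassistant.util.dt as dt_util
from homeassistant.helpers.event import (
    async_track_point_in_utc_time, call_later)

call_later(hass, 3, action)  # sugar form used throughout this file

async_track_point_in_utc_time(  # expanded form the test asserts
    hass, action, dt_util.utcnow() + timedelta(seconds=3))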
def setup(hass, config):
    prana_data = hass.data[DOMAIN] = {}
    prana_data[CONF_DEVICES] = []
    conf = config.get(DOMAIN)
    if CONF_DEVICES not in conf:
        return True
    scan_interval = conf.get(CONF_SCAN_INTERVAL,
                             SCAN_INTERVAL).total_seconds()

    for device_conf in conf[CONF_DEVICES]:
        prana_data[CONF_DEVICES].append({})
        device = prana_data[CONF_DEVICES][-1]
        mac = device_conf.get(CONF_MAC)
        device[CLIENT] = prana.Prana(mac)
        device[CONFIG] = device_conf

    for platform in ["fan", "sensor"]:
        load_platform(hass, platform, DOMAIN, {}, conf)

    def device_update():
        """Update Prana devices."""
        for device in prana_data[CONF_DEVICES]:
            prana_client = device[CLIENT]
            _LOGGER.debug("Updating Prana device... %s", prana_client.mac)
            if prana_client.getStatusDetails():
                _LOGGER.debug("Update success...")
                dispatcher_send(hass, SIGNAL_UPDATE_PRANA + prana_client.mac)
            else:
                _LOGGER.debug("Update failed...")

    def poll_devices(time):
        # Use a thread: the device sometimes hangs on disconnect
        # (possibly an issue in bluepy).
        thread = threading.Thread(name='PranaWorker', target=device_update)
        thread.daemon = True
        thread.start()
        # Reschedule itself for the next poll.
        call_later(hass, scan_interval, poll_devices)
        # track_time_interval(hass, device_update,
        #                     timedelta(seconds=scan_interval))

    call_later(hass, 0, poll_devices)  # trigger an update now
    return True
def update(self):
    """Get the latest details on a media player.

    Because media players spend the majority of time idle, an adaptive
    update should be used to avoid flooding Amazon, focusing on known
    play states. An initial version included an update_devices call on
    every update. However, this quickly floods the network for every new
    device added. This should only call refresh() to call the AlexaAPI.
    """
    if self._device is None or self.entity_id is None:
        # Device has not initialized yet
        return
    email = self._login.email
    device = (self.hass.data[DATA_ALEXAMEDIA]
              ['accounts']
              [email]
              ['devices']
              ['media_player']
              [self.unique_id])
    self.refresh(device,  # pylint: disable=unexpected-keyword-arg
                 no_throttle=True)
    if (self.state in [STATE_PLAYING] and
            # only enable polling if websocket not connected
            (not self.hass.data[DATA_ALEXAMEDIA]
             ['accounts'][email]['websocket'])):
        self._should_poll = False  # disable polling since manual update
        if (self._last_update == 0 or
                util.dt.as_timestamp(util.utcnow()) -
                util.dt.as_timestamp(self._last_update)
                > PLAY_SCAN_INTERVAL):
            _LOGGER.debug("%s playing; scheduling update in %s seconds",
                          self.name, PLAY_SCAN_INTERVAL)
            call_later(self.hass, PLAY_SCAN_INTERVAL,
                       lambda _:
                       self.schedule_update_ha_state(force_refresh=True))
    elif self._should_poll:  # Not playing; one last poll
        self._should_poll = False
        if not (self.hass.data[DATA_ALEXAMEDIA]
                ['accounts'][email]['websocket']):
            _LOGGER.debug("Disabling polling and scheduling last update in"
                          " 300 seconds for %s", self.name)
            call_later(self.hass, 300,
                       lambda _:
                       self.schedule_update_ha_state(force_refresh=True))
        else:
            _LOGGER.debug("Disabling polling for %s", self.name)
    self._last_update = util.utcnow()
    self.schedule_update_ha_state()
def _retry(_data):
    try:
        entities = find_devices(_data)
    except requests.exceptions.Timeout:
        return call_later(hass, NETATMO_UPDATE_INTERVAL,
                          lambda _: _retry(_data))
    if entities:
        add_entities(entities, True)
def _update_soon(self, delay):
    """Reschedule update task."""
    if self._update_task:
        self._update_task()
        self._update_task = None

    self.schedule_update_ha_state(force_refresh=False)

    def update_forced(event_time):
        self.schedule_update_ha_state(force_refresh=True)

    def update_and_restart(event_time):
        update_forced(event_time)
        self._update_task = track_time_interval(
            self.hass, update_forced,
            timedelta(seconds=DEFAULT_SCAN_INTERVAL))

    call_later(self.hass, delay, update_and_restart)
def __init__(self, hass, username, password, cost):
    """Initialise the Gazpar account."""
    self._username = username
    self._password = password
    self._cost = cost
    self.sensors = []

    call_later(hass, 5, self.update_gazpar_data)

    # Add sensors
    self.sensors.append(
        GazparSensor(HA_LAST_ENERGY_KWH, ENERGY_KILO_WATT_HOUR))
    self.sensors.append(GazparSensor(HA_LAST_ENERGY_PRICE, CURRENCY_EURO))
    self.sensors.append(
        GazparSensor(HA_MONTH_ENERGY_KWH, ENERGY_KILO_WATT_HOUR))
    self.sensors.append(GazparSensor(HA_MONTH_ENERGY_PRICE, CURRENCY_EURO))

    track_time_interval(hass, self.update_gazpar_data,
                        DEFAULT_SCAN_INTERVAL)
def __init__(self, hass, username: str, password: str, pceIdentifier: str,
             wait_time: int, tmpdir: str, scan_interval: int,
             testMode: bool):
    """Initialise the Gazpar account."""
    self._username = username
    self._password = password
    self._pceIdentifier = pceIdentifier
    self._wait_time = wait_time
    self._tmpdir = tmpdir
    self._scan_interval = scan_interval
    self._testMode = testMode
    self._dataByFrequency = {}
    self.sensors = []

    lastIndexByFrequence = {
        Frequency.HOURLY: LAST_INDEX,
        Frequency.DAILY: LAST_INDEX,
        Frequency.WEEKLY: BEFORE_LAST_INDEX,
        Frequency.MONTHLY: BEFORE_LAST_INDEX,
    }

    for frequency in Frequency:
        if frequency is not Frequency.HOURLY:  # Hourly not yet implemented.
            self.sensors.append(
                GazparSensor(HA_LAST_ENERGY_KWH_BY_FREQUENCY[frequency],
                             PropertyName.ENERGY.value,
                             ENERGY_KILO_WATT_HOUR,
                             lastIndexByFrequence[frequency],
                             frequency, self))

    if hass is not None:
        call_later(hass, 5, self.update_gazpar_data)
        track_time_interval(hass, self.update_gazpar_data,
                            self._scan_interval)
    else:
        self.update_gazpar_data(None)
def _manual_hosts(self, now: datetime.datetime | None = None) -> None:
    """Players from network configuration."""
    for host in self.hosts:
        ip_addr = socket.gethostbyname(host)
        known_uid = next(
            (
                uid
                for uid, speaker in self.data.discovered.items()
                if speaker.soco.ip_address == ip_addr
            ),
            None,
        )
        if not known_uid:
            soco = self._create_soco(ip_addr, SoCoCreationSource.CONFIGURED)
            if soco and soco.is_visible:
                self._discovered_player(soco)
    self.data.hosts_heartbeat = call_later(
        self.hass, DISCOVERY_INTERVAL.total_seconds(), self._manual_hosts
    )
def _schedule_poll(self, delay: float) -> None:
    self._cancel_poll = call_later(self._hass, delay, self._run_poll_server)
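# Snippets like the one above (and the Sonos hosts_heartbeat below) keep
# the return value of call_later because it is an unsubscribe callable:
# invoking it before the delay elapses cancels the pending timer. A hedged
# sketch of the matching cancel path (method and attribute names mirror the
# snippet above; they are not a specific integration's API):
def _cancel_pending_poll(self) -> None:
    """Cancel a poll scheduled by _schedule_poll, if one is pending."""
    if self._cancel_poll is not None:
        self._cancel_poll()  # fire the unsubscribe handle from call_later
        self._cancel_poll = None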
def setup(hass, config):
    """Set up the InfluxDB component."""
    from influxdb import InfluxDBClient, exceptions

    conf = config[DOMAIN]

    kwargs = {
        'database': conf[CONF_DB_NAME],
        'verify_ssl': conf[CONF_VERIFY_SSL],
        'timeout': TIMEOUT
    }

    if CONF_HOST in conf:
        kwargs['host'] = conf[CONF_HOST]

    if CONF_PORT in conf:
        kwargs['port'] = conf[CONF_PORT]

    if CONF_USERNAME in conf:
        kwargs['username'] = conf[CONF_USERNAME]

    if CONF_PASSWORD in conf:
        kwargs['password'] = conf[CONF_PASSWORD]

    if CONF_SSL in conf:
        kwargs['ssl'] = conf[CONF_SSL]

    include = conf.get(CONF_INCLUDE, {})
    exclude = conf.get(CONF_EXCLUDE, {})
    whitelist_e = set(include.get(CONF_ENTITIES, []))
    whitelist_d = set(include.get(CONF_DOMAINS, []))
    blacklist_e = set(exclude.get(CONF_ENTITIES, []))
    blacklist_d = set(exclude.get(CONF_DOMAINS, []))
    tags = conf.get(CONF_TAGS)
    tags_attributes = conf.get(CONF_TAGS_ATTRIBUTES)
    default_measurement = conf.get(CONF_DEFAULT_MEASUREMENT)
    override_measurement = conf.get(CONF_OVERRIDE_MEASUREMENT)
    component_config = EntityValues(
        conf[CONF_COMPONENT_CONFIG],
        conf[CONF_COMPONENT_CONFIG_DOMAIN],
        conf[CONF_COMPONENT_CONFIG_GLOB])
    max_tries = conf.get(CONF_RETRY_COUNT)

    try:
        influx = InfluxDBClient(**kwargs)
        influx.write_points([])
    except (exceptions.InfluxDBClientError,
            requests.exceptions.ConnectionError) as exc:
        _LOGGER.warning(
            "Database host is not accessible due to '%s', please "
            "check your entries in the configuration file (host, "
            "port, etc.) and verify that the database exists and is "
            "READ/WRITE. Retrying again in %s seconds.", exc, RETRY_INTERVAL
        )
        event_helper.call_later(
            hass, RETRY_INTERVAL, lambda _: setup(hass, config)
        )
        return True

    def event_to_json(event):
        """Add an event to the outgoing Influx list."""
        state = event.data.get('new_state')
        if state is None or state.state in (
                STATE_UNKNOWN, '', STATE_UNAVAILABLE) or \
                state.entity_id in blacklist_e or \
                state.domain in blacklist_d:
            return

        try:
            if ((whitelist_e or whitelist_d) and
                    state.entity_id not in whitelist_e and
                    state.domain not in whitelist_d):
                return

            _include_state = _include_value = False

            _state_as_value = float(state.state)
            _include_value = True
        except ValueError:
            try:
                _state_as_value = float(
                    state_helper.state_as_number(state))
                _include_state = _include_value = True
            except ValueError:
                _include_state = True

        include_uom = True
        measurement = component_config.get(state.entity_id).get(
            CONF_OVERRIDE_MEASUREMENT)
        if measurement in (None, ''):
            if override_measurement:
                measurement = override_measurement
            else:
                measurement = state.attributes.get('unit_of_measurement')
                if measurement in (None, ''):
                    if default_measurement:
                        measurement = default_measurement
                    else:
                        measurement = state.entity_id
                else:
                    include_uom = False

        json = {
            'measurement': measurement,
            'tags': {
                'domain': state.domain,
                'entity_id': state.object_id,
            },
            'time': event.time_fired,
            'fields': {}
        }
        if _include_state:
            json['fields']['state'] = state.state
        if _include_value:
            json['fields']['value'] = _state_as_value

        for key, value in state.attributes.items():
            if key in tags_attributes:
                json['tags'][key] = value
            elif key != 'unit_of_measurement' or include_uom:
                # If the key is already in fields
                if key in json['fields']:
                    key = key + "_"  # Prevent column data errors in influxDB.
                # For each value we try to cast it as float, but if we
                # cannot, we store the value as a string and add a "_str"
                # postfix to the field key.
                try:
                    json['fields'][key] = float(value)
                except (ValueError, TypeError):
                    new_key = "{}_str".format(key)
                    new_value = str(value)
                    json['fields'][new_key] = new_value

                    if RE_DIGIT_TAIL.match(new_value):
                        json['fields'][key] = float(
                            RE_DECIMAL.sub('', new_value))

                # Infinity and NaN are not valid floats in InfluxDB
                try:
                    if not math.isfinite(json['fields'][key]):
                        del json['fields'][key]
                except (KeyError, TypeError):
                    pass

        json['tags'].update(tags)

        return json

    instance = hass.data[DOMAIN] = InfluxThread(
        hass, influx, event_to_json, max_tries)
    instance.start()

    def shutdown(event):
        """Shut down the thread."""
        instance.queue.put(None)
        instance.join()
        influx.close()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)

    return True
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the available Netatmo weather sensors."""
    dev = []
    auth = hass.data[DATA_NETATMO_AUTH]

    if config.get(CONF_AREAS) is not None:
        for area in config[CONF_AREAS]:
            data = NetatmoPublicData(
                auth,
                lat_ne=area[CONF_LAT_NE],
                lon_ne=area[CONF_LON_NE],
                lat_sw=area[CONF_LAT_SW],
                lon_sw=area[CONF_LON_SW],
            )
            for sensor_type in SUPPORTED_PUBLIC_SENSOR_TYPES:
                dev.append(
                    NetatmoPublicSensor(area[CONF_NAME], data, sensor_type,
                                        area[CONF_MODE]))
    else:
        def find_devices(data):
            """Find all devices."""
            all_module_infos = data.get_module_infos()
            all_module_names = [
                e["module_name"] for e in all_module_infos.values()
            ]
            module_names = config.get(CONF_MODULES, all_module_names)
            entities = []
            for module_name in module_names:
                if module_name not in all_module_names:
                    _LOGGER.info("Module %s not found", module_name)
            for module in all_module_infos.values():
                if module["module_name"] not in module_names:
                    continue
                _LOGGER.debug("Adding module %s %s",
                              module["module_name"], module["id"])
                for condition in data.station_data.monitoredConditions(
                        moduleId=module["id"]):
                    entities.append(
                        NetatmoSensor(data, module, condition.lower()))
            return entities

        def _retry(_data):
            try:
                entities = find_devices(_data)
            except requests.exceptions.Timeout:
                return call_later(hass, NETATMO_UPDATE_INTERVAL,
                                  lambda _: _retry(_data))
            if entities:
                add_entities(entities, True)

        for data_class in [pyatmo.WeatherStationData, pyatmo.HomeCoachData]:
            try:
                data = NetatmoData(auth, data_class,
                                   config.get(CONF_STATION))
            except pyatmo.NoDevice:
                _LOGGER.info("No %s devices found",
                             NETATMO_DEVICE_TYPES[data_class.__name__])
                continue

            try:
                dev.extend(find_devices(data))
            except requests.exceptions.Timeout:
                call_later(hass, NETATMO_UPDATE_INTERVAL,
                           lambda _: _retry(data))

    if dev:
        add_entities(dev, True)
class SonosDiscoveryManager:
    """Manage sonos discovery."""

    def __init__(
        self,
        hass: HomeAssistant,
        entry: ConfigEntry,
        data: SonosData,
        hosts: list[str],
    ) -> None:
        """Init discovery manager."""
        self.hass = hass
        self.entry = entry
        self.data = data
        self.hosts = set(hosts)
        self.discovery_lock = asyncio.Lock()
        self._known_invisible = set()
        self._manual_config_required = bool(hosts)

    async def async_shutdown(self):
        """Stop all running tasks."""
        await self._async_stop_event_listener()
        self._stop_manual_heartbeat()

    def is_device_invisible(self, ip_address: str) -> bool:
        """Check if device at provided IP is known to be invisible."""
        return any(
            x for x in self._known_invisible if x.ip_address == ip_address
        )

    def _create_visible_speakers(self, ip_address: str) -> None:
        """Create all visible SonosSpeaker instances with the provided seed IP."""
        try:
            soco = SoCo(ip_address)
            visible_zones = soco.visible_zones
            self._known_invisible = soco.all_zones - visible_zones
        except (OSError, SoCoException) as ex:
            _LOGGER.warning(
                "Failed to request visible zones from %s: %s", ip_address, ex
            )
            return

        for zone in visible_zones:
            if zone.uid not in self.data.discovered:
                self._add_speaker(zone)

    async def _async_stop_event_listener(
        self, event: Event | None = None
    ) -> None:
        for speaker in self.data.discovered.values():
            speaker.activity_stats.log_report()
            speaker.event_stats.log_report()
        await asyncio.gather(
            *(
                speaker.async_offline()
                for speaker in self.data.discovered.values()
            )
        )
        if events_asyncio.event_listener:
            await events_asyncio.event_listener.async_stop()

    def _stop_manual_heartbeat(self, event: Event | None = None) -> None:
        if self.data.hosts_heartbeat:
            self.data.hosts_heartbeat()
            self.data.hosts_heartbeat = None

    def _add_speaker(self, soco: SoCo) -> None:
        """Create and set up a new SonosSpeaker instance."""
        try:
            speaker_info = soco.get_speaker_info(True)
            if soco.uid not in self.data.boot_counts:
                self.data.boot_counts[soco.uid] = soco.boot_seqnum
            _LOGGER.debug("Adding new speaker: %s", speaker_info)
            speaker = SonosSpeaker(self.hass, soco, speaker_info)
            self.data.discovered[soco.uid] = speaker
            for coordinator, coord_dict in (
                (SonosAlarms, self.data.alarms),
                (SonosFavorites, self.data.favorites),
            ):
                if soco.household_id not in coord_dict:
                    new_coordinator = coordinator(self.hass, soco.household_id)
                    new_coordinator.setup(soco)
                    coord_dict[soco.household_id] = new_coordinator
            speaker.setup(self.entry)
        except (OSError, SoCoException):
            _LOGGER.warning(
                "Failed to add SonosSpeaker using %s", soco, exc_info=True
            )

    def _poll_manual_hosts(
        self, now: datetime.datetime | None = None
    ) -> None:
        """Add and maintain Sonos devices from a manual configuration."""
        for host in self.hosts:
            ip_addr = socket.gethostbyname(host)
            soco = SoCo(ip_addr)
            try:
                visible_zones = soco.visible_zones
            except OSError:
                _LOGGER.warning(
                    "Could not get visible Sonos devices from %s", ip_addr
                )
            else:
                if new_hosts := {
                    x.ip_address
                    for x in visible_zones
                    if x.ip_address not in self.hosts
                }:
                    _LOGGER.debug("Adding to manual hosts: %s", new_hosts)
                    self.hosts.update(new_hosts)
                dispatcher_send(
                    self.hass,
                    f"{SONOS_SPEAKER_ACTIVITY}-{soco.uid}",
                    "manual zone scan",
                )
                break

        for host in self.hosts.copy():
            ip_addr = socket.gethostbyname(host)
            if self.is_device_invisible(ip_addr):
                _LOGGER.debug("Discarding %s from manual hosts", ip_addr)
                self.hosts.discard(ip_addr)
                continue

            known_speaker = next(
                (
                    speaker
                    for speaker in self.data.discovered.values()
                    if speaker.soco.ip_address == ip_addr
                ),
                None,
            )
            if not known_speaker:
                self._create_visible_speakers(ip_addr)
            elif not known_speaker.available:
                try:
                    known_speaker.soco.renderingControl.GetVolume(
                        [("InstanceID", 0), ("Channel", "Master")], timeout=1
                    )
                except OSError:
                    _LOGGER.debug(
                        "Manual poll to %s failed, keeping unavailable", ip_addr
                    )
                else:
                    dispatcher_send(
                        self.hass,
                        f"{SONOS_SPEAKER_ACTIVITY}-{known_speaker.uid}",
                        "manual rediscovery",
                    )

        self.data.hosts_heartbeat = call_later(
            self.hass, DISCOVERY_INTERVAL.total_seconds(), self._poll_manual_hosts
        )
def setup(hass, config):
    """Set up the InfluxDB component."""
    from influxdb import InfluxDBClient, exceptions

    conf = config[DOMAIN]

    kwargs = {
        "database": conf[CONF_DB_NAME],
        "verify_ssl": conf[CONF_VERIFY_SSL],
        "timeout": TIMEOUT,
    }

    if CONF_HOST in conf:
        kwargs["host"] = conf[CONF_HOST]

    if CONF_PORT in conf:
        kwargs["port"] = conf[CONF_PORT]

    if CONF_USERNAME in conf:
        kwargs["username"] = conf[CONF_USERNAME]

    if CONF_PASSWORD in conf:
        kwargs["password"] = conf[CONF_PASSWORD]

    if CONF_SSL in conf:
        kwargs["ssl"] = conf[CONF_SSL]

    include = conf.get(CONF_INCLUDE, {})
    exclude = conf.get(CONF_EXCLUDE, {})
    whitelist_e = set(include.get(CONF_ENTITIES, []))
    whitelist_d = set(include.get(CONF_DOMAINS, []))
    blacklist_e = set(exclude.get(CONF_ENTITIES, []))
    blacklist_d = set(exclude.get(CONF_DOMAINS, []))
    tags = conf.get(CONF_TAGS)
    tags_attributes = conf.get(CONF_TAGS_ATTRIBUTES)
    default_measurement = conf.get(CONF_DEFAULT_MEASUREMENT)
    override_measurement = conf.get(CONF_OVERRIDE_MEASUREMENT)
    component_config = EntityValues(
        conf[CONF_COMPONENT_CONFIG],
        conf[CONF_COMPONENT_CONFIG_DOMAIN],
        conf[CONF_COMPONENT_CONFIG_GLOB],
    )
    max_tries = conf.get(CONF_RETRY_COUNT)

    try:
        influx = InfluxDBClient(**kwargs)
        influx.write_points([])
    except (exceptions.InfluxDBClientError,
            requests.exceptions.ConnectionError) as exc:
        _LOGGER.warning(
            "Database host is not accessible due to '%s', please "
            "check your entries in the configuration file (host, "
            "port, etc.) and verify that the database exists and is "
            "READ/WRITE. Retrying again in %s seconds.",
            exc,
            RETRY_INTERVAL,
        )
        event_helper.call_later(hass, RETRY_INTERVAL,
                                lambda _: setup(hass, config))
        return True

    def event_to_json(event):
        """Add an event to the outgoing Influx list."""
        state = event.data.get("new_state")
        if (state is None
                or state.state in (STATE_UNKNOWN, "", STATE_UNAVAILABLE)
                or state.entity_id in blacklist_e
                or state.domain in blacklist_d):
            return

        try:
            if ((whitelist_e or whitelist_d)
                    and state.entity_id not in whitelist_e
                    and state.domain not in whitelist_d):
                return

            _include_state = _include_value = False

            _state_as_value = float(state.state)
            _include_value = True
        except ValueError:
            try:
                _state_as_value = float(state_helper.state_as_number(state))
                _include_state = _include_value = True
            except ValueError:
                _include_state = True

        include_uom = True
        measurement = component_config.get(
            state.entity_id).get(CONF_OVERRIDE_MEASUREMENT)
        if measurement in (None, ""):
            if override_measurement:
                measurement = override_measurement
            else:
                measurement = state.attributes.get("unit_of_measurement")
                if measurement in (None, ""):
                    if default_measurement:
                        measurement = default_measurement
                    else:
                        measurement = state.entity_id
                else:
                    include_uom = False

        json = {
            "measurement": measurement,
            "tags": {
                "domain": state.domain,
                "entity_id": state.object_id
            },
            "time": event.time_fired,
            "fields": {},
        }
        if _include_state:
            json["fields"]["state"] = state.state
        if _include_value:
            json["fields"]["value"] = _state_as_value

        for key, value in state.attributes.items():
            if key in tags_attributes:
                json["tags"][key] = value
            elif key != "unit_of_measurement" or include_uom:
                # If the key is already in fields
                if key in json["fields"]:
                    key = key + "_"  # Prevent column data errors in influxDB.
                # For each value we try to cast it as float, but if we
                # cannot, we store the value as a string and add a "_str"
                # postfix to the field key.
                try:
                    json["fields"][key] = float(value)
                except (ValueError, TypeError):
                    new_key = f"{key}_str"
                    new_value = str(value)
                    json["fields"][new_key] = new_value

                    if RE_DIGIT_TAIL.match(new_value):
                        json["fields"][key] = float(
                            RE_DECIMAL.sub("", new_value))

                # Infinity and NaN are not valid floats in InfluxDB
                try:
                    if not math.isfinite(json["fields"][key]):
                        del json["fields"][key]
                except (KeyError, TypeError):
                    pass

        json["tags"].update(tags)

        return json

    instance = hass.data[DOMAIN] = InfluxThread(hass, influx, event_to_json,
                                                max_tries)
    instance.start()

    def shutdown(event):
        """Shut down the thread."""
        instance.queue.put(None)
        instance.join()
        influx.close()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)

    return True
def setup(hass, config):
    """Set up the Zabbix component."""
    conf = config[DOMAIN]
    protocol = "https" if conf[CONF_SSL] else "http"

    url = urljoin(f"{protocol}://{conf[CONF_HOST]}", conf[CONF_PATH])
    username = conf.get(CONF_USERNAME)
    password = conf.get(CONF_PASSWORD)

    publish_states_host = conf.get(CONF_PUBLISH_STATES_HOST)

    entities_filter = convert_include_exclude_filter(conf)

    try:
        zapi = ZabbixAPI(url=url, user=username, password=password)
        _LOGGER.info("Connected to Zabbix API Version %s", zapi.api_version())
    except ZabbixAPIException as login_exception:
        _LOGGER.error("Unable to login to the Zabbix API: %s",
                      login_exception)
        return False
    except HTTPError as http_error:
        _LOGGER.error("HTTPError when connecting to Zabbix API: %s",
                      http_error)
        zapi = None
        _LOGGER.error(RETRY_MESSAGE, http_error)
        event_helper.call_later(hass, RETRY_INTERVAL,
                                lambda _: setup(hass, config))
        return True

    hass.data[DOMAIN] = zapi

    def event_to_metrics(event, float_keys, string_keys):
        """Add an event to the outgoing Zabbix list."""
        state = event.data.get("new_state")
        if state is None or state.state in (STATE_UNKNOWN, "",
                                            STATE_UNAVAILABLE):
            return

        entity_id = state.entity_id
        if not entities_filter(entity_id):
            return

        floats = {}
        strings = {}
        try:
            _state_as_value = float(state.state)
            floats[entity_id] = _state_as_value
        except ValueError:
            try:
                _state_as_value = float(state_helper.state_as_number(state))
                floats[entity_id] = _state_as_value
            except ValueError:
                strings[entity_id] = state.state

        for key, value in state.attributes.items():
            # For each value we try to cast it as float,
            # but if we cannot, we store the value as a string.
            attribute_id = f"{entity_id}/{key}"
            try:
                float_value = float(value)
            except (ValueError, TypeError):
                float_value = None
            if float_value is None or not math.isfinite(float_value):
                strings[attribute_id] = str(value)
            else:
                floats[attribute_id] = float_value

        metrics = []
        float_keys_count = len(float_keys)
        float_keys.update(floats)
        if len(float_keys) != float_keys_count:
            floats_discovery = []
            for float_key in float_keys:
                floats_discovery.append({"{#KEY}": float_key})
            metric = ZabbixMetric(
                publish_states_host,
                "homeassistant.floats_discovery",
                json.dumps(floats_discovery),
            )
            metrics.append(metric)
        for key, value in floats.items():
            metric = ZabbixMetric(
                publish_states_host, f"homeassistant.float[{key}]", value
            )
            metrics.append(metric)

        string_keys.update(strings)
        return metrics

    if publish_states_host:
        zabbix_sender = ZabbixSender(zabbix_server=conf[CONF_HOST])
        instance = ZabbixThread(hass, zabbix_sender, event_to_metrics)
        instance.setup(hass)

    return True