def __init__(self, hass, name, input_sensor, operation, interval, unit_of_measurement):
    """Initialize the data-update coordinator.

    Args:
        hass: Home Assistant instance.
        name: Display name passed to the base coordinator.
        input_sensor: Entity id of the source sensor to derive from.
        operation: Operation applied to the source sensor's values.
        interval: Update interval in minutes (coerced to int).
        unit_of_measurement: Unit reported by the derived entities.
    """
    self.name = name
    self.input_sensor = input_sensor
    self.operation = operation
    self.interval = int(interval)
    self.unit_of_measurement = unit_of_measurement
    self.hass = hass
    self.entities = {}
    self.platforms = []
    self.entry_setup_completed = False
    # PEP 8 fix: this is a plain local, not a module constant, so it must
    # not be spelled ALL-CAPS (the old name shadowed the conventional
    # platform-level SCAN_INTERVAL constant and misled readers).
    update_interval = timedelta(minutes=self.interval)
    super().__init__(hass, _LOGGER, name=name, update_interval=update_interval)
    # Reset happens at midnight (local time).
    # NOTE(review): the unsubscribe callback returned here is discarded, so
    # this listener lives for the lifetime of hass — confirm intentional.
    async_track_time_change(
        hass,
        self._async_reset,
        hour=0,
        minute=0,
        second=0,
    )
    self.entry_setup_completed = True
async def async_added_to_hass(self):
    """Handle entity which will be added."""
    await super().async_added_to_hass()
    # Restore last known state after a restart, if one was recorded.
    state = await self.async_get_last_state()
    if state:
        self._state = state.state

    # Update 'state' value in hour changes
    self._hourly_tracker = async_track_time_change(
        self.hass, self.async_update, second=[0], minute=[0]
    )

    # Update prices at random time, 3 times/hour (don't want to upset API).
    # randint(1, 19) keeps every slot (m, m+20, m+40) inside 0..59.
    random_minute = randint(1, 19)
    mins_update = [random_minute + 20 * i for i in range(3)]
    self._price_tracker = async_track_time_change(
        self.hass, self.async_update_prices, second=[0], minute=mins_update
    )
    _LOGGER.info(
        "Setup of price sensor %s (%s) with tariff '%s', "
        "updating prices each hour at %s min",
        self.name,
        self.entity_id,
        self._tariff,
        mins_update,
    )
    # Prime prices immediately instead of waiting for the first tick.
    await self.async_update_prices()
    self._init_done = True
    await self.async_update_ha_state(True)
async def async_added_to_hass(self):
    """Handle entity which will be added."""
    await super().async_added_to_hass()
    # Restore last known price after a restart, if one was recorded.
    state = await self.async_get_last_state()
    if state:
        self._pvpc_data.state = state.state

    # Update 'state' value in hour changes
    self._hourly_tracker = async_track_time_change(
        self.hass, self.update_current_price, second=[0], minute=[0]
    )

    # Update prices at random time, 2 times/hour (don't want to upset API).
    # randint(1, 29) keeps both slots (m and m+30) inside 0..59.
    random_minute = randint(1, 29)
    mins_update = [random_minute, random_minute + 30]
    self._price_tracker = async_track_time_change(
        self.hass,
        self.async_update_prices,
        second=[0],
        minute=mins_update,
    )
    _LOGGER.debug(
        "Setup of price sensor %s (%s) with tariff '%s', "
        "updating prices each hour at %s min",
        self.name,
        self.entity_id,
        self._pvpc_data.tariff,
        mins_update,
    )
    # Prime prices and current state immediately.
    await self.async_update_prices(dt_util.utcnow())
    self.update_current_price(dt_util.utcnow())
async def async_setup(hass: HomeAssistant, config: Config) -> bool:
    """Set up using yaml config file."""
    # Guard makes setup idempotent: the API object and its listeners are
    # created only once per hass instance.
    if DOMAIN not in hass.data:
        api = NordpoolData(hass)
        hass.data[DOMAIN] = api

        async def new_day_cb(n):
            """Cb to handle some house keeping when it a new day."""
            _LOGGER.debug("Called new_day_cb callback")
            api._tomorrow_valid = False
            for curr in api.currency:
                # Promote tomorrow's prices to today; refetch if we never
                # received them.
                if not len(api._data[curr]["tomorrow"]):
                    api._data[curr]["today"] = await api.update_today(None)
                else:
                    api._data[curr]["today"] = api._data[curr]["tomorrow"]
                api._data[curr]["tomorrow"] = {}
            async_dispatcher_send(hass, EVENT_NEW_DATA)

        async def new_hr(n):
            """Callback to tell the sensors to update on a new hour."""
            _LOGGER.debug("Called new_hr callback")
            async_dispatcher_send(hass, EVENT_NEW_DATA)

        async def new_data_cb(n):
            """Callback to fetch new data for tomorrows prices at 1300ish CET
            and notify any sensors, about the new data
            """
            _LOGGER.debug("Called new_data_cb")
            await api.update_tomorrow(n)
            async_dispatcher_send(hass, EVENT_NEW_DATA)

        # Handles futures updates.  Randomized minute/second spreads load
        # on the upstream API across installations.
        cb_update_tomorrow = async_track_time_change_in_tz(
            hass,
            new_data_cb,
            hour=13,
            minute=RANDOM_MINUTE,
            second=RANDOM_SECOND,
            tz=timezone("Europe/Stockholm"),
        )
        cb_new_day = async_track_time_change(
            hass, new_day_cb, hour=0, minute=0, second=0
        )
        cb_new_hr = async_track_time_change(hass, new_hr, minute=0, second=0)

        # Keep unsubscribe handles so the entry can be torn down cleanly.
        api.listeners.append(cb_update_tomorrow)
        api.listeners.append(cb_new_hr)
        api.listeners.append(cb_new_day)

    return True
async def async_added_to_hass(self):
    """Handle entity which will be added."""
    await super().async_added_to_hass()

    # Schedule the periodic meter reset.  The configured offset (a
    # timedelta) is decomposed into h/m/s fields for the time pattern.
    if self._period == HOURLY:
        async_track_time_change(
            self.hass,
            self._async_reset_meter,
            minute=self._period_offset.seconds // 60,
            second=self._period_offset.seconds % 60,
        )
    elif self._period in [DAILY, WEEKLY, MONTHLY, BIMONTHLY, QUARTERLY, YEARLY]:
        async_track_time_change(
            self.hass,
            self._async_reset_meter,
            hour=self._period_offset.seconds // 3600,
            minute=self._period_offset.seconds % 3600 // 60,
            second=self._period_offset.seconds % 3600 % 60,
        )
    async_dispatcher_connect(self.hass, SIGNAL_RESET_METER, self.async_reset_meter)

    # Restore persisted meter data after a restart.
    state = await self.async_get_last_state()
    if state:
        self._state = Decimal(state.state)
        self._unit_of_measurement = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        self._last_period = state.attributes.get(ATTR_LAST_PERIOD)
        self._last_reset = dt_util.parse_datetime(
            state.attributes.get(ATTR_LAST_RESET)
        )
        self.async_write_ha_state()
        if state.attributes.get(ATTR_STATUS) == PAUSED:
            # Fake cancellation function to init the meter paused
            self._collecting = lambda: None

    @callback
    def async_source_tracking(event):
        """Wait for source to be ready, then start meter."""
        if self._tariff_entity is not None:
            _LOGGER.debug("Track %s", self._tariff_entity)
            async_track_state_change_event(
                self.hass, [self._tariff_entity], self.async_tariff_change
            )
            # NOTE(review): assumes the tariff entity already has a state
            # at startup; a missing entity would make .state raise — confirm.
            tariff_entity_state = self.hass.states.get(self._tariff_entity)
            if self._tariff != tariff_entity_state.state:
                # Not our tariff right now: stay idle until tariff changes.
                return
        _LOGGER.debug("tracking source: %s", self._sensor_source_id)
        self._collecting = async_track_state_change_event(
            self.hass, [self._sensor_source_id], self.async_reading
        )

    self.hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_START, async_source_tracking
    )
def __init__(
    self,
    hass,
    min_colortemp,
    max_colortemp,
    sunrise_offset,
    sunset_offset,
    sunrise_time,
    sunset_time,
    latitude,
    longitude,
    elevation,
    interval,
    transition,
):
    """Initialize the circadian calculator and schedule updates.

    Updates are triggered at sunrise and sunset (either astronomically or
    at manually configured times) and additionally on a fixed interval.
    """
    self.hass = hass
    self._min_colortemp = min_colortemp
    self._max_colortemp = max_colortemp
    self._sunrise_offset = sunrise_offset
    self._sunset_offset = sunset_offset
    self._manual_sunset = sunset_time
    self._manual_sunrise = sunrise_time
    self._latitude = latitude
    self._longitude = longitude
    self._elevation = elevation
    self._transition = transition
    # Derive the initial values once so attributes are valid before the
    # first scheduled update fires.
    self._percent = self.calc_percent()
    self._colortemp = self.calc_colortemp()
    self._rgb_color = self.calc_rgb()
    self._xy_color = self.calc_xy()
    self._hs_color = self.calc_hs()

    # Manual sunrise/sunset times take precedence over astronomical ones.
    if self._manual_sunrise is not None:
        async_track_time_change(
            self.hass,
            self.update,
            hour=self._manual_sunrise.hour,
            minute=self._manual_sunrise.minute,
            second=self._manual_sunrise.second,
        )
    else:
        async_track_sunrise(self.hass, self.update, self._sunrise_offset)

    if self._manual_sunset is not None:
        async_track_time_change(
            self.hass,
            self.update,
            hour=self._manual_sunset.hour,
            minute=self._manual_sunset.minute,
            second=self._manual_sunset.second,
        )
    else:
        async_track_sunset(self.hass, self.update, self._sunset_offset)

    # Also refresh on a plain interval between the sun events.
    async_track_time_interval(self.hass, self.update, interval)
def _setup_fixed_updating(self):
    """Schedule the default refresh: once every local midnight.

    Sensor types needing a different or dynamic schedule override this
    method — possibly with a no-op, arranging their next update at the end
    of an overridden _update instead.
    """
    @callback
    def _midnight_refresh(now):
        self.async_schedule_update_ha_state(True)

    async_track_time_change(
        self.hass, _midnight_refresh, hour=0, minute=0, second=0
    )
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the LunarCalendar sensor."""
    # NOTE(review): this is a generator-style coroutine (`yield from`)
    # without a visible @asyncio.coroutine decorator — presumably the HA
    # setup machinery wraps it; confirm before modernizing.
    key = config.get(CONF_KEY)
    data = JuheLunarCalendarData(hass, key)
    # Fetch once up front so the sensor has data before midnight.
    yield from data.async_update(dt_util.now())
    # Refresh daily at 00:00:01 (one second past midnight).
    async_track_time_change(
        hass, data.async_update, hour=[0], minute=[0], second=[1]
    )
    dev = []
    dev.append(JuheLunarCalendarSensor(data))
    async_add_devices(dev, True)
def __init__(self, hass, key):
    """Initialize the data object."""
    self.story = {}
    self.hass = hass
    # NOTE(review): endpoint URL names the "joke" API although the class
    # appears calendar-related — verify this is the intended service.
    self.url = "http://v.juhe.cn/joke/content/text.php"
    self.key = key
    self.state = None
    # Populate immediately, then refresh daily at 00:00:01.
    self.update(dt_util.now())
    async_track_time_change(
        self.hass, self.update, hour=[0], minute=[0], second=[1]
    )
async def async_added_to_hass(self):
    """Handle entity which will be added."""
    await super().async_added_to_hass()

    # Schedule the periodic meter reset; the configured timedelta offset is
    # decomposed into h/m/s fields for the time pattern.
    if self._period == HOURLY:
        async_track_time_change(
            self.hass, self._async_reset_meter,
            minute=self._period_offset.seconds // 60,
            second=self._period_offset.seconds % 60)
    elif self._period in [DAILY, WEEKLY, MONTHLY, YEARLY]:
        async_track_time_change(
            self.hass, self._async_reset_meter,
            hour=self._period_offset.seconds // 3600,
            minute=self._period_offset.seconds % 3600 // 60,
            second=self._period_offset.seconds % 3600 % 60)

    async_dispatcher_connect(
        self.hass, SIGNAL_RESET_METER, self.async_reset_meter)

    # Restore persisted meter data after a restart.
    state = await self.async_get_last_state()
    if state:
        self._state = Decimal(state.state)
        self._unit_of_measurement = state.attributes.get(
            ATTR_UNIT_OF_MEASUREMENT)
        self._last_period = state.attributes.get(ATTR_LAST_PERIOD)
        self._last_reset = state.attributes.get(ATTR_LAST_RESET)
        await self.async_update_ha_state()
        if state.attributes.get(ATTR_STATUS) == PAUSED:
            # Fake cancellation function to init the meter paused
            self._collecting = lambda: None

    @callback
    def async_source_tracking(event):
        """Wait for source to be ready, then start meter."""
        if self._tariff_entity is not None:
            _LOGGER.debug("Track %s", self._tariff_entity)
            async_track_state_change(
                self.hass, self._tariff_entity, self.async_tariff_change)
            # NOTE(review): assumes the tariff entity has a state at
            # startup; missing entity would make .state raise — confirm.
            tariff_entity_state = self.hass.states.get(self._tariff_entity)
            if self._tariff != tariff_entity_state.state:
                # Not the active tariff: stay idle until it changes.
                return
        _LOGGER.debug("tracking source: %s", self._sensor_source_id)
        self._collecting = async_track_state_change(
            self.hass, self._sensor_source_id, self.async_reading)

    self.hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_START, async_source_tracking)
def async_trigger(hass, config, action):
    """Listen for state changes based on configuration."""
    # Fixed time-of-day trigger ('at', or deprecated 'after') takes
    # precedence over the pattern-style hours/minutes/seconds fields.
    if CONF_AT in config:
        at_time = config.get(CONF_AT)
        hours, minutes, seconds = at_time.hour, at_time.minute, at_time.second
    elif CONF_AFTER in config:
        _LOGGER.warning("'after' is deprecated for the time trigger. Please "
                        "rename 'after' to 'at' in your configuration file.")
        at_time = config.get(CONF_AFTER)
        hours, minutes, seconds = at_time.hour, at_time.minute, at_time.second
    else:
        hours = config.get(CONF_HOURS)
        minutes = config.get(CONF_MINUTES)
        seconds = config.get(CONF_SECONDS)

    @callback
    def time_automation_listener(now):
        """Listen for time changes and calls action."""
        hass.async_run_job(action, {
            'trigger': {
                'platform': 'time',
                'now': now,
            },
        })

    # Returns the unsubscribe callback of the time listener.
    return async_track_time_change(hass, time_automation_listener,
                                   hour=hours, minute=minutes, second=seconds)
async def test_async_track_time_change(hass):
    """Test tracking time change."""
    wildcard_runs = []
    specific_runs = []

    # Wildcard listener fires on every time change; the UTC listener only
    # when second is 0 or 30.
    unsub = async_track_time_change(hass, lambda x: wildcard_runs.append(1))
    unsub_utc = async_track_utc_time_change(
        hass, lambda x: specific_runs.append(1), second=[0, 30])

    _send_time_changed(hass, datetime(2014, 5, 24, 12, 0, 0))
    await hass.async_block_till_done()
    assert len(specific_runs) == 1
    assert len(wildcard_runs) == 1

    # second=15 matches only the wildcard listener.
    _send_time_changed(hass, datetime(2014, 5, 24, 12, 0, 15))
    await hass.async_block_till_done()
    assert len(specific_runs) == 1
    assert len(wildcard_runs) == 2

    _send_time_changed(hass, datetime(2014, 5, 24, 12, 0, 30))
    await hass.async_block_till_done()
    assert len(specific_runs) == 2
    assert len(wildcard_runs) == 3

    # After unsubscribing, neither listener fires again.
    unsub()
    unsub_utc()

    _send_time_changed(hass, datetime(2014, 5, 24, 12, 0, 30))
    await hass.async_block_till_done()
    assert len(specific_runs) == 2
    assert len(wildcard_runs) == 3
async def async_attach_trigger(hass, config, action, automation_info):
    """Listen for state changes based on configuration."""
    hours = config.get(CONF_HOURS)
    minutes = config.get(CONF_MINUTES)
    seconds = config.get(CONF_SECONDS)

    # If larger units are specified, default the smaller units to zero so
    # e.g. "hours: 5" fires once at 05:00:00, not every second of that hour.
    if minutes is None and hours is not None:
        minutes = 0
    if seconds is None and minutes is not None:
        seconds = 0

    @callback
    def time_automation_listener(now):
        """Listen for time changes and calls action."""
        hass.async_run_job(
            action, {"trigger": {
                "platform": "time_pattern",
                "now": now
            }})

    # Returns the unsubscribe callback of the time listener.
    return async_track_time_change(hass, time_automation_listener,
                                   hour=hours, minute=minutes, second=seconds)
async def test_periodic_task_entering_dst(hass):
    """Test periodic task behavior when entering dst."""
    timezone = dt_util.get_time_zone("Europe/Vienna")
    dt_util.set_default_time_zone(timezone)
    specific_runs = []

    unsub = async_track_time_change(hass, lambda x: specific_runs.append(1),
                                    hour=2, minute=30, second=0)

    # 2018-03-25 is the spring-forward day in Vienna: 02:30 does not exist,
    # so the listener must not fire at all on that day.
    async_fire_time_changed(hass,
                            timezone.localize(datetime(2018, 3, 25, 1, 50, 0)))
    await hass.async_block_till_done()
    assert len(specific_runs) == 0

    async_fire_time_changed(hass,
                            timezone.localize(datetime(2018, 3, 25, 3, 50, 0)))
    await hass.async_block_till_done()
    assert len(specific_runs) == 0

    # The next day 02:30 exists again; it fires once the time passes it.
    async_fire_time_changed(hass,
                            timezone.localize(datetime(2018, 3, 26, 1, 50, 0)))
    await hass.async_block_till_done()
    assert len(specific_runs) == 0

    async_fire_time_changed(hass,
                            timezone.localize(datetime(2018, 3, 26, 2, 50, 0)))
    await hass.async_block_till_done()
    assert len(specific_runs) == 1

    unsub()
async def async_attach_trigger(hass, config, action, automation_info):
    """Listen for state changes based on configuration."""
    at_times = config[CONF_AT]

    @callback
    def time_automation_listener(now):
        """Listen for time changes and calls action."""
        hass.async_run_job(action, {"trigger": {"platform": "time", "now": now}})

    # Register one listener per configured time and keep the unsubscribe
    # handles so they can all be removed together.
    removes = []
    for trigger_time in at_times:
        removes.append(
            async_track_time_change(
                hass,
                time_automation_listener,
                hour=trigger_time.hour,
                minute=trigger_time.minute,
                second=trigger_time.second,
            )
        )

    @callback
    def remove_track_time_changes():
        """Remove tracked time changes."""
        for remove in removes:
            remove()

    return remove_track_time_changes
async def test_periodic_task_leaving_dst(hass):
    """Test periodic task behavior when leaving dst."""
    tz = dt_util.get_time_zone("Europe/Vienna")
    dt_util.set_default_time_zone(tz)
    specific_runs = []

    unsub = async_track_time_change(hass, lambda x: specific_runs.append(1),
                                    hour=2, minute=30, second=0)

    # 2018-10-28 is the fall-back day in Vienna: 02:30 occurs twice.
    # First pass through the hour (is_dst=False wall clock ordering here):
    _send_time_changed(
        hass, tz.localize(datetime(2018, 10, 28, 2, 5, 0), is_dst=False))
    await hass.async_block_till_done()
    assert len(specific_runs) == 0

    _send_time_changed(
        hass, tz.localize(datetime(2018, 10, 28, 2, 55, 0), is_dst=False))
    await hass.async_block_till_done()
    assert len(specific_runs) == 1

    # Second occurrence of 02:30 (other DST flag) must fire again exactly once.
    _send_time_changed(
        hass, tz.localize(datetime(2018, 10, 28, 2, 5, 0), is_dst=True))
    await hass.async_block_till_done()
    assert len(specific_runs) == 1

    _send_time_changed(
        hass, tz.localize(datetime(2018, 10, 28, 2, 55, 0), is_dst=True))
    await hass.async_block_till_done()
    assert len(specific_runs) == 2

    unsub()
def _async_setup_periodic_tasks(self) -> None:
    """Prepare periodic tasks."""
    if self.hass.is_stopping or not self._get_session:
        # Home Assistant is shutting down
        return

    # If the db is using a socket connection, we need to keep alive
    # to prevent errors from unexpected disconnects
    if self.dialect_name != SupportedDialect.SQLITE:
        self._keep_alive_listener = async_track_time_interval(
            self.hass, self._async_keep_alive,
            timedelta(seconds=KEEPALIVE_TIME))

    # If the commit interval is not 0, we need to commit periodically
    if self.commit_interval:
        self._commit_listener = async_track_time_interval(
            self.hass, self._async_commit,
            timedelta(seconds=self.commit_interval))

    # Run nightly tasks at 4:12am
    self._nightly_listener = async_track_time_change(
        self.hass, self.async_nightly_tasks, hour=4, minute=12, second=0)

    # Compile short term statistics every 5 minutes
    # (second=10 offsets the run so the statistics window has closed).
    self._periodic_listener = async_track_utc_time_change(
        self.hass, self.async_periodic_statistics,
        minute=range(0, 60, 5), second=10)
def async_trigger(hass, config, action):
    """Listen for state changes based on configuration."""
    # Fixed time-of-day ('at') takes precedence over the pattern-style
    # hours/minutes/seconds fields.
    if CONF_AT in config:
        at_time = config.get(CONF_AT)
        hours, minutes, seconds = at_time.hour, at_time.minute, at_time.second
    else:
        hours = config.get(CONF_HOURS)
        minutes = config.get(CONF_MINUTES)
        seconds = config.get(CONF_SECONDS)

    @callback
    def time_automation_listener(now):
        """Listen for time changes and calls action."""
        hass.async_run_job(action, {
            'trigger': {
                'platform': 'time',
                'now': now,
            },
        })

    # Returns the unsubscribe callback of the time listener.
    return async_track_time_change(hass, time_automation_listener,
                                   hour=hours, minute=minutes, second=seconds)
def async_trigger(hass, config, action):
    """Listen for state changes based on configuration."""
    # 'at' wins; deprecated 'after' is still honored with a warning;
    # otherwise fall back to pattern-style hours/minutes/seconds.
    if CONF_AT in config:
        at_time = config.get(CONF_AT)
        hours, minutes, seconds = at_time.hour, at_time.minute, at_time.second
    elif CONF_AFTER in config:
        _LOGGER.warning("'after' is deprecated for the time trigger. Please "
                        "rename 'after' to 'at' in your configuration file.")
        at_time = config.get(CONF_AFTER)
        hours, minutes, seconds = at_time.hour, at_time.minute, at_time.second
    else:
        hours = config.get(CONF_HOURS)
        minutes = config.get(CONF_MINUTES)
        seconds = config.get(CONF_SECONDS)

    @callback
    def time_automation_listener(now):
        """Listen for time changes and calls action."""
        hass.async_run_job(action, {
            'trigger': {
                'platform': 'time',
                'now': now,
            },
        })

    # Returns the unsubscribe callback of the time listener.
    return async_track_time_change(hass, time_automation_listener,
                                   hour=hours, minute=minutes, second=seconds)
def update_entity_trigger(entity_id, new_state=None): """Update the entity trigger for the entity_id.""" # If a listener was already set up for entity, remove it. remove = entities.get(entity_id) if remove: remove() removes.remove(remove) remove = None # Check state of entity. If valid, set up a listener. if new_state: has_date = new_state.attributes["has_date"] if has_date: year = new_state.attributes["year"] month = new_state.attributes["month"] day = new_state.attributes["day"] has_time = new_state.attributes["has_time"] if has_time: hour = new_state.attributes["hour"] minute = new_state.attributes["minute"] second = new_state.attributes["second"] else: # If no time then use midnight. hour = minute = second = 0 if has_date: # If input_datetime has date, then track point in time. trigger_dt = dt_util.DEFAULT_TIME_ZONE.localize( datetime(year, month, day, hour, minute, second) ) # Only set up listener if time is now or in the future. if trigger_dt >= dt_util.now(): remove = async_track_point_in_time( hass, partial( time_automation_listener, f"time set in {entity_id}", entity_id=entity_id, ), trigger_dt, ) elif has_time: # Else if it has time, then track time change. remove = async_track_time_change( hass, partial( time_automation_listener, f"time set in {entity_id}", entity_id=entity_id, ), hour=hour, minute=minute, second=second, ) # Was a listener set up? if remove: removes.append(remove) entities[entity_id] = remove
def __init__(self, tibber_home, hass):
    """Initialize the Tibber home helper and schedule its timers."""
    self.hass = hass
    self.tibber_home = tibber_home
    self._state = True
    self.turn_ons = []
    self.turn_offs = []
    # Re-evaluate state twice per hour (minutes 0 and 15 of every hour),
    # a few seconds past the whole minute.
    async_track_time_change(
        hass, self.set_state, hour=range(24), minute=[0, 15], second=6
    )
    # Fetch data shortly after midnight, at 00:00:01 and again at 00:12:01.
    async_track_time_change(
        hass, self._fetch_data, hour=[0], minute=[0, 12], second=1
    )
class ElecPriceSensor(RestoreEntity, SensorEntity):
    """Class to hold the prices of electricity as a sensor."""

    _attr_icon = ICON
    _attr_native_unit_of_measurement = UNIT
    _attr_should_poll = False
    _attr_state_class = STATE_CLASS_MEASUREMENT

    def __init__(self, name, unique_id, pvpc_data_handler):
        """Initialize the sensor object."""
        self._name = name
        self._unique_id = unique_id
        # Shared handler that downloads and holds the PVPC price data.
        self._pvpc_data = pvpc_data_handler
        self._num_retries = 0

    async def async_added_to_hass(self) -> None:
        """Handle entity which will be added."""
        await super().async_added_to_hass()
        # Restore last known price after a restart, if one was recorded.
        if state := await self.async_get_last_state():
            self._pvpc_data.state = state.state

        # Update 'state' value in hour changes
        self.async_on_remove(
            async_track_time_change(self.hass, self.update_current_price,
                                    second=[0], minute=[0]))
        # Update prices at random time, 2 times/hour (don't want to upset API)
        random_minute = randint(1, 29)
        mins_update = [random_minute, random_minute + 30]
        self.async_on_remove(
            async_track_time_change(self.hass, self.async_update_prices,
                                    second=[0], minute=mins_update))
        _LOGGER.debug(
            "Setup of price sensor %s (%s) with tariff '%s', "
            "updating prices each hour at %s min",
            self.name,
            self.entity_id,
            self._pvpc_data.tariff,
            mins_update,
        )
        # Prime prices and the current state immediately.
        now = dt_util.utcnow()
        await self.async_update_prices(now)
        self.update_current_price(now)
async def async_added_to_hass(self):
    """Setup all required entities and automations.

    Validates country/operator/plan against the `electricity` package; on
    any mismatch a persistent notification is shown and setup aborts.
    Otherwise the tariff plan is instantiated and a 15-minute re-evaluation
    timer is started.
    """
    from electricity.tariffs import Operators

    # Typo fix: notification titles read "Electicity" — corrected to
    # "Electricity" in all three error branches.
    if self.country not in Operators:
        self.hass.components.persistent_notification.create(
            "<p><b>Error</b>: Country <em>{}</em> not supported.</p>"
            "Check logs for list of supported options".format(self.country),
            title="Electricity component",
            notification_id="electricity_error_country")
        _LOGGER.error("Country <%s> unsupported. Supported: %s",
                      self.country, ",".join(Operators))
        return
    if self.operator not in Operators[self.country]:
        self.hass.components.persistent_notification.create(
            "<p><b>Error</b>: Operator <em>{}</em> not supported.</p>"
            "Check logs for list of supported options".format(self.operator),
            title="Electricity component",
            notification_id="electricity_error_operator")
        _LOGGER.error("Operator <%s> unsupported. Supported: %s",
                      self.operator, ",".join(Operators[self.country]))
        return
    if self.plan not in Operators[self.country]\
            [self.operator].tariff_periods():
        self.hass.components.persistent_notification.create(
            "<p><b>Error</b>: Plan <em>{}</em> not supported.</p>"
            "Check logs for list of supported options".format(self.plan),
            title="Electricity component",
            notification_id="electricity_error_plan")
        _LOGGER.error(
            "Plan <%s> unsupported. Supported: %s",
            self.plan, ",".join(
                Operators[self.country][self.operator].tariff_periods()))
        return

    self.my_plan = Operators[self.country][self.operator](plan=self.plan)
    self._state = self.my_plan.current_tariff(dt_util.now())
    self._tariffs = self.my_plan.tariffs()
    # Re-evaluate the active tariff every 15 minutes.
    async_track_time_change(self.hass, self.timer_update,
                            minute=range(0, 60, 15))
def start(self):
    """Prepare task list and default ui.

    Scans all current entity states, keeps those whose domain is configured
    and not excluded (optionally filtered by a friendly-name pattern), seeds
    the per-entity bookkeeping store, fills the domain input_select, and
    schedules `update` to run every second.
    """
    pattern = re.compile(self._pattern)
    states = self._hass.states.async_all()
    for state in states:
        domain = state.domain
        object_id = state.object_id
        entity_id = '{}.{}'.format(domain, object_id)
        if domain not in self._domains or entity_id in self._exclude:
            pass
        else:
            friendly_name = state.name
            if not self._pattern or pattern.search(friendly_name):
                _LOGGER.debug("添加设备:{}({})".format(friendly_name, entity_id))
                self._dic_friendly_name.setdefault(friendly_name, entity_id)
                self._store.setdefault(domain, {}).setdefault(entity_id, {})
                self._store[domain][entity_id][
                    'friendly_name'] = friendly_name
                self._store[domain][entity_id][
                    'icon'] = self.get_attributes(entity_id).get(
                        'icon', self._dic_icon[domain])
                self._store[domain][entity_id]['entity_id'] = entity_id
                self._store[domain][entity_id]['duration'] = '0:00:00'
                self._store[domain][entity_id]['remaining'] = '0:00:00'
                self._store[domain][entity_id]['handle'] = None
                # BUG FIX: was 'autonmation' (typo), which never matched the
                # real 'automation' domain, so automations defaulted to 'off'.
                self._store[domain][entity_id][
                    'operation'] = 'on' if domain == 'automation' or domain == 'script' else 'off'
                self._store[domain][entity_id]['next_operation'] = None
            else:
                _LOGGER.debug("忽略设备:{}({})".format(friendly_name, entity_id))
    options = list(self._store.keys())
    options.insert(0, '请选择设备类型')
    data = {'entity_id': self._ui[UI_INPUT_DOMAIN], 'options': options}
    self._hass.async_add_job(
        self._hass.services.async_call('input_select',
                                       SERVICE_SET_OPTIONS, data))
    async_track_time_change(self._hass, self.update)  # update every second
async def test_async_track_time_change(hass):
    """Test tracking time change."""
    wildcard_runs = []
    specific_runs = []

    now = dt_util.utcnow()
    # Freeze "now" just before noon next year so registration never
    # coincides with a matching slot.
    time_that_will_not_match_right_away = datetime(
        now.year + 1, 5, 24, 11, 59, 55, tzinfo=dt_util.UTC)

    with patch("homeassistant.util.dt.utcnow",
               return_value=time_that_will_not_match_right_away):
        # Wildcard fires on every tick; the UTC one only at second 0/30.
        unsub = async_track_time_change(
            hass, callback(lambda x: wildcard_runs.append(x)))
        unsub_utc = async_track_utc_time_change(
            hass, callback(lambda x: specific_runs.append(x)),
            second=[0, 30])

    async_fire_time_changed(
        hass, datetime(now.year + 1, 5, 24, 12, 0, 0, 999999,
                       tzinfo=dt_util.UTC))
    await hass.async_block_till_done()
    assert len(specific_runs) == 1
    assert len(wildcard_runs) == 1

    # second=15 matches only the wildcard listener.
    async_fire_time_changed(
        hass, datetime(now.year + 1, 5, 24, 12, 0, 15, 999999,
                       tzinfo=dt_util.UTC))
    await hass.async_block_till_done()
    assert len(specific_runs) == 1
    assert len(wildcard_runs) == 2

    async_fire_time_changed(
        hass, datetime(now.year + 1, 5, 24, 12, 0, 30, 999999,
                       tzinfo=dt_util.UTC))
    await hass.async_block_till_done()
    assert len(specific_runs) == 2
    assert len(wildcard_runs) == 3

    # After unsubscribing, neither listener fires again.
    unsub()
    unsub_utc()

    async_fire_time_changed(
        hass, datetime(now.year + 1, 5, 24, 12, 0, 30, 999999,
                       tzinfo=dt_util.UTC))
    await hass.async_block_till_done()
    assert len(specific_runs) == 2
    assert len(wildcard_runs) == 3
async def _setSchedulerTask(self):
    """Rebuild the scheduler listeners from the current 'au190' attributes.

    Removes all previously registered time-change listeners, then, when the
    scheduler is enabled, registers one daily wake-up listener per
    configured 'start_time' entry (HH:MM).
    """
    try:
        _LOGGER.debug("[" + sys._getframe().f_code.co_name + "]--> [%s]%s", self.entity_id, self._attrs["au190"])

        # remove all listener (each stored entry is an unsubscribe callable)
        for fc_listener in self._scheduler_fc:
            fc_listener()
        self._scheduler_fc = []

        self.enable_countDown = self._attrs["au190"]['enable_countDown']
        self._countDown = self._attrs["au190"]['countDown']

        if self.my_hasattr_Idx(
                self._attrs["au190"],
                'scheduler') and self._attrs["au190"]['enable_scheduler']:
            for entry in self._attrs["au190"]['scheduler']:
                start_time = entry['start_time']
                # Parse "HH:MM"; only hour and minute are used below.
                x = time.strptime(start_time, '%H:%M')  #'%H:%M:%S'
                fc_listener = async_track_time_change(self.hass, self._async_wake_up, hour=x.tm_hour, minute=x.tm_min, second=0)
                self._scheduler_fc.append(fc_listener)

    except Exception as e:
        # Broad catch keeps scheduler rebuild from taking the entity down;
        # the original error is logged with the calling frame name.
        _LOGGER.error("[" + sys._getframe().f_code.co_name + "] Exception: " + str(e))
async def async_attach_trigger(hass, config, action, automation_info):
    """Listen for state changes based on configuration."""
    trigger_time = config.get(CONF_AT)

    @callback
    def time_automation_listener(now):
        """Listen for time changes and calls action."""
        hass.async_run_job(action, {"trigger": {"platform": "time", "now": now}})

    # Fire once per day at the configured wall-clock time; the returned
    # value is the unsubscribe callback.
    return async_track_time_change(
        hass,
        time_automation_listener,
        hour=trigger_time.hour,
        minute=trigger_time.minute,
        second=trigger_time.second,
    )
async def async_added_to_hass(self):
    """Setups all required entities and automations."""
    # Re-evaluate every quarter hour; unsubscribe on entity removal.
    self.async_on_remove(
        async_track_time_change(self.hass, self.timer_update,
                                minute=range(0, 60, 15)))

    # BUG FIX: the @callback decorator was applied to this coroutine
    # function.  @callback marks a *synchronous* event-loop callback, so
    # HA would invoke it without awaiting, leaving the coroutine unawaited.
    # A plain async def is scheduled correctly by async_listen_once.
    async def initial_sync(_):
        """Run one update at startup so state is correct immediately."""
        await self.timer_update(dt_util.now())

    self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, initial_sync)
def time_range_change_handler(evt: Event = None) -> None:
    """Rebuild the start/end time listeners and re-evaluate the state."""
    # Drop previously registered listeners before rebuilding.
    for unsub_listener in self._current_day_time_range_unsub_listeners:
        unsub_listener()
    self._current_day_time_range_unsub_listeners.clear()

    # If time ranges have been set, listen to time changes for the start
    # and end time
    if self.get_state(self._current_day_end_time_entity) != self.get_state(
        self._current_day_start_time_entity
    ):
        end_time_split = self.get_state(self._current_day_end_time_entity)
        start_time_split = self.get_state(self._current_day_start_time_entity)
        if end_time_split is None or start_time_split is None:
            # One of the entities is unavailable; try again on next event.
            return
        # "HH:MM[:SS]" strings; only hour and minute are used below.
        end_time_split = end_time_split.split(":")
        start_time_split = start_time_split.split(":")
        self._current_day_time_range_unsub_listeners = [
            async_track_time_change(
                self._hass,
                state_change_handler,
                hour=[int(end_time_split[0])],
                minute=[int(end_time_split[1])],
                second=[0],
            ),
            async_track_time_change(
                self._hass,
                state_change_handler,
                hour=[int(start_time_split[0])],
                minute=[int(start_time_split[1])],
                second=[0],
            ),
        ]
    # Always re-evaluate immediately with the current configuration.
    state_change_handler()
async def async_added_to_hass(self) -> None:
    """Handle entity which will be added."""
    await super().async_added_to_hass()
    # Update 'state' value in hour changes; unsubscribed automatically on
    # entity removal via async_on_remove.
    self.async_on_remove(
        async_track_time_change(
            self.hass, self.update_current_price, second=[0], minute=[0]
        )
    )
    _LOGGER.debug(
        "Setup of price sensor %s (%s) with tariff '%s'",
        self.name,
        self.entity_id,
        self.coordinator.api.tariff,
    )
async def async_added_to_hass(self) -> None:
    """Handle entity which will be added."""
    if self._esios_id not in self.coordinator.api.enabled_codes:
        # Logging fix: this is routine bookkeeping, not an emergency —
        # demoted from CRITICAL to DEBUG, and the eager f-string replaced
        # with lazy %-args so formatting only happens when emitted.
        _LOGGER.debug(
            "Adding %s to enabled-codes -> %s",
            self._esios_id,
            self.coordinator.api.enabled_codes,
        )
        self.coordinator.api.enabled_codes.append(self._esios_id)
    await super().async_added_to_hass()

    # Update 'state' value in hour changes for hourly price sensors
    if is_hourly_price(self._esios_id):
        self.async_on_remove(
            async_track_time_change(self.hass, self.update_current_price,
                                    second=[0], minute=[0]))
    # Logging fix: routine completion message demoted from WARNING to DEBUG.
    _LOGGER.debug("Setup of %s (%s) finished", self.name, self.entity_id)
def async_trigger(hass, config, action):
    """Listen for state changes based on configuration."""
    # Fixed time-of-day ('after') takes precedence over the pattern-style
    # hours/minutes/seconds fields.
    if CONF_AFTER in config:
        after = config.get(CONF_AFTER)
        hours = after.hour
        minutes = after.minute
        seconds = after.second
    else:
        hours = config.get(CONF_HOURS)
        minutes = config.get(CONF_MINUTES)
        seconds = config.get(CONF_SECONDS)

    @callback
    def time_automation_listener(now):
        """Listen for time changes and calls action."""
        hass.async_run_job(action, {"trigger": {"platform": "time", "now": now}})

    # Returns the unsubscribe callback of the time listener.
    return async_track_time_change(
        hass,
        time_automation_listener,
        hour=hours,
        minute=minutes,
        second=seconds,
    )
async def async_trigger(hass, config, action, automation_info):
    """Listen for state changes based on configuration."""
    at_time = config.get(CONF_AT)
    hours, minutes, seconds = at_time.hour, at_time.minute, at_time.second

    @callback
    def time_automation_listener(now):
        """Listen for time changes and calls action."""
        hass.async_run_job(action, {
            'trigger': {
                'platform': 'time',
                'now': now,
            },
        })

    # Fires daily at the configured time; returns the unsubscribe callback.
    return async_track_time_change(hass, time_automation_listener,
                                   hour=hours, minute=minutes, second=seconds)
async def async_added_to_hass(self):
    """Setups automations."""
    await super().async_added_to_hass()
    # Refresh once per day at midnight; unsubscribe on entity removal.
    self.async_on_remove(
        async_track_time_change(self.hass, self.timer_update,
                                hour=[0], minute=[0], second=[0]))

    # BUG FIX: the @callback decorator was applied to this coroutine
    # function.  @callback marks a *synchronous* event-loop callback, so
    # HA would invoke it without awaiting, leaving the coroutine unawaited.
    # A plain async def is scheduled correctly by async_listen_once.
    async def initial_sync(_):
        """Run one update at startup so state is correct immediately."""
        await self.timer_update(dt_util.now())

    self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, initial_sync)
def async_trigger(hass, config, action):
    """Listen for state changes based on configuration."""
    # Fixed time-of-day ('after') takes precedence over the pattern-style
    # hours/minutes/seconds fields.
    if CONF_AFTER in config:
        after = config.get(CONF_AFTER)
        hours, minutes, seconds = after.hour, after.minute, after.second
    else:
        hours = config.get(CONF_HOURS)
        minutes = config.get(CONF_MINUTES)
        seconds = config.get(CONF_SECONDS)

    # Pre-async/await era code: generator-based coroutine via decorator.
    @asyncio.coroutine
    def time_automation_listener(now):
        """Listen for time changes and calls action."""
        hass.async_add_job(action, {
            'trigger': {
                'platform': 'time',
                'now': now,
            },
        })

    # Returns the unsubscribe callback of the time listener.
    return async_track_time_change(hass, time_automation_listener,
                                   hour=hours, minute=minutes, second=seconds)
async def async_trigger(hass, config, action, automation_info):
    """Listen for state changes based on configuration."""
    hours = config.get(CONF_HOURS)
    minutes = config.get(CONF_MINUTES)
    seconds = config.get(CONF_SECONDS)

    # When only larger units are given, pin the smaller ones to zero so
    # e.g. "hours: 5" fires once at 05:00:00 rather than every second of
    # that hour.
    if hours is not None and minutes is None:
        minutes = 0
    if minutes is not None and seconds is None:
        seconds = 0

    @callback
    def time_automation_listener(now):
        """Listen for time changes and calls action."""
        hass.async_run_job(
            action,
            {
                'trigger': {
                    'platform': 'time_pattern',
                    'now': now,
                },
            },
        )

    # Returns the unsubscribe callback of the time listener.
    return async_track_time_change(
        hass,
        time_automation_listener,
        hour=hours,
        minute=minutes,
        second=seconds,
    )
async def async_setup_entry(hass, config_entry):
    """Set up Z-Wave from a config entry.

    Will automatically load components to support devices found on the
    network.
    """
    # Third-party Z-Wave libraries are imported lazily so the component
    # can be loaded without them installed.
    from pydispatch import dispatcher  # pylint: disable=import-error
    from openzwave.option import ZWaveOption
    from openzwave.network import ZWaveNetwork
    from openzwave.group import ZWaveGroup

    # YAML configuration (if any) was stashed by async_setup.
    config = {}
    if DATA_ZWAVE_CONFIG in hass.data:
        config = hass.data[DATA_ZWAVE_CONFIG]

    # Load configuration
    use_debug = config.get(CONF_DEBUG, DEFAULT_DEBUG)
    autoheal = config.get(CONF_AUTOHEAL, DEFAULT_CONF_AUTOHEAL)
    device_config = EntityValues(
        config.get(CONF_DEVICE_CONFIG),
        config.get(CONF_DEVICE_CONFIG_DOMAIN),
        config.get(CONF_DEVICE_CONFIG_GLOB))

    # Setup options
    options = ZWaveOption(
        config_entry.data[CONF_USB_STICK_PATH],
        user_path=hass.config.config_dir,
        config_path=config.get(CONF_CONFIG_PATH))

    options.set_console_output(use_debug)

    if CONF_NETWORK_KEY in config_entry.data:
        options.addOption("NetworkKey", config_entry.data[CONF_NETWORK_KEY])

    # options.lock blocks, so run it in the executor.
    await hass.async_add_executor_job(options.lock)

    network = hass.data[DATA_NETWORK] = ZWaveNetwork(options, autostart=False)
    hass.data[DATA_DEVICES] = {}
    hass.data[DATA_ENTITY_VALUES] = []

    if use_debug:  # pragma: no cover
        def log_all(signal, value=None):
            """Log all the signals."""
            print("")
            print("SIGNAL *****", signal)
            if value and signal in (ZWaveNetwork.SIGNAL_VALUE_CHANGED,
                                    ZWaveNetwork.SIGNAL_VALUE_ADDED,
                                    ZWaveNetwork.SIGNAL_SCENE_EVENT,
                                    ZWaveNetwork.SIGNAL_NODE_EVENT,
                                    ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED,
                                    ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED,
                                    ZWaveNetwork
                                    .SIGNAL_ALL_NODES_QUERIED_SOME_DEAD):
                pprint(_obj_to_dict(value))

            print("")

        dispatcher.connect(log_all, weak=False)

    def value_added(node, value):
        """Handle new added value to a node on the network."""
        # Check if this value should be tracked by an existing entity
        for values in hass.data[DATA_ENTITY_VALUES]:
            values.check_value(value)

        for schema in DISCOVERY_SCHEMAS:
            if not check_node_schema(node, schema):
                continue
            if not check_value_schema(
                    value, schema[const.DISC_VALUES][const.DISC_PRIMARY]):
                continue

            # `registry` is a closure over the name assigned below; this
            # handler is only connected to the dispatcher after that
            # assignment, so it is always bound when this runs.
            values = ZWaveDeviceEntityValues(
                hass, schema, value, config, device_config, registry)

            # We create a new list and update the reference here so that
            # the list can be safely iterated over in the main thread
            new_values = hass.data[DATA_ENTITY_VALUES] + [values]
            hass.data[DATA_ENTITY_VALUES] = new_values

    component = EntityComponent(_LOGGER, DOMAIN, hass)
    registry = await async_get_registry(hass)

    def node_added(node):
        """Handle a new node on the network."""
        entity = ZWaveNodeEntity(node, network)

        def _add_node_to_component():
            # Skip nodes that were already added (duplicate signal).
            if hass.data[DATA_DEVICES].get(entity.unique_id):
                return

            name = node_name(node)
            generated_id = generate_entity_id(DOMAIN + '.{}', name, [])
            node_config = device_config.get(generated_id)
            if node_config.get(CONF_IGNORED):
                _LOGGER.info(
                    "Ignoring node entity %s due to device settings",
                    generated_id)
                return

            hass.data[DATA_DEVICES][entity.unique_id] = entity
            component.add_entities([entity])

        if entity.unique_id:
            _add_node_to_component()
            return

        # No unique id yet: wait (with timeout) until the node is ready,
        # then add it anyway.
        @callback
        def _on_ready(sec):
            _LOGGER.info("Z-Wave node %d ready after %d seconds",
                         entity.node_id, sec)
            hass.async_add_job(_add_node_to_component)

        @callback
        def _on_timeout(sec):
            _LOGGER.warning(
                "Z-Wave node %d not ready after %d seconds, "
                "continuing anyway",
                entity.node_id, sec)
            hass.async_add_job(_add_node_to_component)

        hass.add_job(check_has_unique_id, entity, _on_ready, _on_timeout,
                     hass.loop)

    def network_ready():
        """Handle the query of all awake nodes."""
        _LOGGER.info("Zwave network is ready for use. All awake nodes "
                     "have been queried. Sleeping nodes will be "
                     "queried when they awake.")
        hass.bus.fire(const.EVENT_NETWORK_READY)

    def network_complete():
        """Handle the querying of all nodes on network."""
        _LOGGER.info("Z-Wave network is complete. All nodes on the network "
                     "have been queried")
        hass.bus.fire(const.EVENT_NETWORK_COMPLETE)

    def network_complete_some_dead():
        """Handle the querying of all nodes on network."""
        _LOGGER.info("Z-Wave network is complete. All nodes on the network "
                     "have been queried, but some nodes are marked dead")
        hass.bus.fire(const.EVENT_NETWORK_COMPLETE_SOME_DEAD)

    # Wire the handlers above to the openzwave dispatcher signals.
    dispatcher.connect(
        value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED, weak=False)
    dispatcher.connect(
        node_added, ZWaveNetwork.SIGNAL_NODE_ADDED, weak=False)
    dispatcher.connect(
        network_ready, ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED, weak=False)
    dispatcher.connect(
        network_complete, ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED, weak=False)
    dispatcher.connect(
        network_complete_some_dead,
        ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD, weak=False)

    # --- Service handlers (registered in _finalize_start below) ---

    def add_node(service):
        """Switch into inclusion mode."""
        _LOGGER.info("Z-Wave add_node have been initialized")
        network.controller.add_node()

    def add_node_secure(service):
        """Switch into secure inclusion mode."""
        _LOGGER.info("Z-Wave add_node_secure have been initialized")
        network.controller.add_node(True)

    def remove_node(service):
        """Switch into exclusion mode."""
        _LOGGER.info("Z-Wwave remove_node have been initialized")
        network.controller.remove_node()

    def cancel_command(service):
        """Cancel a running controller command."""
        _LOGGER.info("Cancel running Z-Wave command")
        network.controller.cancel_command()

    def heal_network(service):
        """Heal the network."""
        _LOGGER.info("Z-Wave heal running")
        network.heal()

    def soft_reset(service):
        """Soft reset the controller."""
        _LOGGER.info("Z-Wave soft_reset have been initialized")
        network.controller.soft_reset()

    def update_config(service):
        """Update the config from git."""
        _LOGGER.info("Configuration update has been initialized")
        network.controller.update_ozw_config()

    def test_network(service):
        """Test the network by sending commands to all the nodes."""
        _LOGGER.info("Z-Wave test_network have been initialized")
        network.test()

    def stop_network(_service_or_event):
        """Stop Z-Wave network."""
        _LOGGER.info("Stopping Z-Wave network")
        network.stop()
        # Only announce the stop if HA itself is still running.
        if hass.state == CoreState.running:
            hass.bus.fire(const.EVENT_NETWORK_STOP)

    def rename_node(service):
        """Rename a node."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        node = network.nodes[node_id]
        name = service.data.get(const.ATTR_NAME)
        node.name = name
        _LOGGER.info(
            "Renamed Z-Wave node %d to %s", node_id, name)

    def rename_value(service):
        """Rename a node value."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        value_id = service.data.get(const.ATTR_VALUE_ID)
        node = network.nodes[node_id]
        value = node.values[value_id]
        name = service.data.get(const.ATTR_NAME)
        value.label = name
        _LOGGER.info(
            "Renamed Z-Wave value (Node %d Value %d) to %s",
            node_id, value_id, name)

    def set_poll_intensity(service):
        """Set the polling intensity of a node value."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        value_id = service.data.get(const.ATTR_VALUE_ID)
        node = network.nodes[node_id]
        value = node.values[value_id]
        intensity = service.data.get(const.ATTR_POLL_INTENSITY)
        # Intensity 0 means: disable polling entirely.
        if intensity == 0:
            if value.disable_poll():
                _LOGGER.info("Polling disabled (Node %d Value %d)",
                             node_id, value_id)
                return
            _LOGGER.info("Polling disabled failed (Node %d Value %d)",
                         node_id, value_id)
        else:
            if value.enable_poll(intensity):
                _LOGGER.info(
                    "Set polling intensity (Node %d Value %d) to %s",
                    node_id, value_id, intensity)
                return
            _LOGGER.info("Set polling intensity failed (Node %d Value %d)",
                         node_id, value_id)

    def remove_failed_node(service):
        """Remove failed node."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        _LOGGER.info("Trying to remove zwave node %d", node_id)
        network.controller.remove_failed_node(node_id)

    def replace_failed_node(service):
        """Replace failed node."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        _LOGGER.info("Trying to replace zwave node %d", node_id)
        network.controller.replace_failed_node(node_id)

    def set_config_parameter(service):
        """Set a config parameter to a node."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        node = network.nodes[node_id]
        param = service.data.get(const.ATTR_CONFIG_PARAMETER)
        selection = service.data.get(const.ATTR_CONFIG_VALUE)
        size = service.data.get(const.ATTR_CONFIG_SIZE)
        # Look for an already-discovered CONFIGURATION value matching the
        # parameter index; its type decides how the selection is applied.
        for value in (
                node.get_values(class_id=const.COMMAND_CLASS_CONFIGURATION)
                .values()):
            if value.index != param:
                continue
            if value.type in [const.TYPE_LIST, const.TYPE_BOOL]:
                value.data = str(selection)
                _LOGGER.info("Setting config parameter %s on Node %s "
                             "with list/bool selection %s", param, node_id,
                             str(selection))
                return
            if value.type == const.TYPE_BUTTON:
                # Buttons are pulsed rather than assigned a value.
                network.manager.pressButton(value.value_id)
                network.manager.releaseButton(value.value_id)
                _LOGGER.info("Setting config parameter %s on Node %s "
                             "with button selection %s", param, node_id,
                             selection)
                return
            value.data = int(selection)
            _LOGGER.info("Setting config parameter %s on Node %s "
                         "with selection %s", param, node_id,
                         selection)
            return
        # Parameter not discovered yet: fall back to a raw set.
        node.set_config_param(param, selection, size)
        _LOGGER.info("Setting unknown config parameter %s on Node %s "
                     "with selection %s", param, node_id,
                     selection)

    def refresh_node_value(service):
        """Refresh the specified value from a node."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        value_id = service.data.get(const.ATTR_VALUE_ID)
        node = network.nodes[node_id]
        node.values[value_id].refresh()
        _LOGGER.info("Node %s value %s refreshed", node_id, value_id)

    def set_node_value(service):
        """Set the specified value on a node."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        value_id = service.data.get(const.ATTR_VALUE_ID)
        value = service.data.get(const.ATTR_CONFIG_VALUE)
        node = network.nodes[node_id]
        node.values[value_id].data = value
        _LOGGER.info("Node %s value %s set to %s", node_id, value_id, value)

    def print_config_parameter(service):
        """Print a config parameter from a node."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        node = network.nodes[node_id]
        param = service.data.get(const.ATTR_CONFIG_PARAMETER)
        _LOGGER.info("Config parameter %s on Node %s: %s", param, node_id,
                     get_config_value(node, param))

    def print_node(service):
        """Print all information about z-wave node."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        node = network.nodes[node_id]
        nice_print_node(node)

    def set_wakeup(service):
        """Set wake-up interval of a node."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        node = network.nodes[node_id]
        value = service.data.get(const.ATTR_CONFIG_VALUE)
        if node.can_wake_up():
            for value_id in node.get_values(
                    class_id=const.COMMAND_CLASS_WAKE_UP):
                node.values[value_id].data = value
                _LOGGER.info("Node %s wake-up set to %d", node_id, value)
        else:
            _LOGGER.info("Node %s is not wakeable", node_id)

    def change_association(service):
        """Change an association in the zwave network."""
        association_type = service.data.get(const.ATTR_ASSOCIATION)
        node_id = service.data.get(const.ATTR_NODE_ID)
        target_node_id = service.data.get(const.ATTR_TARGET_NODE_ID)
        group = service.data.get(const.ATTR_GROUP)
        instance = service.data.get(const.ATTR_INSTANCE)

        node = ZWaveGroup(group, network, node_id)
        if association_type == 'add':
            node.add_association(target_node_id, instance)
            _LOGGER.info("Adding association for node:%s in group:%s "
                         "target node:%s, instance=%s", node_id, group,
                         target_node_id, instance)
        if association_type == 'remove':
            node.remove_association(target_node_id, instance)
            _LOGGER.info("Removing association for node:%s in group:%s "
                         "target node:%s, instance=%s", node_id, group,
                         target_node_id, instance)

    async def async_refresh_entity(service):
        """Refresh values that specific entity depends on."""
        entity_id = service.data.get(ATTR_ENTITY_ID)
        async_dispatcher_send(
            hass, SIGNAL_REFRESH_ENTITY_FORMAT.format(entity_id))

    def refresh_node(service):
        """Refresh all node info."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        node = network.nodes[node_id]
        node.refresh_info()

    def reset_node_meters(service):
        """Reset meter counters of a node."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        instance = service.data.get(const.ATTR_INSTANCE)
        node = network.nodes[node_id]
        for value in (
                node.get_values(class_id=const.COMMAND_CLASS_METER)
                .values()):
            if value.index != const.INDEX_METER_RESET:
                continue
            if value.instance != instance:
                continue
            # The reset value behaves like a button: press and release.
            network.manager.pressButton(value.value_id)
            network.manager.releaseButton(value.value_id)
            _LOGGER.info("Resetting meters on node %s instance %s....",
                         node_id, instance)
            return
        _LOGGER.info("Node %s on instance %s does not have resettable "
                     "meters.", node_id, instance)

    def heal_node(service):
        """Heal a node on the network."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        update_return_routes = service.data.get(const.ATTR_RETURN_ROUTES)
        node = network.nodes[node_id]
        _LOGGER.info("Z-Wave node heal running for node %s", node_id)
        node.heal(update_return_routes)

    def test_node(service):
        """Send test messages to a node on the network."""
        node_id = service.data.get(const.ATTR_NODE_ID)
        messages = service.data.get(const.ATTR_MESSAGES)
        node = network.nodes[node_id]
        _LOGGER.info("Sending %s test-messages to node %s.", messages,
                     node_id)
        node.test(messages)

    def start_zwave(_service_or_event):
        """Startup Z-Wave network."""
        _LOGGER.info("Starting Z-Wave network...")
        network.start()
        hass.bus.fire(const.EVENT_NETWORK_START)

        async def _check_awaked():
            """Wait for Z-wave awaked state (or timeout) and finalize start."""
            _LOGGER.debug(
                "network state: %d %s", network.state,
                network.state_str)

            start_time = dt_util.utcnow()
            while True:
                waited = int((dt_util.utcnow()-start_time).total_seconds())

                if network.state >= network.STATE_AWAKED:
                    # Need to be in STATE_AWAKED before talking to nodes.
                    _LOGGER.info("Z-Wave ready after %d seconds", waited)
                    break
                elif waited >= const.NETWORK_READY_WAIT_SECS:
                    # Wait up to NETWORK_READY_WAIT_SECS seconds for the Z-Wave
                    # network to be ready.
                    _LOGGER.warning(
                        "Z-Wave not ready after %d seconds, continuing anyway",
                        waited)
                    _LOGGER.info(
                        "final network state: %d %s", network.state,
                        network.state_str)
                    break
                else:
                    await asyncio.sleep(1, loop=hass.loop)

            hass.async_add_job(_finalize_start)

        hass.add_job(_check_awaked)

    def _finalize_start():
        """Perform final initializations after Z-Wave network is awaked."""
        polling_interval = convert(
            config.get(CONF_POLLING_INTERVAL), int)
        if polling_interval is not None:
            network.set_poll_interval(polling_interval, False)

        poll_interval = network.get_poll_interval()
        _LOGGER.info("Z-Wave polling interval set to %d ms", poll_interval)

        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_network)

        # Register node services for Z-Wave network
        hass.services.register(DOMAIN, const.SERVICE_ADD_NODE, add_node)
        hass.services.register(DOMAIN, const.SERVICE_ADD_NODE_SECURE,
                               add_node_secure)
        hass.services.register(DOMAIN, const.SERVICE_REMOVE_NODE, remove_node)
        hass.services.register(DOMAIN, const.SERVICE_CANCEL_COMMAND,
                               cancel_command)
        hass.services.register(DOMAIN, const.SERVICE_HEAL_NETWORK,
                               heal_network)
        hass.services.register(DOMAIN, const.SERVICE_SOFT_RESET, soft_reset)
        hass.services.register(DOMAIN, const.SERVICE_UPDATE_CONFIG,
                               update_config)
        hass.services.register(DOMAIN, const.SERVICE_TEST_NETWORK,
                               test_network)
        hass.services.register(DOMAIN, const.SERVICE_STOP_NETWORK,
                               stop_network)
        hass.services.register(DOMAIN, const.SERVICE_RENAME_NODE, rename_node,
                               schema=RENAME_NODE_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_RENAME_VALUE,
                               rename_value,
                               schema=RENAME_VALUE_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_SET_CONFIG_PARAMETER,
                               set_config_parameter,
                               schema=SET_CONFIG_PARAMETER_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_SET_NODE_VALUE,
                               set_node_value,
                               schema=SET_NODE_VALUE_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_REFRESH_NODE_VALUE,
                               refresh_node_value,
                               schema=REFRESH_NODE_VALUE_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_PRINT_CONFIG_PARAMETER,
                               print_config_parameter,
                               schema=PRINT_CONFIG_PARAMETER_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_REMOVE_FAILED_NODE,
                               remove_failed_node,
                               schema=NODE_SERVICE_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_REPLACE_FAILED_NODE,
                               replace_failed_node,
                               schema=NODE_SERVICE_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_CHANGE_ASSOCIATION,
                               change_association,
                               schema=CHANGE_ASSOCIATION_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_SET_WAKEUP, set_wakeup,
                               schema=SET_WAKEUP_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_PRINT_NODE, print_node,
                               schema=NODE_SERVICE_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_REFRESH_ENTITY,
                               async_refresh_entity,
                               schema=REFRESH_ENTITY_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_REFRESH_NODE,
                               refresh_node,
                               schema=NODE_SERVICE_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_RESET_NODE_METERS,
                               reset_node_meters,
                               schema=RESET_NODE_METERS_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_SET_POLL_INTENSITY,
                               set_poll_intensity,
                               schema=SET_POLL_INTENSITY_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_HEAL_NODE,
                               heal_node,
                               schema=HEAL_NODE_SCHEMA)
        hass.services.register(DOMAIN, const.SERVICE_TEST_NODE,
                               test_node,
                               schema=TEST_NODE_SCHEMA)

    # Setup autoheal
    if autoheal:
        _LOGGER.info("Z-Wave network autoheal is enabled")
        async_track_time_change(hass, heal_network, hour=0, minute=0, second=0)

    # Start the network once HA is up; also expose a manual start service.
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_zwave)
    hass.services.async_register(DOMAIN, const.SERVICE_START_NETWORK,
                                 start_zwave)

    # Forward the config entry to each supported platform.
    for entry_component in SUPPORTED_PLATFORMS:
        hass.async_create_task(hass.config_entries.async_forward_entry_setup(
            config_entry, entry_component))

    return True