def test_old_config_if_action_before(self):
    """Test for action before."""
    config = {
        automation.DOMAIN: {
            CONF_PLATFORM: 'event',
            event.CONF_EVENT_TYPE: 'test_event',
            'execute_service': 'test.automation',
            'if': {
                CONF_PLATFORM: 'time',
                time.CONF_BEFORE: '10:00'
            }
        }
    }
    automation.setup(self.hass, config)

    morning = dt_util.now().replace(hour=8)
    afternoon = dt_util.now().replace(hour=14)
    target = 'homeassistant.components.automation.time.dt_util.now'

    # Fires before 10:00; a second event after 10:00 adds no call.
    for fake_now, expected in ((morning, 1), (afternoon, 1)):
        with patch(target, return_value=fake_now):
            self.hass.bus.fire('test_event')
            self.hass.pool.block_till_done()
            self.assertEqual(expected, len(self.calls))
def test_if_action_after(self):
    """Test an 'after' time condition on an automation action."""
    config = {
        automation.DOMAIN: {
            "trigger": {"platform": "event", "event_type": "test_event"},
            "condition": {"platform": "time", "after": "10:00"},
            "action": {"service": "test.automation"},
        }
    }
    automation.setup(self.hass, config)

    morning = dt_util.now().replace(hour=8)
    afternoon = dt_util.now().replace(hour=14)
    target = "homeassistant.components.automation.time.dt_util.now"

    # Blocked before 10:00, fires after 10:00.
    for fake_now, expected in ((morning, 0), (afternoon, 1)):
        with patch(target, return_value=fake_now):
            self.hass.bus.fire("test_event")
            self.hass.pool.block_till_done()
            self.assertEqual(expected, len(self.calls))
async def test_if_action_after(hass, calls):
    """Test for if action after."""
    config = {
        automation.DOMAIN: {
            'trigger': {
                'platform': 'event',
                'event_type': 'test_event'
            },
            'condition': {
                'condition': 'time',
                'after': '10:00',
            },
            'action': {
                'service': 'test.automation'
            }
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    morning = dt_util.now().replace(hour=8)
    afternoon = dt_util.now().replace(hour=14)
    target = 'homeassistant.helpers.condition.dt_util.now'

    # No call before 10:00.
    with patch(target, return_value=morning):
        hass.bus.async_fire('test_event')
        await hass.async_block_till_done()
        assert len(calls) == 0

    # One call after 10:00.
    with patch(target, return_value=afternoon):
        hass.bus.async_fire('test_event')
        await hass.async_block_till_done()
        assert len(calls) == 1
def keep_alive(self, now):
    """Keep the api alive.

    Re-authenticates the iCloud session, walking the 2FA handshake
    (trusted device -> verification code -> validation) when required,
    then updates each tracked device whose polling interval is due.
    """
    from pyicloud.exceptions import PyiCloud2FARequiredError

    if self.api is None:
        self.reset_account_icloud()
    if self.api is None:
        # Account could not be (re)initialised; try again on next call.
        return

    if self.api.requires_2fa:
        try:
            self.api.authenticate()
        except PyiCloud2FARequiredError:
            # 2FA flow: each missing step asks the user and bails out
            # until the next keep_alive cycle.
            if self._trusted_device is None:
                self.icloud_need_trusted_device()
                return
            if self._verification_code is None:
                self.icloud_need_verification_code()
                return
            if self._verification_code == "waiting":
                # Still waiting for the user to enter the code.
                return
            if self.api.validate_verification_code(
                    self._trusted_device, self._verification_code):
                # Code accepted; clear it so normal auth resumes.
                self._verification_code = None
    else:
        self.api.authenticate()

    currentminutes = dt_util.now().hour * 60 + dt_util.now().minute
    for devicename in self.devices:
        interval = self._intervals.get(devicename, 1)
        # Update when the interval is due; for long intervals also retry
        # at the +2 and +4 minute offsets.
        if (currentminutes % interval == 0) or (
                interval > 10 and currentminutes % interval in [2, 4]):
            self.update_device(devicename)
def test_if_action_before(self):
    """Test a 'before' time condition on an automation action."""
    config = {
        automation.DOMAIN: {
            'trigger': {
                'platform': 'event',
                'event_type': 'test_event'
            },
            'condition': {
                'platform': 'time',
                'before': '10:00',
            },
            'action': {
                'execute_service': 'test.automation'
            }
        }
    }
    automation.setup(self.hass, config)

    morning = dt_util.now().replace(hour=8)
    afternoon = dt_util.now().replace(hour=14)
    target = 'homeassistant.components.automation.time.dt_util.now'

    # Fires before 10:00; the afternoon event adds no call.
    for fake_now, expected in ((morning, 1), (afternoon, 1)):
        with patch(target, return_value=fake_now):
            self.hass.bus.fire('test_event')
            self.hass.pool.block_till_done()
            self.assertEqual(expected, len(self.calls))
def test_if_action_list_weekday(self):
    """Test a weekday-list condition: only mon/tue fire the action."""
    config = {
        automation.DOMAIN: {
            "trigger": {"platform": "event", "event_type": "test_event"},
            "condition": {"platform": "time", "weekday": ["mon", "tue"]},
            "action": {"service": "test.automation"},
        }
    }
    automation.setup(self.hass, config)

    offset = dt_util.now().weekday()
    monday = dt_util.now() - timedelta(days=offset)
    tuesday = monday + timedelta(days=1)
    wednesday = tuesday + timedelta(days=1)
    target = "homeassistant.components.automation.time.dt_util.now"

    # Call count grows on mon and tue, stays flat on wed.
    for fake_now, expected in ((monday, 1), (tuesday, 2), (wednesday, 2)):
        with patch(target, return_value=fake_now):
            self.hass.bus.fire("test_event")
            self.hass.pool.block_till_done()
            self.assertEqual(expected, len(self.calls))
def test_get_age(self):
    """Test get_age."""
    # (age delta, expected human-readable string)
    cases = (
        (timedelta(seconds=0), "0 seconds"),
        (timedelta(seconds=1), "1 second"),
        (timedelta(seconds=30), "30 seconds"),
        (timedelta(minutes=5), "5 minutes"),
        (timedelta(minutes=1), "1 minute"),
        (timedelta(minutes=300), "5 hours"),
        (timedelta(minutes=320), "5 hours"),
        (timedelta(minutes=2*60*24), "2 days"),
        (timedelta(minutes=32*60*24), "1 month"),
        (timedelta(minutes=365*60*24), "1 year"),
    )
    for delta, expected in cases:
        diff = dt_util.now() - delta
        self.assertEqual(dt_util.get_age(diff), expected)
def test_old_config_if_action_list_weekday(self):
    """Test old-style config with a weekday list condition."""
    config = {
        automation.DOMAIN: {
            CONF_PLATFORM: "event",
            event.CONF_EVENT_TYPE: "test_event",
            "execute_service": "test.automation",
            "if": {CONF_PLATFORM: "time", time.CONF_WEEKDAY: ["mon", "tue"]},
        }
    }
    automation.setup(self.hass, config)

    offset = dt_util.now().weekday()
    monday = dt_util.now() - timedelta(days=offset)
    tuesday = monday + timedelta(days=1)
    wednesday = tuesday + timedelta(days=1)
    target = "homeassistant.components.automation.time.dt_util.now"

    # Call count grows on mon and tue, stays flat on wed.
    for fake_now, expected in ((monday, 1), (tuesday, 2), (wednesday, 2)):
        with patch(target, return_value=fake_now):
            self.hass.bus.fire("test_event")
            self.hass.pool.block_till_done()
            self.assertEqual(expected, len(self.calls))
def test_if_action_after(self):
    """Test for if action after."""
    config = {
        automation.DOMAIN: {
            'trigger': {
                'platform': 'event',
                'event_type': 'test_event'
            },
            'condition': {
                'platform': 'time',
                'after': '10:00',
            },
            'action': {
                'service': 'test.automation'
            }
        }
    }
    assert _setup_component(self.hass, automation.DOMAIN, config)

    morning = dt_util.now().replace(hour=8)
    afternoon = dt_util.now().replace(hour=14)
    target = 'homeassistant.helpers.condition.dt_util.now'

    # Blocked before 10:00, fires after 10:00.
    for fake_now, expected in ((morning, 0), (afternoon, 1)):
        with patch(target, return_value=fake_now):
            self.hass.bus.fire('test_event')
            self.hass.block_till_done()
            self.assertEqual(expected, len(self.calls))
def update(self):
    """Get the latest data from the smart plug."""
    if self._last_tried is not None:
        # NOTE(review): despite the "_s" suffix this value is in
        # *minutes* (total_seconds()/60), while the backoff budget
        # min(n_tried*2, 10) is logged as seconds — confirm intended
        # units before changing.
        last_try_s = (dt_util.now() - self._last_tried).total_seconds()/60
        retry_seconds = min(self._n_tried*2, 10) - last_try_s
        if self._n_tried > 0 and retry_seconds > 0:
            # Still inside the backoff window after a failure; skip.
            _LOGGER.warning("Waiting %s s to retry", retry_seconds)
            return
    _state = 'unknown'
    try:
        self._last_tried = dt_util.now()
        _state = self.smartplug.state
    except urllib.error.HTTPError:
        _LOGGER.error("D-Link connection problem")
    if _state == 'unknown':
        # Failed read: count the attempt and mark unavailable.
        self._n_tried += 1
        self.available = False
        _LOGGER.warning("Failed to connect to D-Link switch")
        return
    # Successful read: refresh all cached values and reset the counter.
    self.state = _state
    self.available = True
    self.temperature = self.smartplug.temperature
    self.current_consumption = self.smartplug.current_consumption
    self.total_consumption = self.smartplug.total_consumption
    self._n_tried = 0
def test_if_action_one_weekday(self):
    """Test for if action with one weekday."""
    config = {
        automation.DOMAIN: {
            'trigger': {
                'platform': 'event',
                'event_type': 'test_event'
            },
            'condition': {
                'platform': 'time',
                'weekday': 'mon',
            },
            'action': {
                'service': 'test.automation'
            }
        }
    }
    assert _setup_component(self.hass, automation.DOMAIN, config)

    offset = dt_util.now().weekday()
    monday = dt_util.now() - timedelta(days=offset)
    tuesday = monday + timedelta(days=1)
    target = 'homeassistant.helpers.condition.dt_util.now'

    # Fires on Monday; Tuesday's event adds no call.
    for fake_now, expected in ((monday, 1), (tuesday, 1)):
        with patch(target, return_value=fake_now):
            self.hass.bus.fire('test_event')
            self.hass.block_till_done()
            self.assertEqual(expected, len(self.calls))
def test_get_age(self):
    """Test get_age."""
    # (age delta, expected human-readable string)
    cases = (
        (timedelta(seconds=0), "0 seconds"),
        (timedelta(seconds=1), "1 second"),
        (timedelta(seconds=30), "30 seconds"),
        (timedelta(minutes=5), "5 minutes"),
        (timedelta(minutes=1), "1 minute"),
        (timedelta(minutes=300), "5 hours"),
        (timedelta(minutes=320), "5 hours"),
        (timedelta(minutes=2*60*24), "2 days"),
        (timedelta(minutes=32*60*24), "1 month"),
        (timedelta(minutes=365*60*24), "1 year"),
    )
    for delta, expected in cases:
        diff = dt_util.now() - delta
        assert dt_util.get_age(diff) == expected
def timer(self):
    """Check if we are allowed to update."""
    cutoff = dt_util.now() - self.interval
    if self.last_update < cutoff:
        # Enough time has passed; record the new update time.
        self.last_update = dt_util.now()
        return True
    return False
def test_old_config_if_action_before(self):
    """Test old-style config with a 'before' time condition."""
    config = {
        automation.DOMAIN: {
            CONF_PLATFORM: "event",
            event.CONF_EVENT_TYPE: "test_event",
            "execute_service": "test.automation",
            "if": {CONF_PLATFORM: "time", time.CONF_BEFORE: "10:00"},
        }
    }
    automation.setup(self.hass, config)

    morning = dt_util.now().replace(hour=8)
    afternoon = dt_util.now().replace(hour=14)
    target = "homeassistant.components.automation.time.dt_util.now"

    # Fires before 10:00; the afternoon event adds no call.
    for fake_now, expected in ((morning, 1), (afternoon, 1)):
        with patch(target, return_value=fake_now):
            self.hass.bus.fire("test_event")
            self.hass.pool.block_till_done()
            self.assertEqual(expected, len(self.calls))
def test_old_config_if_action_one_weekday(self):
    """Test for action with one weekday."""
    config = {
        automation.DOMAIN: {
            CONF_PLATFORM: 'event',
            event.CONF_EVENT_TYPE: 'test_event',
            'execute_service': 'test.automation',
            'if': {
                CONF_PLATFORM: 'time',
                time.CONF_WEEKDAY: 'mon',
            }
        }
    }
    automation.setup(self.hass, config)

    offset = dt_util.now().weekday()
    monday = dt_util.now() - timedelta(days=offset)
    tuesday = monday + timedelta(days=1)
    target = 'homeassistant.components.automation.time.dt_util.now'

    # Fires on Monday; Tuesday's event adds no call.
    for fake_now, expected in ((monday, 1), (tuesday, 1)):
        with patch(target, return_value=fake_now):
            self.hass.bus.fire('test_event')
            self.hass.pool.block_till_done()
            self.assertEqual(expected, len(self.calls))
def update(self, **kwargs):
    """Fetch the latest info from USPS."""
    import myusps

    self.packages = myusps.get_packages(self.session)
    self.mail = myusps.get_mail(self.session, now().date())
    self.attribution = myusps.ATTRIBUTION

    _LOGGER.debug(
        "Mail, request date: %s, list: %s", now().date(), self.mail)
    _LOGGER.debug("Package list: %s", self.packages)
def update_period(self):
    """Parse the templates and store a datetime tuple in _period.

    Renders the start/end templates (either a datetime string or a unix
    timestamp), derives the missing bound from self._duration, and
    clamps the period so it never extends into the future. On any
    template or parse error the method logs and returns without
    touching self._period.
    """
    start = None
    end = None

    # Parse start
    if self._start is not None:
        try:
            start_rendered = self._start.render()
        except (TemplateError, TypeError) as ex:
            HistoryStatsHelper.handle_template_exception(ex, 'start')
            return
        start = dt_util.parse_datetime(start_rendered)
        if start is None:
            # Not a datetime string; try to read it as a unix timestamp.
            try:
                start = dt_util.as_local(dt_util.utc_from_timestamp(
                    math.floor(float(start_rendered))))
            except ValueError:
                # Fixed: the message previously rendered as
                # "datetimeor a timestamp" (missing space between the
                # two string parts).
                _LOGGER.error("Parsing error: start must be a datetime "
                              "or a timestamp")
                return

    # Parse end
    if self._end is not None:
        try:
            end_rendered = self._end.render()
        except (TemplateError, TypeError) as ex:
            HistoryStatsHelper.handle_template_exception(ex, 'end')
            return
        end = dt_util.parse_datetime(end_rendered)
        if end is None:
            try:
                end = dt_util.as_local(dt_util.utc_from_timestamp(
                    math.floor(float(end_rendered))))
            except ValueError:
                _LOGGER.error("Parsing error: end must be a datetime "
                              "or a timestamp")
                return

    # Calculate start or end using the duration
    if start is None:
        start = end - self._duration
    if end is None:
        end = start + self._duration

    if start > dt_util.now():
        # History hasn't been written yet for this period
        return

    if dt_util.now() < end:
        # No point in making stats of the future
        end = dt_util.now()

    self._period = start, end
def _update_info(self):
    """Scan the network for devices.

    Returns boolean if scanning successful.
    """
    _LOGGER.info("Scanning")

    from nmap import PortScanner, PortScannerError
    scanner = PortScanner()

    options = "-F --host-timeout 5s "
    exclude = "--exclude "

    if self.home_interval:
        # Devices seen within home_interval are kept as-is and excluded
        # from the new scan.
        boundary = dt_util.now() - self.home_interval
        last_results = [device for device in self.last_results
                        if device.last_update > boundary]
        if last_results:
            # Pylint is confused here.
            # pylint: disable=no-member
            exclude_hosts = self.exclude + [device.ip for device
                                            in last_results]
        else:
            exclude_hosts = self.exclude
    else:
        last_results = []
        exclude_hosts = self.exclude
    if exclude_hosts:
        exclude = " --exclude {}".format(",".join(exclude_hosts))
    # NOTE(review): when exclude_hosts is empty the bare "--exclude "
    # prefix is still appended to options — confirm nmap tolerates a
    # trailing --exclude with no hosts.
    options += exclude

    try:
        result = scanner.scan(hosts=self.hosts, arguments=options)
    except PortScannerError:
        return False

    now = dt_util.now()
    for ipv4, info in result["scan"].items():
        if info["status"]["state"] != "up":
            continue
        name = info["hostnames"][0]["name"] if info["hostnames"] else ipv4
        # Mac address only returned if nmap ran as root
        mac = info["addresses"].get("mac") or _arp(ipv4)
        if mac is None:
            continue
        last_results.append(Device(mac.upper(), name, ipv4, now))

    self.last_results = last_results

    _LOGGER.info("nmap scan successful")
    return True
async def async_update(self):
    """Determine whether today is a workday and set the state."""
    # isoweekday(): 1 = Monday ... 7 = Sunday; shift to a 0-based index.
    weekday_index = datetime.datetime.today().isoweekday() - 1
    weekday_name = day_to_string(weekday_index)

    # Start from "not a workday"; include may switch it on, exclude
    # always wins and switches it back off.
    state = STATE_OFF
    if self.is_include(weekday_name, dt_util.now()):
        state = STATE_ON
    if self.is_exclude(weekday_name, dt_util.now()):
        state = STATE_OFF
    self._state = state
def _update_info(self):
    """Scan the network for devices.

    Returns boolean if scanning successful.
    """
    _LOGGER.info('Scanning')

    from nmap import PortScanner, PortScannerError
    scanner = PortScanner()

    options = '-F --host-timeout 5s '

    # Devices seen within home_interval are kept as-is and excluded
    # from the scan to reduce churn.
    fresh_devices = []
    if self.home_interval:
        cutoff = dt_util.now() - self.home_interval
        fresh_devices = [device for device in self.last_results
                         if device.last_update > cutoff]

    if fresh_devices:
        skip_hosts = self.exclude + [device.ip for device in fresh_devices]
    else:
        skip_hosts = self.exclude
    if skip_hosts:
        options += ' --exclude {}'.format(','.join(skip_hosts))

    try:
        result = scanner.scan(hosts=' '.join(self.hosts), arguments=options)
    except PortScannerError:
        return False

    scan_time = dt_util.now()
    found = list(fresh_devices)
    for ipv4, info in result['scan'].items():
        if info['status']['state'] != 'up':
            continue
        name = info['hostnames'][0]['name'] if info['hostnames'] else ipv4
        # Mac address only returned if nmap ran as root
        mac = info['addresses'].get('mac') or _arp(ipv4)
        if mac is None:
            continue
        found.append(Device(mac.upper(), name, ipv4, scan_time))

    self.last_results = found

    _LOGGER.info('nmap scan successful')
    return True
def data_format(self):
    """Format raw data into easily accessible dict.

    For each monitored condition, looks up the raw value via the
    (primary_key, sensor_key) pair from MONITORED_CONDITIONS and stores
    a human-readable value in self.data. Unsupported fields are logged
    and stored as STATE_UNKNOWN.
    """
    for attr_key in self.conditions:
        value = MONITORED_CONDITIONS[attr_key]
        try:
            primary_key = value[0][0]
            sensor_key = value[0][1]
            if primary_key in self.raw_data:
                sensor_value = self.raw_data[primary_key][sensor_key]
                # Format sensor for better readability
                if (attr_key == ATTR_NEW_VERSION and
                        sensor_value == '0.0.0.0'):
                    # '0.0.0.0' is the router's "no update" marker.
                    sensor_value = 'Latest'
                elif attr_key == ATTR_UPTIME:
                    # Seconds -> days, two decimals.
                    sensor_value = round(sensor_value / (3600 * 24), 2)
                elif attr_key == ATTR_LAST_RESTART:
                    last_restart = (
                        dt.now() - timedelta(seconds=sensor_value))
                    sensor_value = last_restart.strftime(
                        '%Y-%m-%d %H:%M:%S')
                elif attr_key == ATTR_STATUS:
                    if sensor_value:
                        sensor_value = 'Online'
                    else:
                        sensor_value = 'Offline'
                elif attr_key == ATTR_LOCAL_IP:
                    if not self.raw_data['wan']['online']:
                        sensor_value = STATE_UNKNOWN
                self.data[attr_key] = sensor_value
        except KeyError:
            # Raw data did not contain the expected keys.
            _LOGGER.error("Router does not support %s field. "
                          "Please remove %s from monitored_conditions",
                          sensor_key, attr_key)
            self.data[attr_key] = STATE_UNKNOWN
def _update_info(self):
    """Check the Bbox for devices.

    Returns boolean if scanning successful.
    """
    _LOGGER.info("Scanning...")

    import pybbox

    box = pybbox.Bbox(ip=self.host)
    connected = box.get_all_connected_devices()

    scan_time = dt_util.now()
    # Keep only devices the box reports as currently active.
    self.last_results = [
        Device(entry['macaddress'], entry['hostname'],
               entry['ipaddress'], scan_time)
        for entry in connected if entry['active'] == 1
    ]

    _LOGGER.info("Scan successful")
    return True
def check_light_on_dev_state_change(entity, old_state, new_state):
    """Handle tracked device state changes."""
    lights_are_on = group.is_on(hass, light_group)
    light_needed = not (lights_are_on or sun.is_on(hass))

    # These variables are needed for the elif check
    now = dt_util.now()
    start_point = calc_time_for_light_when_sunset()

    # Do we need lights?
    if light_needed:
        logger.info("Home coming event for %s. Turning lights on", entity)
        light.async_turn_on(hass, light_ids, profile=light_profile)

    # Are we in the time span where we would turn on the lights
    # if someone would be home?
    # Check this by seeing if current time is later than the point
    # in time when we would start putting the lights on.
    elif (start_point and
          start_point < now < sun.next_setting(hass)):

        # Check for every light if it would be on if someone was home
        # when the fading in started and turn it on if so
        for index, light_id in enumerate(light_ids):
            if now > start_point + index * LIGHT_TRANSITION_TIME:
                light.async_turn_on(hass, light_id)
            else:
                # If this light didn't happen to be turned on yet so
                # will all the following then, break.
                break
def _update_current_price(self):
    """Update the current hourly price and today's min/max/avg stats.

    Returns True when a price entry covering the current hour was found.
    """
    state = None
    max_price = 0
    min_price = 10000
    sum_price = 0
    num = 0
    now = dt_util.now()
    for key, price_total in self._tibber_home.price_total.items():
        price_time = dt_util.as_local(dt_util.parse_datetime(key))
        price_total = round(price_total, 3)
        time_diff = (now - price_time).total_seconds()/60
        # Track the newest price timestamp we have seen.
        if (not self._last_data_timestamp or
                price_time > self._last_data_timestamp):
            self._last_data_timestamp = price_time
        if 0 <= time_diff < 60:
            # This entry covers the current hour.
            state = price_total
            self._last_updated = price_time
        if now.date() == price_time.date():
            # Aggregate statistics over today's prices only.
            max_price = max(max_price, price_total)
            min_price = min(min_price, price_total)
            num += 1
            sum_price += price_total
    self._state = state
    self._device_state_attributes['max_price'] = max_price
    # NOTE(review): raises ZeroDivisionError when no price is dated
    # today (num == 0) — confirm upstream always supplies today's data.
    self._device_state_attributes['avg_price'] = round(sum_price / num, 3)
    self._device_state_attributes['min_price'] = min_price
    return state is not None
def test_time(self):
    """Test the time at a different location."""
    expected = dt_util.now(time_zone=self.time_zone).strftime('%H:%M')
    state = self.hass.states.get('sensor.worldclock_sensor')
    assert state is not None
    assert state.state == expected
def test_now(self, mock_is_safe):
    """Test now method."""
    frozen_now = dt_util.now()
    with patch.dict(template.ENV.globals, {'now': lambda: frozen_now}):
        rendered = template.Template(
            '{{ now().isoformat() }}', self.hass).render()
        assert rendered == frozen_now.isoformat()
def passcode_timeout_listener(self, eventignored):
    """Unlock the panel once the passcode timeout has expired."""
    if self._passcode_timeoutat is None:
        return
    if now() <= self._passcode_timeoutat:
        # Timeout not reached yet.
        return
    self._panel_locked = False
    self._passcode_timeoutat = None
    self._passcodeAttemptNo = 0
    self.schedule_update_ha_state()
def __init__(self, interval, log_file):
    """Initialize the parser."""
    self.interval = interval
    self.log_file = log_file
    # Parsed entries accumulated by the parser.
    self.data = []
    self.last_update = dt_util.now()
    # Regex objects keyed by ip (filled in elsewhere).
    self.ip_regex = {}
def test_in_progress_event(self, mock_next_event):
    """Test that we can create an event trigger on device."""
    # Event that started 30 minutes ago and ends 30 minutes from now,
    # so "now" is always inside the event window.
    middle_of_event = dt_util.now() \
        - dt_util.dt.timedelta(minutes=30)
    event = {
        'start': {
            'dateTime': middle_of_event.isoformat()
        },
        'end': {
            'dateTime': (middle_of_event + dt_util.dt
                         .timedelta(minutes=60))
            .isoformat()
        },
        'summary': 'Test Event in Progress',
        'reminders': {'useDefault': True},
        'id': 'aioehgni435lihje',
        'status': 'confirmed',
        'updated': '2016-11-05T15:52:07.329Z',
        'organizer': {
            'email': '*****@*****.**',
            'displayName': 'Organizer Name',
            'self': True,
        },
        'created': '2016-11-05T15:52:07.000Z',
        'iCalUID': '*****@*****.**',
        'sequence': 0,
        'creator': {
            'email': '*****@*****.**',
            'displayName': 'Organizer Name',
        },
        'etag': '"2956722254658000"',
        'kind': 'calendar#event',
        'htmlLink': 'https://www.google.com/calendar/event?eid=*******',
    }
    mock_next_event.return_value.event = event

    device_name = 'Test Event in Progress'
    device_id = 'test_event_in_progress'

    cal = calendar.GoogleCalendarEventDevice(self.hass, None, device_id,
                                             {'name': device_name})

    # The device should report ON while inside the event window.
    self.assertEquals(cal.name, device_name)
    self.assertEquals(cal.state, STATE_ON)
    self.assertFalse(cal.offset_reached())
    self.assertEquals(cal.device_state_attributes, {
        'message': event['summary'],
        'all_day': False,
        'offset_reached': False,
        'start_time': middle_of_event.strftime(DATE_STR_FORMAT),
        'end_time': (middle_of_event + dt_util.dt.timedelta(minutes=60))
        .strftime(DATE_STR_FORMAT),
        'location': '',
        'description': ''
    })
def time(before=None, after=None, weekday=None):
    """Test if local time condition matches.

    Handle the fact that time is continuous and we may be testing for
    a period that crosses midnight. In that case it is easier to test
    for the opposite. "(23:59 <= now < 00:01)" would be the same as
    "not (00:01 <= now < 23:59)".
    """
    now = dt_util.now()
    now_time = now.time()

    if after is None:
        after = dt_util.dt.time(0)
    if before is None:
        before = dt_util.dt.time(23, 59, 59, 999999)

    # Window check, inverted when the window crosses midnight.
    if after < before:
        in_window = after <= now_time < before
    else:
        in_window = not (before <= now_time < after)
    if not in_window:
        return False

    if weekday is not None:
        now_weekday = WEEKDAYS[now.weekday()]
        # weekday may be a single name or a collection of names.
        if isinstance(weekday, str):
            if weekday != now_weekday:
                return False
        elif now_weekday not in weekday:
            return False

    return True
async def async_run(self):
    """Entities updater loop.

    Consumes parsed BLE advertisements from self.dataqueue, creates
    sensor entities per MAC on first sight, feeds readings into them,
    and periodically (every self.period seconds) flushes pending
    entity updates and restarts the scanner.
    """

    def temperature_limit(config, mac, temp):
        """Set limits for temperature measurement in °C or °F."""
        fmac = ':'.join(mac[i:i + 2] for i in range(0, len(mac), 2))
        if config[CONF_DEVICES]:
            for device in config[CONF_DEVICES]:
                if fmac in device["mac"].upper():
                    if CONF_TEMPERATURE_UNIT in device:
                        if device[CONF_TEMPERATURE_UNIT] == TEMP_FAHRENHEIT:
                            temp_fahrenheit = temp * 9 / 5 + 32
                            return temp_fahrenheit
                    break
        return temp

    _LOGGER.debug("Entities updater loop started!")
    sensors_by_mac = {}
    sensors = []
    batt = {}  # batteries
    rssi = {}
    mibeacon_cnt = 0
    new_sensor_message = False
    ts_last = dt_util.now()
    ts_now = ts_last
    data = None
    await asyncio.sleep(0)
    while True:
        # A None item on the queue is the shutdown signal; a 1-second
        # timeout lets the periodic flush below run even when idle.
        try:
            advevent = await asyncio.wait_for(self.dataqueue.get(), 1)
            if advevent is None:
                _LOGGER.debug("Entities updater loop stopped")
                return True
            data = advevent
            self.dataqueue.task_done()
        except asyncio.TimeoutError:
            pass
        if data:
            mibeacon_cnt += 1
            mac = data["mac"]
            # the RSSI value will be averaged for all valuable packets
            if mac not in rssi:
                rssi[mac] = []
            rssi[mac].append(int(data["rssi"]))
            batt_attr = None
            sensortype = data["type"]
            firmware = data["firmware"]
            # Per-sensor slot indices for this device type (9 = absent).
            t_i, h_i, m_i, p_i, c_i, i_i, f_i, cn_i, bu_i, w_i, im_i, v_i, b_i = MMTS_DICT[sensortype][0]
            if mac not in sensors_by_mac:
                # First packet from this MAC: build its entity list.
                sensors = []
                if t_i != 9:
                    sensors.insert(t_i, TemperatureSensor(self.config, mac, sensortype, firmware))
                if h_i != 9:
                    sensors.insert(h_i, HumiditySensor(self.config, mac, sensortype, firmware))
                if m_i != 9:
                    sensors.insert(m_i, MoistureSensor(self.config, mac, sensortype, firmware))
                if p_i != 9:
                    sensors.insert(p_i, PressureSensor(self.config, mac, sensortype, firmware))
                if c_i != 9:
                    sensors.insert(c_i, ConductivitySensor(self.config, mac, sensortype, firmware))
                if i_i != 9:
                    sensors.insert(i_i, IlluminanceSensor(self.config, mac, sensortype, firmware))
                if f_i != 9:
                    sensors.insert(f_i, FormaldehydeSensor(self.config, mac, sensortype, firmware))
                if cn_i != 9:
                    sensors.insert(cn_i, ConsumableSensor(self.config, mac, sensortype, firmware))
                if bu_i != 9:
                    sensors.insert(bu_i, ButtonSensor(self.config, mac, sensortype, firmware))
                if w_i != 9:
                    sensors.insert(w_i, WeightSensor(self.config, mac, sensortype, firmware))
                if im_i != 9:
                    sensors.insert(im_i, ImpedanceSensor(self.config, mac, sensortype, firmware))
                if self.batt_entities and (v_i != 9) and "voltage" in data:
                    # only add voltage sensor if available in data
                    try:
                        sensors.insert(v_i, VoltageSensor(self.config, mac, sensortype, firmware))
                    except IndexError:
                        pass
                if self.batt_entities and (b_i != 9):
                    sensors.insert(b_i, BatterySensor(self.config, mac, sensortype, firmware))
                if len(sensors) != 0:
                    sensors_by_mac[mac] = sensors
                    self.add_entities(sensors)
            else:
                sensors = sensors_by_mac[mac]

            if data["data"] is False:
                # Packet carried no payload; skip to the next item.
                data = None
                continue

            # store found readings per device
            if (b_i != 9):
                if "battery" in data:
                    batt[mac] = int(data["battery"])
                    batt_attr = batt[mac]
                    if self.batt_entities:
                        sensors[b_i].collect(data)
                else:
                    try:
                        batt_attr = batt[mac]
                    except KeyError:
                        batt_attr = None

            # measuring sensors
            if "temperature" in data and (t_i != 9):
                # schedule an immediate update of kettle temperature
                if sensortype in KETTLES:
                    entity = sensors[t_i]
                    entity.collect(data, batt_attr)
                    if entity.ready_for_update is True:
                        entity.rssi_values = rssi[mac].copy()
                        entity.async_schedule_update_ha_state(True)
                        rssi[mac].clear()
                        entity.pending_update = False
                else:
                    # Drop readings outside the configured temperature
                    # limits (per-device °C/°F aware).
                    if (
                        temperature_limit(
                            self.config, mac, CONF_TMAX
                        ) >= data["temperature"] >= temperature_limit(self.config, mac, CONF_TMIN)
                    ):
                        sensors[t_i].collect(data, batt_attr)
                    elif self.log_spikes:
                        _LOGGER.error(
                            "Temperature spike: %s (%s)",
                            data["temperature"],
                            mac,
                        )
            if "humidity" in data and (h_i != 9):
                if CONF_HMAX >= data["humidity"] >= CONF_HMIN:
                    sensors[h_i].collect(data, batt_attr)
                elif self.log_spikes:
                    _LOGGER.error(
                        "Humidity spike: %s (%s)",
                        data["humidity"],
                        mac,
                    )
            if "conductivity" in data and (c_i != 9):
                sensors[c_i].collect(data, batt_attr)
            if "pressure" in data and (p_i != 9):
                sensors[p_i].collect(data, batt_attr)
            if "moisture" in data and (m_i != 9):
                sensors[m_i].collect(data, batt_attr)
            if "illuminance" in data and (i_i != 9):
                sensors[i_i].collect(data, batt_attr)
            if "formaldehyde" in data and (f_i != 9):
                sensors[f_i].collect(data, batt_attr)
            if "consumable" in data and (cn_i != 9):
                sensors[cn_i].collect(data, batt_attr)
            if "button" in data and (bu_i != 9):
                button = sensors[bu_i]
                # schedule an immediate update of button sensors
                button.collect(data, batt_attr)
                if button.ready_for_update is True:
                    button.rssi_values = rssi[mac].copy()
                    button.async_schedule_update_ha_state(True)
                    button.pending_update = False
            if "weight" in data and (w_i != 9):
                weight = sensors[w_i]
                # schedule an immediate update of weight sensors
                weight.collect(data, batt_attr)
                if weight.ready_for_update is True:
                    weight.rssi_values = rssi[mac].copy()
                    weight.async_schedule_update_ha_state(True)
                    weight.pending_update = False
            if "impedance" in data and (im_i != 9):
                impedance = sensors[im_i]
                # schedule an immediate update of impedance sensors
                impedance.collect(data, batt_attr)
                if impedance.ready_for_update is True:
                    impedance.rssi_values = rssi[mac].copy()
                    impedance.async_schedule_update_ha_state(True)
                    impedance.pending_update = False
            if self.batt_entities:
                if "voltage" in data and (v_i != 9):
                    try:
                        sensors[v_i].collect(data, batt_attr)
                    except IndexError:
                        # Voltage slot missing because the sensor list
                        # was built before voltage data appeared.
                        if new_sensor_message is False:
                            _LOGGER.warning(
                                "New voltage sensor found with MAC address %s. "
                                "Make sure you use only one advertisement type (not all)",
                                mac
                            )
                            new_sensor_message = True
                        pass
            data = None
        ts_now = dt_util.now()
        if ts_now - ts_last < timedelta(seconds=self.period):
            continue
        ts_last = ts_now
        # restarting scanner
        self.monitor.restart()
        # for every updated device
        for mac, elist in sensors_by_mac.items():
            for entity in elist:
                if entity.pending_update is True:
                    if entity.ready_for_update is True:
                        entity.rssi_values = rssi[mac].copy()
                        entity.async_schedule_update_ha_state(True)
        for mac in rssi:
            rssi[mac].clear()
        _LOGGER.debug(
            "%i MiBeacon BLE ADV messages processed for %i measuring device(s).",
            mibeacon_cnt,
            len(sensors_by_mac),
        )
        mibeacon_cnt = 0
def cluster_command(self, tsn, command_id, args):
    """Handle a LevelControl cluster command and fire a 'click' event.

    Tracks a virtual brightness value (self._value / self.value) for
    step/move/stop commands and mirrors an on/off state onto the
    bound entity.
    """
    from zigpy.zcl.clusters.general import LevelControl
    if tsn == self._prev_tsn:
        # Duplicate transaction sequence number; ignore the repeat.
        return
    self._prev_tsn = tsn
    command = LevelControl.server_commands.get(command_id, ('unknown', ))[0]
    event_data = {
        'entity_id': self._entity.entity_id,
        'channel': self._identifier,
        'command': command
    }
    if command in ('move_with_on_off', 'step_with_on_off'):
        self.on_off = True
    if command in ('step', 'step_with_on_off'):
        # args[0]: direction (0 = up, 1 = down); args[1]: step size.
        if args[0] == 0:
            event_data['up_down'] = 1
        elif args[0] == 1:
            event_data['up_down'] = -1
            if args[1] == 0:
                self._value = 254
                self._entity._state = 1
        event_data['step'] = args[1]
        self._value += event_data['up_down'] * event_data['step']
        # Clamp the virtual value into [1, 254].
        if self._value <= 0:
            if self.on_off:
                self._entity._state = 0
            self.value = 1
            self._value = 1
        elif self._value > 255:
            self._value = 254
            self.value = 254
        else:
            self.value = int(self._value)
            if self.on_off:
                self._entity._state = 1
    # elif command == 'move_to_level_with_on_off':
    #     self.value = self._value
    elif command in ('move_with_on_off', 'move'):
        if args[0] == 0:
            event_data['up_down'] = 1
        elif args[0] == 1:
            event_data['up_down'] = -1
        # args[1]: rate; movement is integrated on 'stop'.
        self.step = args[1] * event_data['up_down']
        event_data['step'] = args[1]
        if self.start_time is None:
            self.start_time = dt_util.utcnow().timestamp()
    elif command == 'stop':
        if self.start_time is not None:
            # Integrate elapsed move time into the virtual value.
            delta_time = dt_util.utcnow().timestamp() - self.start_time
            _LOGGER.debug('Delta: %s move: %s',
                          delta_time, delta_time * self.step)
            self._value += int(delta_time * self.step)
            self.start_time = None
            # Clamp the virtual value into [1, 254].
            if self._value <= 1:
                if self.on_off:
                    self._entity._state = 0
                self.value = 1
                self._value = 1
            elif self._value >= 254:
                self._value = 254
                self.value = 254
            else:
                self.value = int(self._value)
                if self.on_off:
                    self._entity._state = 1
    self._entity.hass.bus.fire('click', event_data)
    _LOGGER.debug('click event [tsn:%s] %s', tsn, event_data)
    self._entity._device_state_attributes.update({
        'last seen': dt_util.now(),
        self._identifier: self.value,
        'last command': command
    })
    self._entity.schedule_update_ha_state()
} @property def state(self): """Return the state of the calendar event.""" if (event := self.event) is None: return STATE_OFF event = normalize_event(event) start = event["dt_start"] end = event["dt_end"] if start is None or end is None: return STATE_OFF now = dt.now() if start <= now < end: return STATE_ON return STATE_OFF async def async_get_events(self, hass, start_date, end_date): """Return calendar events within a datetime range.""" raise NotImplementedError() class CalendarEventView(http.HomeAssistantView): """View to retrieve calendar content.""" url = "/api/calendars/{entity_id}"
async def _generate_fig(self, width, height):
    """Render the price (and optionally consumption) graph to self._path.

    width/height are in pixels (figure is created at dpi=200).
    Rate-limited to at most one render per minute.
    """
    if (dt_util.now() - self._last_update) < datetime.timedelta(minutes=1):
        return
    # Refresh price info when the cached data is close to running out.
    if (self._home.last_data_timestamp -
            dt_util.now()).total_seconds() > 11 * 3600:
        await self._home.update_info_and_price_info()
    self._last_update = dt_util.now()
    if self._home.has_real_time_consumption:
        self.realtime_state = self.hass.states.get(
            f"sensor.real_time_consumption_{slugify(self._name)}")
        if self.realtime_state is None:
            # Fallback entity id used by newer integration versions.
            self.realtime_state = self.hass.states.get(
                f"sensor.power_{slugify(self._name)}")
    else:
        self.realtime_state = None
    prices = []
    dates = []
    now = dt_util.now()
    for key, price_total in self._home.price_total.items():
        key = dt_util.as_local(dt_util.parse_datetime(key))
        # Only plot today and later.
        if key.date() < now.date():
            continue
        prices.append(price_total)
        dates.append(key)
    hour = now.hour
    # Offset of "now" within the current hour, used for the marker line.
    dt = datetime.timedelta(minutes=now.minute)
    if len(prices) < max(10, hour + 1):
        _LOGGER.warning("No prices")
        return
    # To plot the final hour
    prices.append(prices[-1])
    dates.append(dates[-1] + datetime.timedelta(hours=1))
    plt.close("all")
    plt.style.use("ggplot")
    x_fmt = mdates.DateFormatter("%H", tz=tz.tzlocal())
    fig = plt.figure(figsize=(width / 200, height / 200), dpi=200)
    ax = fig.add_subplot(111)
    ax.grid(which="major", axis="x", linestyle="-", color="gray", alpha=0.25)
    plt.tick_params(
        axis="both",
        which="both",
        bottom=False,
        top=False,
        labelbottom=True,
        left=False,
        right=False,
        labelleft=True,
    )
    # Vertical "now" marker at the current hour + minute offset.
    ax.step(
        [dates[hour] + dt, dates[hour] + dt],
        [min(prices) - 3, max(prices) + 3],
        "r",
        alpha=0.35,
        linestyle="-",
        zorder=2,
        where='post',
    )
    ax.step(dates, prices, "#039be5", where='post')
    if not self.realtime_state:
        ax.fill_between(dates, 0, prices,
                        facecolor="#039be5", alpha=0.25, step='post')
    # Annotate the current hour's price.
    plt.text(
        dates[hour] + dt,
        prices[hour],
        "{:.2f}".format(prices[hour]) + self._home.currency,
        fontsize=14,
        zorder=3,
    )
    # Label local extrema, keeping labels at least `min_length` hours apart.
    min_length = 7 if len(dates) > 25 else 5
    last_hour = -1 * min_length
    for _hour in range(1, len(prices) - 1):
        if abs(_hour - last_hour) < min_length or \
                abs(_hour - hour) < min_length:
            continue
        if (prices[_hour - 1] > prices[_hour] < prices[_hour + 1]) or (
                prices[_hour - 1] < prices[_hour] > prices[_hour + 1]):
            last_hour = _hour
            plt.text(
                dates[_hour],
                prices[_hour],
                str(round(prices[_hour], 2)) + self._home.currency +
                "\nat {:02}:00".format(_hour % 24),
                fontsize=10,
                va="bottom",
                zorder=3,
            )
    ax.set_ylim((min(prices) - 0.005, max(prices) + 0.0075))
    ax.set_xlim((dates[0], dates[-1]))
    ax.set_facecolor("white")
    ax.xaxis.set_major_formatter(x_fmt)
    fig.autofmt_xdate()
    if self.realtime_state is not None:
        # Work out how many past hours of consumption we still need.
        hour_to_fetch = 24
        for _hour in self._cons_data:
            if _hour.get("consumption") is None:
                # NOTE(review): removes from the list being iterated —
                # list iteration tolerates it but entries may be skipped.
                self._cons_data.remove(_hour)
                continue
            hour_to_fetch = (now - dt_util.parse_datetime(
                _hour.get("from"))).total_seconds() / 3600
        if hour_to_fetch > 2:
            cons_data = await self._home.get_historic_data(
                int(hour_to_fetch))
            cons_data = [] if cons_data is None else cons_data
            for key in cons_data:
                if key in self._cons_data:
                    continue
                self._cons_data.append(key)
        dates_cons = []
        cons = []
        for _hour in self._cons_data:
            _cons = _hour.get("consumption")
            if _cons is None:
                continue
            # Center the bar in the middle of its hour.
            date = dt_util.as_local(
                dt_util.parse_datetime(
                    _hour.get("from"))) + datetime.timedelta(minutes=30)
            if date < dates[0]:
                continue
            dates_cons.append(date)
            cons.append(_cons)
        # Consumption bars on a secondary y-axis.
        ax2 = ax.twinx()
        ax2.grid(False)
        ax2.xaxis.set_major_formatter(x_fmt)
        ax2.vlines(
            x=dates_cons,
            ymin=0,
            ymax=cons,
            color="#039be5",
            edgecolor="#c3d5e8",
            alpha=0.6,
            linewidth=8,
            zorder=5,
        )
        acc_cons_hour = self.realtime_state.attributes.get(
            "accumulatedConsumptionLastHour")
        if acc_cons_hour:
            timestamp = dt_util.parse_datetime(
                self.realtime_state.attributes.get("timestamp")).replace(
                    minute=30, second=0)
            ax2.vlines(
                [timestamp],
                0,
                [acc_cons_hour],
                color="#68A7C6",
                linewidth=8,
                edgecolor="#c3d5e8",
                alpha=0.35,
                zorder=5,
            )
    try:
        # savefig does blocking disk I/O; keep it off the event loop.
        await self.hass.async_add_executor_job(fig.savefig, self._path)
    except Exception:  # noqa: E731
        _LOGGER.debug("Failed to generate image", exc_info=True)
    plt.close(fig)
    plt.close("all")
import logging
from homeassistant.util import dt as dt_util
from datetime import timedelta
from csv import writer
import matplotlib.pyplot as plt

_LOGGER = logging.getLogger(__name__)

# Curve-shaping exponents fed to CircadianWhiteSensor below.
TOP_EXPONENT = 2
BOTTOM_EXPONENT = 2.2

if __name__ == '__main__':
    from custom_components.circadian_white.sensor import CircadianWhiteSensor

    # When to simulate
    dt_util.set_default_time_zone(
        dt_util.get_time_zone('Australia/Melbourne'))
    now = dt_util.now()
    day_start = now.replace(hour=7, minute=1, second=42)
    day_middle = now.replace(hour=12, minute=19, second=32)
    day_end = now.replace(hour=17, minute=37, second=26)
    start = now.replace(hour=0, minute=0, second=0)

    def one_day():
        # Drive the sensor through one simulated day using fixed sun times.
        circ = CircadianWhiteSensor('test_math', 2500, 4500, 6500,
                                    TOP_EXPONENT, BOTTOM_EXPONENT)
        circ._x_limit = 2
        circ._day_start = day_start
        circ._day_middle = day_middle
        circ._day_end = day_end
        circ._last_sun_update = start
        circ._calculate_day_events()
        print("Sensor Current Config:")
        # NOTE(review): function body continues beyond this chunk.
def update(self, data: dict = None):
    """Handle an incoming message: count it and timestamp the state."""
    # One more message has arrived.
    self._attrs['msg_received'] = self._attrs['msg_received'] + 1
    # State reflects when the most recent message was seen (second precision).
    received_at = now().isoformat(timespec='seconds')
    self._state = received_at
    self.schedule_update_ha_state()
def native_value(self) -> str:
    """Return the uptime of the client as an ISO-8601 string.

    Both branches return ``.isoformat()`` output, so the return type is
    ``str`` (the previous ``datetime`` annotation was wrong).
    """
    # Small values are a relative uptime in seconds; large values are
    # treated as an absolute epoch timestamp. NOTE(review): the 1e9 cutoff
    # is a heuristic inherited from the data source — confirm upstream.
    if self.client.uptime < 1000000000:
        return (dt_util.now() -
                timedelta(seconds=self.client.uptime)).isoformat()
    return dt_util.utc_from_timestamp(float(self.client.uptime)).isoformat()
def process_event(self, event):
    """
    This is the core logic function.
    The possible states and things that can change our state are:

    Actions:        isensor dsensor timeout arm_home arm_away disarm trigger
    Current State:
    disarmed        X       X       X       armh     pend     *      trig
    pending(T1)     X       X       arma    X        X        dis    trig
    armed(h/a)      trig    warn    X       X        X        dis    trig
    warning(T1)     X       X       trig    X        X        dis    trig
    triggered(T2)   X       X       last    X        X        dis    *

    As the only non-timed states are disarmed, armed_home and armed_away,
    they are the only ones we can return to after an alarm.
    """
    old = self._state
    # Update state if applicable
    if event == Events.Disarm:
        self._state = STATE_ALARM_DISARMED
    elif event == Events.Trigger:
        self._state = STATE_ALARM_TRIGGERED
    elif old == STATE_ALARM_DISARMED:
        if event == Events.ArmHome:
            self._state = STATE_ALARM_ARMED_HOME
        elif event == Events.ArmAway:
            self._state = STATE_ALARM_PENDING
    elif old == STATE_ALARM_PENDING:
        if event == Events.Timeout:
            self._state = STATE_ALARM_ARMED_AWAY
    elif old == STATE_ALARM_ARMED_HOME or \
            old == STATE_ALARM_ARMED_AWAY:
        if event == Events.ImmediateTrip:
            self._state = STATE_ALARM_TRIGGERED
        elif event == Events.DelayedTrip:
            self._state = STATE_ALARM_WARNING
    elif old == STATE_ALARM_WARNING:
        if event == Events.Timeout:
            self._state = STATE_ALARM_TRIGGERED
    elif old == STATE_ALARM_TRIGGERED:
        if event == Events.Timeout:
            self._state = self._returnto
    new = self._state
    if old != new:
        # Fix: use logging's lazy %-style args instead of eager str.format,
        # so formatting only happens when debug logging is enabled.
        _LOGGER.debug("Alarm changing from %s to %s", old, new)
        # Things to do on entering state
        if new == STATE_ALARM_WARNING:
            _LOGGER.debug("Turning on warning")
            switch.turn_on(self._hass, self._warning)
            self._timeoutat = now() + self._pending_time
        elif new == STATE_ALARM_TRIGGERED:
            _LOGGER.debug("Turning on alarm")
            switch.turn_on(self._hass, self._alarm)
            self._timeoutat = now() + self._trigger_time
        elif new == STATE_ALARM_PENDING:
            _LOGGER.debug("Pending user leaving house")
            switch.turn_on(self._hass, self._warning)
            self._timeoutat = now() + self._pending_time
            self._returnto = STATE_ALARM_ARMED_AWAY
            self.setsignals(False)
        elif new == STATE_ALARM_ARMED_HOME:
            self._returnto = new
            self.setsignals(True)
        elif new == STATE_ALARM_DISARMED:
            self._returnto = new
            self.clearsignals()
        # Things to do on leaving state
        if old == STATE_ALARM_WARNING or old == STATE_ALARM_PENDING:
            _LOGGER.debug("Turning off warning")
            switch.turn_off(self._hass, self._warning)
        elif old == STATE_ALARM_TRIGGERED:
            _LOGGER.debug("Turning off alarm")
            switch.turn_off(self._hass, self._alarm)
        # Let HA know that something changed
        self.schedule_update_ha_state()
def time_change_listener(self, eventignored):
    """Periodic tick handler: fire a Timeout once the deadline has passed.

    Time events are treated as a simple poll — simpler than
    (re-/un-)registering one-shot timer listeners.
    """
    deadline = self._timeoutat
    if deadline is None:
        return
    if now() > deadline:
        # Clear the deadline first so the timeout fires exactly once.
        self._timeoutat = None
        self.process_event(Events.Timeout)
def __init__(self, name, unit):
    """Initialize the uptime sensor.

    name: friendly name of the sensor; unit: unit used to report uptime.
    """
    self._name = name
    self._unit = unit
    # Uptime is measured from the moment this sensor was created.
    self.initial = dt_util.now()
    # No state until the first update runs.
    self._state = None
async def async_run(self):
    """Entities updater loop.

    Consumes parsed MiBeacon advertisements from self.dataqueue, creates
    sensor entities per MAC on first sight, feeds measurements into them,
    and periodically flushes pending updates to Home Assistant.
    """

    def temperature_limit(config, mac, temp):
        """Set limits for temperature measurement in °C or °F."""
        # Re-insert colons so the MAC matches the configured format.
        fmac = ':'.join(mac[i:i + 2] for i in range(0, len(mac), 2))
        if config[CONF_DEVICES]:
            for device in config[CONF_DEVICES]:
                if fmac in device["mac"].upper():
                    if CONF_TEMPERATURE_UNIT in device:
                        if device[
                                CONF_TEMPERATURE_UNIT] == TEMP_FAHRENHEIT:
                            temp_fahrenheit = temp * 9 / 5 + 32
                            return temp_fahrenheit
                    break
        return temp

    _LOGGER.debug("Entities updater loop started!")
    sensors_by_mac = {}
    sensors = []
    batt = {}  # batteries
    rssi = {}
    mibeacon_cnt = 0
    ts_last = dt_util.now()
    ts_now = ts_last
    data = None
    await asyncio.sleep(0)
    while True:
        try:
            # advevent = self.dataqueue.get(block=True, timeout=1)
            advevent = await asyncio.wait_for(self.dataqueue.get(), 1)
            if advevent is None:
                # A None sentinel on the queue shuts the loop down.
                _LOGGER.debug("Entities updater loop stopped")
                return True
            data = advevent
            self.dataqueue.task_done()
        # except queue.Empty:
        except asyncio.TimeoutError:
            pass
        if data:
            mibeacon_cnt += 1
            mac = data["mac"]
            # the RSSI value will be averaged for all valuable packets
            if mac not in rssi:
                rssi[mac] = []
            rssi[mac].append(int(data["rssi"]))
            batt_attr = None
            sensortype = data["type"]
            # Per-type index table: 9 means "this measurement not present".
            t_i, h_i, m_i, c_i, i_i, f_i, cn_i, b_i = MMTS_DICT[
                sensortype][0]
            if mac not in sensors_by_mac:
                # First packet from this device: create its entities.
                sensors = []
                if t_i != 9:
                    sensors.insert(
                        t_i, TemperatureSensor(self.config, mac, sensortype))
                if h_i != 9:
                    sensors.insert(
                        h_i, HumiditySensor(self.config, mac, sensortype))
                if m_i != 9:
                    sensors.insert(
                        m_i, MoistureSensor(self.config, mac, sensortype))
                if c_i != 9:
                    sensors.insert(
                        c_i, ConductivitySensor(self.config, mac, sensortype))
                if i_i != 9:
                    sensors.insert(
                        i_i, IlluminanceSensor(self.config, mac, sensortype))
                if f_i != 9:
                    sensors.insert(
                        f_i, FormaldehydeSensor(self.config, mac, sensortype))
                if cn_i != 9:
                    sensors.insert(
                        cn_i, ConsumableSensor(self.config, mac, sensortype))
                if self.batt_entities and (b_i != 9):
                    sensors.insert(
                        b_i, BatterySensor(self.config, mac, sensortype))
                if len(sensors) != 0:
                    sensors_by_mac[mac] = sensors
                    self.add_entities(sensors)
            else:
                sensors = sensors_by_mac[mac]
            if data["data"] is False:
                data = None
                continue
            # store found readings per device
            if (b_i != 9):
                if "battery" in data:
                    batt[mac] = int(data["battery"])
                    batt_attr = batt[mac]
                    if self.batt_entities:
                        sensors[b_i].collect(data)
                else:
                    # Fall back to the last known battery level, if any.
                    try:
                        batt_attr = batt[mac]
                    except KeyError:
                        batt_attr = None
            # measuring sensors
            if "temperature" in data:
                # dirty hack for kettle temperature data
                if sensortype in KETTLES:
                    entity = sensors[t_i]
                    entity.collect(data, batt_attr)
                    if entity.ready_for_update is True:
                        entity.rssi_values = rssi[mac].copy()
                        entity.async_schedule_update_ha_state(True)
                        rssi[mac].clear()
                        entity.pending_update = False
                else:
                    # Reject readings outside the (possibly °F) limits.
                    if (temperature_limit(self.config, mac, CONF_TMAX)
                            >= data["temperature"] >= temperature_limit(
                                self.config, mac, CONF_TMIN)):
                        sensors[t_i].collect(data, batt_attr)
                    elif self.log_spikes:
                        _LOGGER.error(
                            "Temperature spike: %s (%s)",
                            data["temperature"],
                            mac,
                        )
            if "humidity" in data:
                if CONF_HMAX >= data["humidity"] >= CONF_HMIN:
                    sensors[h_i].collect(data, batt_attr)
                elif self.log_spikes:
                    _LOGGER.error(
                        "Humidity spike: %s (%s)",
                        data["humidity"],
                        mac,
                    )
            if "conductivity" in data:
                sensors[c_i].collect(data, batt_attr)
            if "moisture" in data:
                sensors[m_i].collect(data, batt_attr)
            if "illuminance" in data:
                sensors[i_i].collect(data, batt_attr)
            if "formaldehyde" in data:
                sensors[f_i].collect(data, batt_attr)
            if "consumable" in data:
                sensors[cn_i].collect(data, batt_attr)
            data = None
        ts_now = dt_util.now()
        # Only flush entity updates once per reporting period.
        if ts_now - ts_last < timedelta(seconds=self.period):
            continue
        ts_last = ts_now
        # restarting scanner
        self.monitor.restart()
        # for every updated device
        for mac, elist in sensors_by_mac.items():
            for entity in elist:
                if entity.pending_update is True:
                    if entity.ready_for_update is True:
                        entity.rssi_values = rssi[mac].copy()
                        entity.async_schedule_update_ha_state(True)
        for mac in rssi:
            rssi[mac].clear()
        _LOGGER.debug(
            "%i MiBeacon BLE ADV messages processed for %i measuring "
            "device(s).",
            mibeacon_cnt,
            len(sensors_by_mac),
        )
        mibeacon_cnt = 0
def _update_period(self):  # pylint: disable=r0912
    """Parse the templates and calculate a datetime tuples.

    Renders the start/end templates (each either a datetime string or a
    unix timestamp), optionally derives the missing bound from
    self._duration, clamps the period to "now", and stores the result in
    self._period plus the ISO-formatted self.start / self.end.
    """
    start = end = None
    now = dt_util.now()
    # Parse start
    _LOGGER.debug("Process start template: %s", self._start_template)
    if self._start_template is not None:
        try:
            start_rendered = self._start_template.render()
        except (TemplateError, TypeError) as ex:
            self.handle_template_exception(ex, "start")
            return
        if isinstance(start_rendered, str):
            start = dt_util.parse_datetime(start_rendered)
        if start is None:
            # Not a datetime string: try to interpret it as a timestamp.
            try:
                start = dt_util.as_local(
                    dt_util.utc_from_timestamp(
                        math.floor(float(start_rendered))))
            except ValueError:
                # Fix: the implicit string concatenation was missing a
                # space ("...a datetimeor a timestamp").
                _LOGGER.error("Parsing error: start must be a datetime "
                              "or a timestamp")
                return
    # Parse end
    _LOGGER.debug("Process end template: %s", self._end_template)
    if self._end_template is not None:
        try:
            end_rendered = self._end_template.render()
        except (TemplateError, TypeError) as ex:
            self.handle_template_exception(ex, "end")
            return
        if isinstance(end_rendered, str):
            end = dt_util.parse_datetime(end_rendered)
        if end is None:
            try:
                end = dt_util.as_local(
                    dt_util.utc_from_timestamp(
                        math.floor(float(end_rendered))))
            except ValueError:
                _LOGGER.error("Parsing error: end must be a datetime "
                              "or a timestamp")
                return
    # Calculate start or end using the duration
    _LOGGER.debug("Process duration: %s", self._duration)
    if self._duration is not None:
        if start is None:
            if end is None:
                end = now
            start = end - self._duration
        else:
            end = start + self._duration
    _LOGGER.debug("Start: %s, End: %s", start, end)
    if start is None or end is None:
        return
    if start > now:
        # History hasn't been written yet for this period
        return
    if now < end:
        # No point in making stats of the future
        end = now
    self._period = start, end
    self.start = start.replace(microsecond=0).isoformat()
    self.end = end.replace(microsecond=0).isoformat()
async def test_device_registry_calls(hass):
    """Test device registry entries for hassio.

    Verifies that supervisor add-on changes are reflected in the device
    registry on the hourly update: removed add-ons drop their device,
    added add-ons trigger a reload that registers a new one.
    """
    dev_reg = async_get(hass)
    supervisor_mock_data = {
        "addons": [
            {
                "name": "test",
                "slug": "test",
                "installed": True,
                "update_available": False,
                "version": "1.0.0",
                "version_latest": "1.0.0",
                "repository": "test",
                "url": "https://github.com/home-assistant/addons/test",
            },
            {
                "name": "test2",
                "slug": "test2",
                "installed": True,
                "update_available": False,
                "version": "1.0.0",
                "version_latest": "1.0.0",
                "url": "https://github.com",
            },
        ]
    }
    os_mock_data = {
        "board": "odroid-n2",
        "boot": "A",
        "update_available": False,
        "version": "5.12",
        "version_latest": "5.12",
    }
    # Initial setup: two add-ons + the OS -> three registry devices.
    with patch.dict(os.environ, MOCK_ENVIRON), patch(
        "homeassistant.components.hassio.HassIO.get_supervisor_info",
        return_value=supervisor_mock_data,
    ), patch(
        "homeassistant.components.hassio.HassIO.get_os_info",
        return_value=os_mock_data,
    ):
        config_entry = MockConfigEntry(domain=DOMAIN, data={},
                                       unique_id=DOMAIN)
        config_entry.add_to_hass(hass)
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()

    assert len(dev_reg.devices) == 3

    supervisor_mock_data = {
        "addons": [
            {
                "name": "test2",
                "slug": "test2",
                "installed": True,
                "update_available": False,
                "version": "1.0.0",
                "version_latest": "1.0.0",
                "url": "https://github.com",
            },
        ]
    }

    # Test that when addon is removed, next update will remove the add-on
    # and subsequent updates won't
    with patch(
        "homeassistant.components.hassio.HassIO.get_supervisor_info",
        return_value=supervisor_mock_data,
    ), patch(
        "homeassistant.components.hassio.HassIO.get_os_info",
        return_value=os_mock_data,
    ):
        async_fire_time_changed(hass, dt_util.now() + timedelta(hours=1))
        await hass.async_block_till_done()

        assert len(dev_reg.devices) == 2

        async_fire_time_changed(hass, dt_util.now() + timedelta(hours=2))
        await hass.async_block_till_done()

        assert len(dev_reg.devices) == 2

    supervisor_mock_data = {
        "addons": [
            {
                "name": "test2",
                "slug": "test2",
                "installed": True,
                "update_available": False,
                "version": "1.0.0",
                "version_latest": "1.0.0",
                "url": "https://github.com",
            },
            {
                "name": "test3",
                "slug": "test3",
                "installed": True,
                "update_available": False,
                "version": "1.0.0",
                "version_latest": "1.0.0",
                "url": "https://github.com",
            },
        ]
    }

    # Test that when addon is added, next update will reload the entry so we
    # register a new device
    with patch(
        "homeassistant.components.hassio.HassIO.get_supervisor_info",
        return_value=supervisor_mock_data,
    ), patch(
        "homeassistant.components.hassio.HassIO.get_os_info",
        return_value=os_mock_data,
    ):
        async_fire_time_changed(hass, dt_util.now() + timedelta(hours=3))
        await hass.async_block_till_done()

        assert len(dev_reg.devices) == 3
def get_next_departure(
    schedule: Any,
    start_station_id: Any,
    end_station_id: Any,
    offset: cv.time_period,
    include_tomorrow: bool = False,
) -> dict:
    """Get the next departure for the given schedule.

    Queries the GTFS SQLite schedule for departures between the two stops
    over yesterday/today (and optionally tomorrow), builds a timetable
    keyed by local departure datetime, and returns details for the first
    departure after now+offset (empty dict if none).
    """
    now = dt_util.now().replace(tzinfo=None) + offset
    now_date = now.strftime(dt_util.DATE_STR_FORMAT)
    yesterday = now - datetime.timedelta(days=1)
    yesterday_date = yesterday.strftime(dt_util.DATE_STR_FORMAT)
    tomorrow = now + datetime.timedelta(days=1)
    tomorrow_date = tomorrow.strftime(dt_util.DATE_STR_FORMAT)

    # Fetch all departures for yesterday, today and optionally tomorrow,
    # up to an overkill maximum in case of a departure every minute for those
    # days.
    limit = 24 * 60 * 60 * 2
    tomorrow_select = tomorrow_where = tomorrow_order = ""
    if include_tomorrow:
        limit = int(limit / 2 * 3)
        tomorrow_name = tomorrow.strftime("%A").lower()
        tomorrow_select = f"calendar.{tomorrow_name} AS tomorrow,"
        tomorrow_where = f"OR calendar.{tomorrow_name} = 1"
        tomorrow_order = f"calendar.{tomorrow_name} DESC,"

    # NOTE(review): day-name columns come from strftime("%A"), not user
    # input, so the f-string interpolation into SQL is safe here; all
    # user-supplied values go through bound parameters.
    sql_query = f"""
        SELECT trip.trip_id, trip.route_id,
               time(origin_stop_time.arrival_time) AS origin_arrival_time,
               time(origin_stop_time.departure_time) AS origin_depart_time,
               date(origin_stop_time.departure_time) AS origin_depart_date,
               origin_stop_time.drop_off_type AS origin_drop_off_type,
               origin_stop_time.pickup_type AS origin_pickup_type,
               origin_stop_time.shape_dist_traveled AS origin_dist_traveled,
               origin_stop_time.stop_headsign AS origin_stop_headsign,
               origin_stop_time.stop_sequence AS origin_stop_sequence,
               origin_stop_time.timepoint AS origin_stop_timepoint,
               time(destination_stop_time.arrival_time) AS dest_arrival_time,
               time(destination_stop_time.departure_time) AS dest_depart_time,
               destination_stop_time.drop_off_type AS dest_drop_off_type,
               destination_stop_time.pickup_type AS dest_pickup_type,
               destination_stop_time.shape_dist_traveled AS dest_dist_traveled,
               destination_stop_time.stop_headsign AS dest_stop_headsign,
               destination_stop_time.stop_sequence AS dest_stop_sequence,
               destination_stop_time.timepoint AS dest_stop_timepoint,
               calendar.{yesterday.strftime("%A").lower()} AS yesterday,
               calendar.{now.strftime("%A").lower()} AS today,
               {tomorrow_select}
               calendar.start_date AS start_date,
               calendar.end_date AS end_date
        FROM trips trip
        INNER JOIN calendar calendar
                   ON trip.service_id = calendar.service_id
        INNER JOIN stop_times origin_stop_time
                   ON trip.trip_id = origin_stop_time.trip_id
        INNER JOIN stops start_station
                   ON origin_stop_time.stop_id = start_station.stop_id
        INNER JOIN stop_times destination_stop_time
                   ON trip.trip_id = destination_stop_time.trip_id
        INNER JOIN stops end_station
                   ON destination_stop_time.stop_id = end_station.stop_id
        WHERE (calendar.{yesterday.strftime("%A").lower()} = 1
               OR calendar.{now.strftime("%A").lower()} = 1
               {tomorrow_where}
               )
        AND start_station.stop_id = :origin_station_id
                   AND end_station.stop_id = :end_station_id
        AND origin_stop_sequence < dest_stop_sequence
        AND calendar.start_date <= :today
        AND calendar.end_date >= :today
        ORDER BY calendar.{yesterday.strftime("%A").lower()} DESC,
                 calendar.{now.strftime("%A").lower()} DESC,
                 {tomorrow_order}
                 origin_stop_time.departure_time
        LIMIT :limit
        """
    result = schedule.engine.execute(
        text(sql_query),
        origin_station_id=start_station_id,
        end_station_id=end_station_id,
        today=now_date,
        limit=limit,
    )

    # Create lookup timetable for today and possibly tomorrow, taking into
    # account any departures from yesterday scheduled after midnight,
    # as long as all departures are within the calendar date range.
    timetable = {}
    yesterday_start = today_start = tomorrow_start = None
    yesterday_last = today_last = ""
    for row in result:
        if row["yesterday"] == 1 and yesterday_date >= row["start_date"]:
            extras = {"day": "yesterday", "first": None, "last": False}
            if yesterday_start is None:
                yesterday_start = row["origin_depart_date"]
            if yesterday_start != row["origin_depart_date"]:
                # Past-midnight departure from yesterday's service day:
                # index it under today's date.
                idx = f"{now_date} {row['origin_depart_time']}"
                timetable[idx] = {**row, **extras}
                yesterday_last = idx
        if row["today"] == 1:
            extras = {"day": "today", "first": False, "last": False}
            if today_start is None:
                today_start = row["origin_depart_date"]
                extras["first"] = True
            if today_start == row["origin_depart_date"]:
                idx_prefix = now_date
            else:
                idx_prefix = tomorrow_date
            idx = f"{idx_prefix} {row['origin_depart_time']}"
            timetable[idx] = {**row, **extras}
            today_last = idx
        if (
            "tomorrow" in row
            and row["tomorrow"] == 1
            and tomorrow_date <= row["end_date"]
        ):
            extras = {"day": "tomorrow", "first": False, "last": None}
            if tomorrow_start is None:
                tomorrow_start = row["origin_depart_date"]
                extras["first"] = True
            if tomorrow_start == row["origin_depart_date"]:
                idx = f"{tomorrow_date} {row['origin_depart_time']}"
                timetable[idx] = {**row, **extras}

    # Flag last departures.
    for idx in filter(None, [yesterday_last, today_last]):
        timetable[idx]["last"] = True

    _LOGGER.debug("Timetable: %s", sorted(timetable.keys()))

    item = {}
    for key in sorted(timetable.keys()):
        if dt_util.parse_datetime(key) > now:
            item = timetable[key]
            _LOGGER.debug(
                "Departure found for station %s @ %s -> %s",
                start_station_id, key, item
            )
            break
    if item == {}:
        return {}

    # Format arrival and departure dates and times, accounting for the
    # possibility of times crossing over midnight.
    origin_arrival = now
    if item["origin_arrival_time"] > item["origin_depart_time"]:
        origin_arrival -= datetime.timedelta(days=1)
    origin_arrival_time = (
        f"{origin_arrival.strftime(dt_util.DATE_STR_FORMAT)} "
        f"{item['origin_arrival_time']}"
    )

    origin_depart_time = f"{now_date} {item['origin_depart_time']}"

    dest_arrival = now
    if item["dest_arrival_time"] < item["origin_depart_time"]:
        dest_arrival += datetime.timedelta(days=1)
    dest_arrival_time = (
        f"{dest_arrival.strftime(dt_util.DATE_STR_FORMAT)} "
        f"{item['dest_arrival_time']}"
    )

    dest_depart = dest_arrival
    if item["dest_depart_time"] < item["dest_arrival_time"]:
        dest_depart += datetime.timedelta(days=1)
    dest_depart_time = (
        f"{dest_depart.strftime(dt_util.DATE_STR_FORMAT)} "
        f"{item['dest_depart_time']}"
    )

    depart_time = dt_util.parse_datetime(origin_depart_time)
    arrival_time = dt_util.parse_datetime(dest_arrival_time)

    origin_stop_time = {
        "Arrival Time": origin_arrival_time,
        "Departure Time": origin_depart_time,
        "Drop Off Type": item["origin_drop_off_type"],
        "Pickup Type": item["origin_pickup_type"],
        "Shape Dist Traveled": item["origin_dist_traveled"],
        "Headsign": item["origin_stop_headsign"],
        "Sequence": item["origin_stop_sequence"],
        "Timepoint": item["origin_stop_timepoint"],
    }

    destination_stop_time = {
        "Arrival Time": dest_arrival_time,
        "Departure Time": dest_depart_time,
        "Drop Off Type": item["dest_drop_off_type"],
        "Pickup Type": item["dest_pickup_type"],
        "Shape Dist Traveled": item["dest_dist_traveled"],
        "Headsign": item["dest_stop_headsign"],
        "Sequence": item["dest_stop_sequence"],
        "Timepoint": item["dest_stop_timepoint"],
    }

    return {
        "trip_id": item["trip_id"],
        "route_id": item["route_id"],
        "day": item["day"],
        "first": item["first"],
        "last": item["last"],
        "departure_time": depart_time,
        "arrival_time": arrival_time,
        "origin_stop_time": origin_stop_time,
        "destination_stop_time": destination_stop_time,
    }
def __init__(self, hass, username, password, name, ignored_devices,
             getevents):
    # pylint: disable=too-many-arguments
    """Initialize the iCloud account: log in, discover devices and,
    optionally, build current/next calendar event entities.

    NOTE(review): nesting reconstructed from collapsed source — verify
    against the original file.
    """
    self.hass = hass
    self.username = username
    self.password = password
    self.accountname = name
    self._max_wait_seconds = 120
    self._request_interval_seconds = 10
    self._interval = 1
    self.api = None
    self.devices = {}
    self.getevents = getevents
    self.events = {}
    self.currentevents = {}
    self.nextevents = {}
    self._ignored_devices = ignored_devices
    self._ignored_identifiers = {}

    self.entity_id = generate_entity_id(
        ENTITY_ID_FORMAT_ICLOUD, self.accountname, hass=self.hass)

    if self.username is None or self.password is None:
        _LOGGER.error('Must specify a username and password')
    else:
        try:
            # Attempt the login to iCloud
            self.api = PyiCloudService(self.username,
                                       self.password,
                                       verify=True)
            for device in self.api.devices:
                status = device.status(DEVICESTATUSSET)
                # Normalize the device name to a slug-like identifier.
                devicename = re.sub(r"(\s|\W|')", '',
                                    status['name']).lower()
                if (devicename not in self.devices and
                        devicename not in self._ignored_devices):
                    idevice = IDevice(self.hass, self, devicename, device)
                    idevice.update_ha_state()
                    self.devices[devicename] = idevice
                elif devicename in self._ignored_devices:
                    self._ignored_identifiers[devicename] = device

            if self.getevents:
                # Look one week ahead for calendar events.
                from_dt = dt_util.now()
                to_dt = from_dt + timedelta(days=7)
                events = self.api.calendar.events(from_dt, to_dt)
                new_events = sorted(events.list_of_dict,
                                    key=operator.attrgetter('startDate'))
                starttime = None
                endtime = None
                duration = None
                title = None
                tz = pytz.utc
                location = None
                guid = None
                # First pass: events happening right now (TYPE_CURRENT).
                for event in new_events:
                    tz = event['tz']
                    if tz is None:
                        tz = pytz.utc
                    else:
                        tz = timezone(tz)
                    tempnow = dt_util.now(tz)
                    guid = event['guid']
                    # iCloud date arrays: [?, year, month, day, hour, min].
                    starttime = event['startDate']
                    startdate = datetime(starttime[1], starttime[2],
                                         starttime[3], starttime[4],
                                         starttime[5], 0, 0, tz)
                    endtime = event['endDate']
                    enddate = datetime(endtime[1], endtime[2], endtime[3],
                                       endtime[4], endtime[5], 0, 0, tz)
                    duration = event['duration']
                    title = event['title']
                    location = event['location']
                    # Compare as sortable timestamp strings.
                    strnow = tempnow.strftime("%Y%m%d%H%M%S")
                    strstart = startdate.strftime("%Y%m%d%H%M%S")
                    strend = enddate.strftime("%Y%m%d%H%M%S")
                    if strnow > strstart and strend > strnow:
                        ievent = IEvent(self.hass, self, guid, TYPE_CURRENT)
                        ievent.update_ha_state()
                        self.currentevents[guid] = ievent
                        self.currentevents[guid].keep_alive(starttime,
                                                            endtime,
                                                            duration,
                                                            title, tz,
                                                            location)
                    starttime = None
                    endtime = None
                    duration = None
                    title = None
                    tz = pytz.utc
                    location = None
                    guid = None
                starttime = None
                endtime = None
                duration = None
                title = None
                tz = pytz.utc
                location = None
                guid = None
                # Second pass: future events (TYPE_NEXT).
                for event in new_events:
                    tz = event['tz']
                    if tz is None:
                        tz = pytz.utc
                    else:
                        tz = timezone(tz)
                    tempnow = dt_util.now(tz)
                    guid = event['guid']
                    starttime = event['startDate']
                    startdate = datetime(starttime[1], starttime[2],
                                         starttime[3], starttime[4],
                                         starttime[5], 0, 0, tz)
                    endtime = event['endDate']
                    enddate = datetime(endtime[1], endtime[2], endtime[3],
                                       endtime[4], endtime[5], 0, 0, tz)
                    duration = event['duration']
                    title = event['title']
                    location = event['location']
                    strnow = tempnow.strftime("%Y%m%d%H%M%S")
                    strstart = startdate.strftime("%Y%m%d%H%M%S")
                    strend = enddate.strftime("%Y%m%d%H%M%S")
                    if strnow < strstart:
                        ievent = IEvent(self.hass, self, guid, TYPE_NEXT)
                        ievent.update_ha_state()
                        self.nextevents[guid] = ievent
                        self.nextevents[guid].keep_alive(starttime, endtime,
                                                         duration, title,
                                                         tz, location)
        except PyiCloudFailedLoginException as error:
            _LOGGER.error('Error logging into iCloud Service: %s', error)
def update(self):
    """Refresh the state with the current time in the configured zone."""
    current = dt_util.now(time_zone=self._time_zone)
    self._state = dt_util.datetime_to_time_str(current)
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the Workday sensor.

    Builds the country/province holiday calendar for the (offset) target
    year, applies configured custom additions/removals, and registers one
    IsWorkdaySensor entity.
    """
    add_holidays = config[CONF_ADD_HOLIDAYS]
    remove_holidays = config[CONF_REMOVE_HOLIDAYS]
    country = config[CONF_COUNTRY]
    days_offset = config[CONF_OFFSET]
    excludes = config[CONF_EXCLUDES]
    province = config.get(CONF_PROVINCE)
    sensor_name = config[CONF_NAME]
    workdays = config[CONF_WORKDAYS]

    # Build holidays for the year the (possibly offset) sensor refers to.
    year = (get_date(dt.now()) + timedelta(days=days_offset)).year
    obj_holidays = getattr(holidays, country)(years=year)

    if province:
        # 'state' and 'prov' are not interchangeable, so need to make
        # sure we use the right one
        if hasattr(obj_holidays, "PROVINCES") and \
                province in obj_holidays.PROVINCES:
            obj_holidays = getattr(holidays, country)(prov=province,
                                                      years=year)
        elif hasattr(obj_holidays, "STATES") and \
                province in obj_holidays.STATES:
            obj_holidays = getattr(holidays, country)(state=province,
                                                      years=year)
        else:
            _LOGGER.error("There is no province/state %s in country %s",
                          province, country)
            return

    # Add custom holidays
    try:
        obj_holidays.append(add_holidays)
    except TypeError:
        _LOGGER.debug("No custom holidays or invalid holidays")

    # Remove holidays
    try:
        for date in remove_holidays:
            try:
                # is this formatted as a date?
                if dt.parse_date(date):
                    # remove holiday by date
                    removed = obj_holidays.pop(date)
                    _LOGGER.debug("Removed %s", date)
                else:
                    # remove holiday by name
                    _LOGGER.debug("Treating '%s' as named holiday", date)
                    removed = obj_holidays.pop_named(date)
                    for holiday in removed:
                        _LOGGER.debug("Removed %s by name '%s'",
                                      holiday, date)
            except KeyError as unmatched:
                _LOGGER.warning("No holiday found matching %s", unmatched)
    except TypeError:
        _LOGGER.debug("No holidays to remove or invalid holidays")

    _LOGGER.debug("Found the following holidays for your configuration:")
    for date, name in sorted(obj_holidays.items()):
        _LOGGER.debug("%s %s", date, name)

    add_entities(
        [
            IsWorkdaySensor(obj_holidays, workdays, excludes, days_offset,
                            sensor_name)
        ],
        True,
    )
async def sync_time(now: datetime) -> None:
    """Push Home Assistant's current local time to the spa.

    Fix: ``str(datetime)`` omits the ``.%f`` fraction when microsecond is
    exactly 0, so parsing it back with a ``.%f``-bearing pattern raised
    ValueError intermittently. Formatting explicitly with strftime
    guarantees the string always matches the parse pattern.
    """
    _LOGGER.debug("Syncing time with Home Assistant")
    await spa.set_time(
        time.strptime(
            dt_util.now().strftime("%Y-%m-%d %H:%M:%S.%f%z"),
            "%Y-%m-%d %H:%M:%S.%f%z"))
def keep_alive(self):
    """ Keeps the api alive

    Re-authenticates, refreshes every tracked device, and reconciles the
    current/next calendar event entities against a fresh 7-day event scan.

    Fix: the stale-event cleanup loops deleted entries from
    self.currentevents / self.nextevents while iterating the dicts
    directly, which raises ``RuntimeError: dictionary changed size during
    iteration``; they now iterate over a snapshot via ``list(...)``.
    """
    if self.api is None:
        try:
            # Attempt the login to iCloud
            self.api = PyiCloudService(self.username,
                                       self.password,
                                       verify=True)
        except PyiCloudFailedLoginException as error:
            _LOGGER.error('Error logging into iCloud Service: %s', error)
    if self.api is not None:
        self.api.authenticate()
        for devicename in self.devices:
            self.devices[devicename].keep_alive()
        if self.getevents:
            from_dt = dt_util.now()
            to_dt = from_dt + timedelta(days=7)
            events = self.api.calendar.events(from_dt, to_dt)
            new_events = sorted(events.list_of_dict,
                                key=operator.attrgetter('startDate'))
            starttime = None
            endtime = None
            duration = None
            title = None
            tz = pytz.utc
            location = None
            guid = None
            # First pass: register events happening right now.
            for event in new_events:
                tz = event['tz']
                if tz is None:
                    tz = pytz.utc
                else:
                    tz = timezone(tz)
                tempnow = dt_util.now(tz)
                guid = event['guid']
                starttime = event['startDate']
                startdate = datetime(starttime[1], starttime[2],
                                     starttime[3], starttime[4],
                                     starttime[5], 0, 0, tz)
                endtime = event['endDate']
                enddate = datetime(endtime[1], endtime[2], endtime[3],
                                   endtime[4], endtime[5], 0, 0, tz)
                duration = event['duration']
                title = event['title']
                location = event['location']
                strnow = tempnow.strftime("%Y%m%d%H%M%S")
                strstart = startdate.strftime("%Y%m%d%H%M%S")
                strend = enddate.strftime("%Y%m%d%H%M%S")
                if strnow > strstart and strend > strnow:
                    if guid not in self.currentevents:
                        ievent = IEvent(self.hass, self, guid,
                                        TYPE_CURRENT)
                        ievent.update_ha_state()
                        self.currentevents[guid] = ievent
                        self.currentevents[guid].keep_alive(starttime,
                                                            endtime,
                                                            duration,
                                                            title, tz,
                                                            location)
                starttime = None
                endtime = None
                duration = None
                title = None
                tz = pytz.utc
                location = None
                guid = None
            # Drop current events that no longer appear in the scan.
            # list(...) snapshot allows deletion during the loop.
            for addedevent in list(self.currentevents):
                found = False
                eventguid = self.currentevents[addedevent].eventguid
                for event in new_events:
                    if event['guid'] == eventguid:
                        found = True
                if not found:
                    ent_id = generate_entity_id(ENTITY_ID_FORMAT_EVENT,
                                                eventguid,
                                                hass=self.hass)
                    self.hass.states.remove(ent_id)
                    del self.currentevents[addedevent]
                else:
                    self.currentevents[addedevent].check_alive()
            starttime = None
            endtime = None
            duration = None
            title = None
            tz = pytz.utc
            location = None
            guid = None
            # Second pass: register upcoming events.
            for event in new_events:
                tz = event['tz']
                if tz is None:
                    tz = pytz.utc
                else:
                    tz = timezone(tz)
                tempnow = dt_util.now(tz)
                guid = event['guid']
                starttime = event['startDate']
                startdate = datetime(starttime[1], starttime[2],
                                     starttime[3], starttime[4],
                                     starttime[5], 0, 0, tz)
                endtime = event['endDate']
                enddate = datetime(endtime[1], endtime[2], endtime[3],
                                   endtime[4], endtime[5], 0, 0, tz)
                duration = event['duration']
                title = event['title']
                location = event['location']
                strnow = tempnow.strftime("%Y%m%d%H%M%S")
                strstart = startdate.strftime("%Y%m%d%H%M%S")
                strend = enddate.strftime("%Y%m%d%H%M%S")
                if strnow < strstart:
                    if guid not in self.nextevents:
                        ievent = IEvent(self.hass, self, guid, TYPE_NEXT)
                        ievent.update_ha_state()
                        self.nextevents[guid] = ievent
                        self.nextevents[guid].keep_alive(starttime,
                                                         endtime,
                                                         duration, title,
                                                         tz, location)
            # Drop next events that no longer appear in the scan.
            for addedevent in list(self.nextevents):
                found = False
                eventguid = self.nextevents[addedevent].eventguid
                for event in new_events:
                    if event['guid'] == eventguid:
                        found = True
                if not found:
                    ent_id = generate_entity_id(ENTITY_ID_FORMAT_EVENT,
                                                eventguid,
                                                hass=self.hass)
                    self.hass.states.remove(ent_id)
                    del self.nextevents[addedevent]
                else:
                    self.nextevents[addedevent].check_alive()
def process_image(self, image):
    """Process a camera frame: optionally rescale it, run Deepstack object
    detection, filter detections against configured targets / confidence /
    ROI, save an annotated image, and fire one event per target found.

    Args:
        image: raw image bytes from the camera.

    Side effects: updates ``_image``, ``_state``, ``_objects``,
    ``_targets_found``, ``_last_detection`` and fires
    ``EVENT_OBJECT_DETECTED`` events on the bus.
    """
    self._image = Image.open(io.BytesIO(bytearray(image)))
    self._image_width, self._image_height = self._image.size

    # resize image if different then default
    # (note: the constant name DEAULT_SCALE is a pre-existing typo kept
    # for compatibility with the rest of the file)
    if self._scale != DEAULT_SCALE:
        # Fix: scale both dimensions from their own size; the original
        # used _image_width for both axes. thumbnail() preserves aspect
        # ratio, but the bounding box should still be (w*s, h*s).
        newsize = (
            self._image_width * self._scale,
            self._image_height * self._scale,
        )
        self._image.thumbnail(newsize, Image.ANTIALIAS)
        self._image_width, self._image_height = self._image.size
        with io.BytesIO() as output:
            self._image.save(output, format="JPEG")
            image = output.getvalue()
        # Lazy %-style args avoid building the message when debug is off.
        _LOGGER.debug(
            "Image scaled with : %s W=%s H=%s",
            self._scale,
            self._image_width,
            self._image_height,
        )

    self._state = None
    self._objects = []  # The parsed raw data
    self._targets_found = []
    saved_image_path = None

    try:
        predictions = self._dsobject.detect(image)
    except ds.DeepstackException as exc:
        _LOGGER.error("Deepstack error : %s", exc)
        return

    self._objects = get_objects(predictions, self._image_width,
                                self._image_height)
    self._targets_found = []

    for obj in self._objects:
        if not ((obj["name"] in self._targets_names)
                or (obj["object_type"] in self._targets_names)):
            continue
        ## Then check if the type has a configured confidence, if yes assign
        ## Then if a confidence for a named object, this takes precedence over type confidence
        confidence = None
        for target in self._targets:
            if obj["object_type"] == target[CONF_TARGET]:
                confidence = target[CONF_CONFIDENCE]
        for target in self._targets:
            if obj["name"] == target[CONF_TARGET]:
                confidence = target[CONF_CONFIDENCE]
        # Guard: skip objects with no configured confidence instead of
        # raising TypeError on a None comparison.
        if confidence is not None and obj["confidence"] > confidence:
            if not object_in_roi(self._roi_dict, obj["centroid"]):
                continue
            self._targets_found.append(obj)

    self._state = len(self._targets_found)
    if self._state > 0:
        self._last_detection = dt_util.now().strftime(DATETIME_FORMAT)
    if self._save_file_folder and self._state > 0:
        saved_image_path = self.save_image(
            self._targets_found,
            self._save_file_folder,
        )

    # Fire events
    for target in self._targets_found:
        target_event_data = target.copy()
        target_event_data[ATTR_ENTITY_ID] = self.entity_id
        if saved_image_path:
            target_event_data[SAVED_FILE] = saved_image_path
        self.hass.bus.fire(EVENT_OBJECT_DETECTED, target_event_data)
def keep_alive(self, starttime, endtime, duration, title, tz, location):
    """Refresh this calendar-event entity's state.

    Args:
        starttime/endtime: sequences whose fields [1..5] are year, month,
            day, hour, minute (or None when absent).
        duration: event duration, stored as-is.
        title: event title; None becomes 'Free' for current/next events.
        tz: tzinfo for the event, or None for UTC.
        location: event location, stored as-is.

    Computes ``_remaining`` (minutes until start for TYPE_NEXT, until end
    for TYPE_CURRENT) and removes the entity once it reaches zero.
    """
    current = self._type == TYPE_CURRENT
    nextev = self._type == TYPE_NEXT
    self._remaining = 0
    # NOTE(review): tempnow is built with the raw tz argument (possibly
    # None -> local time) while _tz falls back to UTC below — confirm the
    # mixed usage is intended.
    tempnow = dt_util.now(tz)
    if tz is None:
        self._tz = pytz.utc
    else:
        self._tz = tz
    if starttime is None:
        self._starttime = None
        self._starttext = None
    else:
        self._starttime = datetime(starttime[1], starttime[2],
                                   starttime[3], starttime[4],
                                   starttime[5], 0, 0, self._tz)
        self._starttext = self._starttime.strftime("%A %d %B %Y %H.%M.%S")
        if nextev:
            # For an upcoming event, remaining time counts to the start.
            self._remaining = self._starttime - tempnow
            remainingdays = self._remaining.days
            # Seconds-of-day difference, ignoring the date part.
            remainingseconds = (self._starttime.hour * 3600 +
                                self._starttime.minute * 60 +
                                self._starttime.second -
                                tempnow.hour * 3600 -
                                tempnow.minute * 60 -
                                tempnow.second)
            # If the start is on a later calendar day but earlier in the
            # day, wrap the negative seconds by one day.
            if ((self._starttime.year > tempnow.year
                 or self._starttime.month > tempnow.month
                 or self._starttime.day > tempnow.day)
                    and remainingseconds < 0):
                remainingseconds = 86400 + remainingseconds
            # _remaining is re-bound from a timedelta to total minutes.
            self._remaining = (remainingdays * 1440 +
                               round(remainingseconds / 60, 0))
    if endtime is None:
        self._endtime = None
        self._endtext = None
    else:
        self._endtime = datetime(endtime[1], endtime[2], endtime[3],
                                 endtime[4], endtime[5], 0, 0, self._tz)
        self._endtext = self._endtime.strftime("%A %d %B %Y %H.%M.%S")
        if current:
            # For an in-progress event, remaining time counts to the end.
            self._remaining = self._endtime - tempnow
            remainingdays = self._remaining.days
            remainingseconds = (self._endtime.hour * 3600 +
                                self._endtime.minute * 60 +
                                self._endtime.second -
                                tempnow.hour * 3600 -
                                tempnow.minute * 60 -
                                tempnow.second)
            if ((self._endtime.year > tempnow.year
                 or self._endtime.month > tempnow.month
                 or self._endtime.day > tempnow.day)
                    and remainingseconds < 0):
                remainingseconds = 86400 + remainingseconds
            self._remaining = (remainingdays * 1440 +
                               round(remainingseconds / 60, 0))
    self._duration = duration
    self._title = title
    if (current or nextev) and title is None:
        self._title = 'Free'
    self._location = location
    # Human-readable "Xd Yh Zm" remaining text.
    tempdays = floor(self._remaining / 1440)
    temphours = floor((self._remaining % 1440) / 60)
    tempminutes = floor(self._remaining % 60)
    self._remainingtext = (str(tempdays) + "d " +
                           str(temphours) + "h " +
                           str(tempminutes) + "m")
    if self._remaining <= 0:
        # Event is over (or has no remaining time): drop the entity.
        self.hass.states.remove(self.entity_id)
    else:
        self.update_ha_state()
async def async_added_to_hass(self):
    """Handle entity which will be added.

    Schedules the meter reset (cron pattern or fixed period + offset),
    wires the reset dispatcher signal, restores the last recorded state,
    and defers source tracking until Home Assistant has started.
    """
    await super().async_added_to_hass()
    if self._cron_pattern is not None:
        # One-shot reset at the next cron occurrence; the reset handler
        # is responsible for re-scheduling.
        async_track_point_in_time(
            self.hass,
            self._async_reset_meter,
            croniter(self._cron_pattern, dt_util.now()).get_next(datetime),
        )
    elif self._period == QUARTER_HOURLY:
        # Four time-change listeners, one per quarter hour, each shifted
        # by the configured offset.
        for quarter in range(4):
            async_track_time_change(
                self.hass,
                self._async_reset_meter,
                minute=(quarter * 15)
                + self._period_offset.seconds % (15 * 60) // 60,
                second=self._period_offset.seconds % 60,
            )
    elif self._period == HOURLY:
        async_track_time_change(
            self.hass,
            self._async_reset_meter,
            minute=self._period_offset.seconds // 60,
            second=self._period_offset.seconds % 60,
        )
    elif self._period in [
        DAILY, WEEKLY, MONTHLY, BIMONTHLY, QUARTERLY, YEARLY
    ]:
        # Daily-or-longer periods reset once a day at the offset time;
        # period boundaries beyond a day are handled by the reset logic.
        async_track_time_change(
            self.hass,
            self._async_reset_meter,
            hour=self._period_offset.seconds // 3600,
            minute=self._period_offset.seconds % 3600 // 60,
            second=self._period_offset.seconds % 3600 % 60,
        )

    async_dispatcher_connect(self.hass, SIGNAL_RESET_METER,
                             self.async_reset_meter)

    # Restore the previous reading and metadata, if any.
    state = await self.async_get_last_state()
    if state:
        self._state = Decimal(state.state)
        self._unit_of_measurement = state.attributes.get(
            ATTR_UNIT_OF_MEASUREMENT)
        self._last_period = state.attributes.get(ATTR_LAST_PERIOD)
        self._last_reset = dt_util.as_utc(
            dt_util.parse_datetime(state.attributes.get(ATTR_LAST_RESET)))
        if state.attributes.get(ATTR_STATUS) == COLLECTING:
            # Fake cancellation function to init the meter in similar state
            self._collecting = lambda: None

    @callback
    def async_source_tracking(event):
        """Wait for source to be ready, then start meter."""
        if self._tariff_entity is not None:
            # Tariff-driven meter: follow the tariff entity and only
            # collect while the active tariff matches.
            _LOGGER.debug("<%s> tracks utility meter %s", self.name,
                          self._tariff_entity)
            async_track_state_change_event(self.hass,
                                           [self._tariff_entity],
                                           self.async_tariff_change)
            tariff_entity_state = self.hass.states.get(self._tariff_entity)
            self._change_status(tariff_entity_state.state)
            return
        # No tariff: start collecting from the source sensor directly;
        # keep the unsubscribe handle for later pause/stop.
        _LOGGER.debug("<%s> collecting from %s", self.name,
                      self._sensor_source_id)
        self._collecting = async_track_state_change_event(
            self.hass, [self._sensor_source_id], self.async_reading)

    self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START,
                                    async_source_tracking)
def keep_alive(self): """ Keeps the api alive """ currentminutes = dt_util.now().hour * 60 + dt_util.now().minute maxminute = round(self._interval / 5, 0) if currentminutes % self._interval <= maxminute: self.update_icloud(see)
def is_over(vevent): """Return if the event is over.""" return dt.now() >= WebDavCalendarData.to_datetime( WebDavCalendarData.get_end_date(vevent))
def process_event(self, event):
    """Advance the alarm state machine for ``event`` and run side effects.

    Transition table (unchanged from the original): Disarm and Trigger
    are handled from any state; other events depend on the current state.
    On a state change this toggles warning/alarm switches, arms timeout
    timers and (re)binds trip signals, then pushes the new state to HA.
    """
    old = self._state

    # Update state if applicable
    if event == Events.Disarm:
        self._state = STATE_ALARM_DISARMED
    elif event == Events.Trigger:
        self._state = STATE_ALARM_TRIGGERED
    elif old == STATE_ALARM_DISARMED:
        if event == Events.ArmHome:
            self._state = STATE_ALARM_ARMED_HOME
        elif event == Events.ArmAway:
            # Arming away goes through a pending (exit-delay) state.
            self._state = STATE_ALARM_PENDING
        elif event == Events.ArmPerimeter:
            self._state = STATE_ALARM_ARMED_PERIMETER
    elif old == STATE_ALARM_PENDING:
        if event == Events.Timeout:
            self._state = STATE_ALARM_ARMED_AWAY
    elif old in (STATE_ALARM_ARMED_HOME,
                 STATE_ALARM_ARMED_AWAY,
                 STATE_ALARM_ARMED_PERIMETER):
        if event == Events.ImmediateTrip:
            self._state = STATE_ALARM_TRIGGERED
        elif event == Events.DelayedTrip:
            self._state = STATE_ALARM_WARNING
    elif old == STATE_ALARM_WARNING:
        if event == Events.Timeout:
            self._state = STATE_ALARM_TRIGGERED
    elif old == STATE_ALARM_TRIGGERED:
        if event == Events.Timeout:
            # After the trigger time, fall back to the pre-trigger state.
            self._state = self._returnto

    new = self._state
    if old != new:
        # Lazy %-style args instead of eager .format() — the message is
        # only built when debug logging is enabled.
        _LOGGER.debug("[ALARM] Alarm changing from %s to %s", old, new)

        # Things to do on entering state
        if new == STATE_ALARM_WARNING:
            _LOGGER.debug("[ALARM] Turning on warning")
            switch.turn_on(self._hass, self._warning)
            self._timeoutat = now() + self._pending_time
        elif new == STATE_ALARM_TRIGGERED:
            _LOGGER.debug("[ALARM] Turning on alarm")
            switch.turn_on(self._hass, self._alarm)
            self._timeoutat = now() + self._trigger_time
        elif new == STATE_ALARM_PENDING:
            _LOGGER.debug("[ALARM] Pending user leaving house")
            switch.turn_on(self._hass, self._warning)
            self._timeoutat = now() + self._pending_time
            self._returnto = STATE_ALARM_ARMED_AWAY
            self.setsignals(Events.ArmAway)
        elif new == STATE_ALARM_ARMED_HOME:
            self._returnto = new
            self.setsignals(Events.ArmHome)
        elif new == STATE_ALARM_ARMED_AWAY:
            self._returnto = new
            self.setsignals(Events.ArmAway)
        elif new == STATE_ALARM_ARMED_PERIMETER:
            self._returnto = new
            self.setsignals(Events.ArmPerimeter)
        elif new == STATE_ALARM_DISARMED:
            self._returnto = new
            self.clearsignals()

        # Things to do on leaving state
        if old in (STATE_ALARM_WARNING, STATE_ALARM_PENDING):
            _LOGGER.debug("[ALARM] Turning off warning")
            switch.turn_off(self._hass, self._warning)
        elif old == STATE_ALARM_TRIGGERED:
            _LOGGER.debug("[ALARM] Turning off alarm")
            switch.turn_off(self._hass, self._alarm)

        # Let HA know that something changed
        self.schedule_update_ha_state()
def update(call=None): """Update service for manual updates.""" data.update(dt_util.now()) for sensor in dev: sensor.update()
async def async_run(self, hass):
    """Entities updater loop.

    Consumes parsed BLE advertisement dicts from ``self.dataqueue``,
    creates sensor entities on first sight of a device, routes each
    measurement to the matching entity, and periodically (every
    ``self.period`` seconds) flushes pending updates and restarts the
    scanner. Returns True when a ``None`` sentinel is received.
    """

    def temperature_limit(config, mac, temp):
        """Set limits for temperature measurement in °C or °F."""
        # Re-insert colons into the bare-hex mac for config comparison.
        fmac = ':'.join(mac[i:i + 2] for i in range(0, len(mac), 2))
        if config[CONF_DEVICES]:
            for device in config[CONF_DEVICES]:
                if fmac in device["mac"].upper():
                    if CONF_TEMPERATURE_UNIT in device:
                        if device[
                                CONF_TEMPERATURE_UNIT] == TEMP_FAHRENHEIT:
                            # Convert the limit to °F for this device.
                            temp_fahrenheit = temp * 9 / 5 + 32
                            return temp_fahrenheit
                    break
        return temp

    async def async_add_sensor(mac, sensortype, firmware):
        """Return (creating if needed) the sensor list for ``mac``.

        MMTS_DICT maps a sensor type to per-measurement slot indexes;
        an index of 9 means "this device has no such measurement".
        """
        t_i, h_i, m_i, p_i, c_i, i_i, f_i, cn_i, bu_i, w_i, nw_i, im_i, vd_i, v_i, b_i = MMTS_DICT[
            sensortype][0]
        if mac not in sensors_by_mac:
            sensors = []
            if t_i != 9:
                sensors.insert(
                    t_i,
                    TemperatureSensor(self.config, mac, sensortype,
                                      firmware))
            if h_i != 9:
                sensors.insert(
                    h_i,
                    HumiditySensor(self.config, mac, sensortype, firmware))
            if m_i != 9:
                sensors.insert(
                    m_i,
                    MoistureSensor(self.config, mac, sensortype, firmware))
            if p_i != 9:
                sensors.insert(
                    p_i,
                    PressureSensor(self.config, mac, sensortype, firmware))
            if c_i != 9:
                sensors.insert(
                    c_i,
                    ConductivitySensor(self.config, mac, sensortype,
                                       firmware))
            if i_i != 9:
                sensors.insert(
                    i_i,
                    IlluminanceSensor(self.config, mac, sensortype,
                                      firmware))
            if f_i != 9:
                sensors.insert(
                    f_i,
                    FormaldehydeSensor(self.config, mac, sensortype,
                                       firmware))
            if cn_i != 9:
                sensors.insert(
                    cn_i,
                    ConsumableSensor(self.config, mac, sensortype,
                                     firmware))
            if bu_i != 9:
                sensors.insert(
                    bu_i,
                    ButtonSensor(self.config, mac, sensortype, firmware))
            if w_i != 9:
                sensors.insert(
                    w_i,
                    WeightSensor(self.config, mac, sensortype, firmware))
            if nw_i != 9:
                sensors.insert(
                    nw_i,
                    NonStabilizedWeightSensor(self.config, mac, sensortype,
                                              firmware))
            if im_i != 9:
                sensors.insert(
                    im_i,
                    ImpedanceSensor(self.config, mac, sensortype,
                                    firmware))
            if vd_i != 9:
                port = 1
                sensors.insert(
                    vd_i,
                    VolumeDispensedSensor(self.config, mac, sensortype,
                                          port, firmware))
                # Kegtron KT-200 has a second dispense port.
                if sensortype == "Kegtron KT-200":
                    port = 2
                    sensors.insert(
                        vd_i + 1,
                        VolumeDispensedSensor(self.config, mac, sensortype,
                                              port, firmware))
            if self.batt_entities and (v_i != 9):
                sensors.insert(
                    v_i,
                    VoltageSensor(self.config, mac, sensortype, firmware))
            if self.batt_entities and (b_i != 9):
                sensors.insert(
                    b_i,
                    BatterySensor(self.config, mac, sensortype, firmware))
            if len(sensors) != 0:
                sensors_by_mac[mac] = sensors
                self.add_entities(sensors)
        else:
            sensors = sensors_by_mac[mac]
        return sensors

    _LOGGER.debug("Entities updater loop started!")
    sensors_by_mac = {}
    sensors = []
    batt = {}  # batteries
    rssi = {}
    ble_adv_cnt = 0
    ts_last = dt_util.now()
    ts_now = ts_last
    data = None
    await asyncio.sleep(0)

    # Set up sensors of configured devices on startup when sensortype is available in device registry
    if self.config[CONF_DEVICES]:
        dev_registry = await hass.helpers.device_registry.async_get_registry(
        )
        for device in self.config[CONF_DEVICES]:
            mac = device["mac"]
            # get sensortype and firmware from device registry to setup sensor
            dev = dev_registry.async_get_device({(DOMAIN, mac)}, set())
            if dev:
                mac = mac.replace(":", "")
                sensortype = dev.model
                firmware = dev.sw_version
                sensors = await async_add_sensor(mac, sensortype, firmware)
            else:
                # Device not registered yet; it will be created on first
                # advertisement below.
                pass
    else:
        sensors = []

    # Set up new sensors when first BLE advertisement is received
    sensors = []
    while True:
        try:
            # Poll with a 1 s timeout so the periodic flush below still
            # runs when no advertisements arrive.
            advevent = await asyncio.wait_for(self.dataqueue.get(), 1)
            if advevent is None:
                # None is the shutdown sentinel.
                _LOGGER.debug("Entities updater loop stopped")
                return True
            data = advevent
            self.dataqueue.task_done()
        except asyncio.TimeoutError:
            pass
        if data:
            _LOGGER.debug("Data measuring sensor received: %s", data)
            ble_adv_cnt += 1
            mac = data["mac"]
            # the RSSI value will be averaged for all valuable packets
            if mac not in rssi:
                rssi[mac] = []
            rssi[mac].append(int(data["rssi"]))
            batt_attr = None
            sensortype = data["type"]
            firmware = data["firmware"]
            t_i, h_i, m_i, p_i, c_i, i_i, f_i, cn_i, bu_i, w_i, nw_i, im_i, vd_i, v_i, b_i = MMTS_DICT[
                sensortype][0]
            sensors = await async_add_sensor(mac, sensortype, firmware)
            if data["data"] is False:
                # Advertisement carried no measurement payload.
                data = None
                continue

            # store found readings per device
            # battery sensors and battery attribute
            if (b_i != 9):
                if "battery" in data:
                    batt[mac] = int(data["battery"])
                    batt_attr = batt[mac]
                    if self.batt_entities:
                        sensors[b_i].collect(data)
                else:
                    # Reuse the last known battery level as an attribute.
                    try:
                        batt_attr = batt[mac]
                    except KeyError:
                        batt_attr = None
            # measuring sensors
            if "temperature" in data and (t_i != 9):
                # schedule an immediate update of kettle temperature
                if sensortype in KETTLES:
                    entity = sensors[t_i]
                    entity.collect(data, batt_attr)
                    if entity.ready_for_update is True:
                        entity.rssi_values = rssi[mac].copy()
                        entity.async_schedule_update_ha_state(True)
                        rssi[mac].clear()
                        entity.pending_update = False
                else:
                    # Reject readings outside the per-device limits.
                    if (temperature_limit(self.config, mac, CONF_TMAX) >=
                            data["temperature"] >= temperature_limit(
                                self.config, mac, CONF_TMIN)):
                        sensors[t_i].collect(data, batt_attr)
                    elif self.log_spikes:
                        _LOGGER.error(
                            "Temperature spike: %s (%s)",
                            data["temperature"],
                            mac,
                        )
            if "humidity" in data and (h_i != 9):
                if CONF_HMAX >= data["humidity"] >= CONF_HMIN:
                    sensors[h_i].collect(data, batt_attr)
                elif self.log_spikes:
                    _LOGGER.error(
                        "Humidity spike: %s (%s)",
                        data["humidity"],
                        mac,
                    )
            if "conductivity" in data and (c_i != 9):
                sensors[c_i].collect(data, batt_attr)
            if "pressure" in data and (p_i != 9):
                sensors[p_i].collect(data, batt_attr)
            if "moisture" in data and (m_i != 9):
                sensors[m_i].collect(data, batt_attr)
            if "illuminance" in data and (i_i != 9):
                sensors[i_i].collect(data, batt_attr)
            if "formaldehyde" in data and (f_i != 9):
                sensors[f_i].collect(data, batt_attr)
            if "consumable" in data and (cn_i != 9):
                sensors[cn_i].collect(data, batt_attr)
            if "button" in data and (bu_i != 9):
                button = sensors[bu_i]
                # schedule an immediate update of button sensors
                button.collect(data, batt_attr)
                if button.ready_for_update is True:
                    button.rssi_values = rssi[mac].copy()
                    button.async_schedule_update_ha_state(True)
                    button.pending_update = False
            if "weight" in data and (w_i != 9):
                weight = sensors[w_i]
                # schedule an immediate update of weight sensors
                weight.collect(data, batt_attr)
                if weight.ready_for_update is True:
                    weight.rssi_values = rssi[mac].copy()
                    weight.async_schedule_update_ha_state(True)
                    weight.pending_update = False
            if "non-stabilized weight" in data and (nw_i != 9):
                non_stabilized_weight = sensors[nw_i]
                # schedule an immediate update of non-stabilized weight sensors
                non_stabilized_weight.collect(data, batt_attr)
                if non_stabilized_weight.ready_for_update is True:
                    non_stabilized_weight.rssi_values = rssi[mac].copy()
                    non_stabilized_weight.async_schedule_update_ha_state(
                        True)
                    non_stabilized_weight.pending_update = False
            if "impedance" in data and (im_i != 9):
                impedance = sensors[im_i]
                # schedule an immediate update of impedance sensors
                impedance.collect(data, batt_attr)
                if impedance.ready_for_update is True:
                    impedance.rssi_values = rssi[mac].copy()
                    impedance.async_schedule_update_ha_state(True)
                    impedance.pending_update = False
            if "volume dispensed" in data and (vd_i != 9):
                # Select the per-port entity (ports are adjacent slots).
                port = data["port index"]
                vd_i = vd_i + port - 1
                volume_dispensed = sensors[vd_i]
                # schedule an immediate update of kegtron volume dispensed sensors
                volume_dispensed.collect(data, batt_attr)
                if volume_dispensed.ready_for_update is True:
                    volume_dispensed.rssi_values = rssi[mac].copy()
                    volume_dispensed.async_schedule_update_ha_state(True)
                    volume_dispensed.pending_update = False
            if self.batt_entities:
                if "voltage" in data and (v_i != 9):
                    sensors[v_i].collect(data, batt_attr)
            data = None

        # Periodic flush: only every self.period seconds.
        ts_now = dt_util.now()
        if ts_now - ts_last < timedelta(seconds=self.period):
            continue
        ts_last = ts_now
        # restarting scanner
        self.monitor.restart()
        # for every updated device
        for mac, elist in sensors_by_mac.items():
            for entity in elist:
                if entity.pending_update is True:
                    if entity.ready_for_update is True:
                        entity.rssi_values = rssi[mac].copy()
                        entity.async_schedule_update_ha_state(True)
        for mac in rssi:
            rssi[mac].clear()
        _LOGGER.debug(
            "%i BLE ADV messages processed for %i measuring device(s).",
            ble_adv_cnt,
            len(sensors_by_mac),
        )
        ble_adv_cnt = 0
def __init__(self, name: str) -> None: """Initialize the uptime sensor.""" self._name = name self._state = dt_util.now().isoformat()
async def test_media_store_persistence(hass, auth, hass_client, event_store):
    """Test the disk backed media store persistence.

    Publishes a motion event with fetched media, verifies it is browsable
    and downloadable, then unloads and reloads the integration with a
    fresh subscriber and verifies the same event and media come back from
    the on-disk store.
    """
    nest_device = Device.MakeDevice(
        {
            "name": DEVICE_ID,
            "type": CAMERA_DEVICE_TYPE,
            "traits": BATTERY_CAMERA_TRAITS,
        },
        auth=auth,
    )
    subscriber = FakeSubscriber()
    device_manager = await subscriber.async_get_device_manager()
    device_manager.add_device(nest_device)
    # Fetch media for events when published
    subscriber.cache_policy.fetch = True

    config_entry = create_config_entry()
    config_entry.add_to_hass(hass)
    with patch(
        "homeassistant.helpers.config_entry_oauth2_flow.async_get_config_entry_implementation"
    ), patch("homeassistant.components.nest.PLATFORMS", [PLATFORM]), patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber",
        return_value=subscriber,
    ):
        assert await async_setup_component(hass, DOMAIN, CONFIG)
        await hass.async_block_till_done()

    device_registry = dr.async_get(hass)
    device = device_registry.async_get_device({(DOMAIN, DEVICE_ID)})
    assert device
    assert device.name == DEVICE_NAME

    # The next media fetch will return this payload.
    auth.responses = [
        aiohttp.web.Response(body=IMAGE_BYTES_FROM_EVENT),
    ]
    event_timestamp = dt_util.now()
    await subscriber.async_receive_event(
        create_event_message(
            create_battery_event_data(MOTION_EVENT), timestamp=event_timestamp
        )
    )
    await hass.async_block_till_done()

    # Browse to event
    browse = await media_source.async_browse_media(
        hass, f"{const.URI_SCHEME}{DOMAIN}/{device.id}"
    )
    assert len(browse.children) == 1
    assert browse.children[0].domain == DOMAIN
    event_timestamp_string = event_timestamp.strftime(DATE_STR_FORMAT)
    assert browse.children[0].title == f"Motion @ {event_timestamp_string}"
    assert not browse.children[0].can_expand
    assert browse.children[0].can_play
    event_identifier = browse.children[0].identifier

    media = await media_source.async_resolve_media(
        hass, f"{const.URI_SCHEME}{DOMAIN}/{event_identifier}"
    )
    assert media.url == f"/api/nest/event_media/{event_identifier}"
    assert media.mime_type == "video/mp4"

    # Fetch event media
    client = await hass_client()
    response = await client.get(media.url)
    assert response.status == HTTPStatus.OK, "Response not matched: %s" % response
    contents = await response.read()
    assert contents == IMAGE_BYTES_FROM_EVENT

    # Ensure event media store persists to disk
    await hass.async_block_till_done()

    # Unload the integration.
    assert config_entry.state == ConfigEntryState.LOADED
    assert await hass.config_entries.async_unload(config_entry.entry_id)
    await hass.async_block_till_done()
    assert config_entry.state == ConfigEntryState.NOT_LOADED

    # Now rebuild the entire integration and verify that all persisted storage
    # can be re-loaded from disk.
    subscriber = FakeSubscriber()
    device_manager = await subscriber.async_get_device_manager()
    device_manager.add_device(nest_device)

    with patch(
        "homeassistant.helpers.config_entry_oauth2_flow.async_get_config_entry_implementation"
    ), patch("homeassistant.components.nest.PLATFORMS", [PLATFORM]), patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber",
        return_value=subscriber,
    ):
        await hass.config_entries.async_reload(config_entry.entry_id)
        await hass.async_block_till_done()

    device_registry = dr.async_get(hass)
    device = device_registry.async_get_device({(DOMAIN, DEVICE_ID)})
    assert device
    assert device.name == DEVICE_NAME

    # Verify event metadata exists
    browse = await media_source.async_browse_media(
        hass, f"{const.URI_SCHEME}{DOMAIN}/{device.id}"
    )
    assert len(browse.children) == 1
    assert browse.children[0].domain == DOMAIN
    event_timestamp_string = event_timestamp.strftime(DATE_STR_FORMAT)
    assert browse.children[0].title == f"Motion @ {event_timestamp_string}"
    assert not browse.children[0].can_expand
    assert browse.children[0].can_play
    event_identifier = browse.children[0].identifier

    media = await media_source.async_resolve_media(
        hass, f"{const.URI_SCHEME}{DOMAIN}/{event_identifier}"
    )
    assert media.url == f"/api/nest/event_media/{event_identifier}"
    assert media.mime_type == "video/mp4"

    # Verify media exists
    response = await client.get(media.url)
    assert response.status == HTTPStatus.OK, "Response not matched: %s" % response
    contents = await response.read()
    assert contents == IMAGE_BYTES_FROM_EVENT
def generate_fig(_=None): now = dt_util.now() hour = now.hour dt = datetime.timedelta(minutes=now.minute) plt.style.use('ggplot') xFmt = mdates.DateFormatter('%H', tz=tz.gettz('Europe/Berlin')) fig = plt.figure() ax = fig.add_subplot(111) ax.grid(which='major', axis='x', linestyle='-', color='gray', alpha=0.25) plt.tick_params(axis="both", which="both", bottom=False, top=False, labelbottom=True, left=False, right=False, labelleft=True) ax.plot([dates[hour] + dt, dates[hour] + dt], [min(prices) - 3, max(prices) + 3], 'r', alpha=0.35, linestyle='-') ax.plot(dates, prices, '#039be5') ax.fill_between(dates, 0, prices, facecolor='#039be5', alpha=0.25) plt.text(dates[hour] + dt, prices[hour], str(round(prices[hour], 1)), fontsize=14) min_length = 5 if len(dates) > 24 else 3 last_hour = -1 * min_length for _hour in range(1, len(prices) - 1): if abs(_hour - last_hour) < min_length or abs(_hour - hour) < min_length: continue if (prices[_hour - 1] > prices[_hour] < prices[_hour + 1]) \ or (prices[_hour - 1] < prices[_hour] > prices[_hour + 1]): last_hour = _hour plt.text(dates[_hour], prices[_hour], str(round(prices[_hour], 1)) + "\n{:02}".format(_hour % 24), fontsize=14, va='bottom') ax.set_xlim((dates[0] - datetime.timedelta(minutes=3), dates[-1] + datetime.timedelta(minutes=3))) ax.set_ylim((min(prices) - 0.5, max(prices) + 0.5)) ax.set_facecolor('white') # import plotly.plotly as py ax.xaxis.set_major_formatter(xFmt) fig.autofmt_xdate() fig.savefig('/tmp/prices.png') # file name in your local system plt.close(fig) plt.close('all')