async def async_added_to_opp(self):
    """Handle entity which will be added."""
    await super().async_added_to_opp()
    state = await self.async_get_last_state()
    if state:
        self._pvpc_data.state = state.state

    # Update 'state' value in hour changes
    self._hourly_tracker = async_track_time_change(
        self.opp, self.update_current_price, second=[0], minute=[0]
    )
    # Update prices at random time, 2 times/hour (don't want to upset API)
    random_minute = randint(1, 29)
    mins_update = [random_minute, random_minute + 30]
    self._price_tracker = async_track_time_change(
        self.opp, self.async_update_prices, second=[0], minute=mins_update
    )
    _LOGGER.debug(
        "Setup of price sensor %s (%s) with tariff '%s', "
        "updating prices each hour at %s min",
        self.name,
        self.entity_id,
        self._pvpc_data.tariff,
        mins_update,
    )
    await self.async_update_prices(dt_util.utcnow())
    self.update_current_price(dt_util.utcnow())
def async_start_setup(opp: core.OpenPeerPower, components: Iterable) -> Generator:
    """Keep track of when setup starts and finishes."""
    setup_started = opp.data.setdefault(DATA_SETUP_STARTED, {})
    started = dt_util.utcnow()
    unique_components = {}
    for domain in components:
        unique = ensure_unique_string(domain, setup_started)
        unique_components[unique] = domain
        setup_started[unique] = started

    yield

    setup_time = opp.data.setdefault(DATA_SETUP_TIME, {})
    time_taken = dt_util.utcnow() - started
    for unique, domain in unique_components.items():
        del setup_started[unique]
        if "." in domain:
            _, integration = domain.split(".", 1)
        else:
            integration = domain
        if integration in setup_time:
            setup_time[integration] += time_taken
        else:
            setup_time[integration] = time_taken
async def test_if_not_fires_on_change_with_for(opp, calls):
    """Test for not firing on change with for."""
    assert await async_setup_component(
        opp,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "template",
                    "value_template": "{{ is_state('test.entity', 'world') }}",
                    "for": {"seconds": 5},
                },
                "action": {"service": "test.automation"},
            }
        },
    )

    opp.states.async_set("test.entity", "world")
    await opp.async_block_till_done()
    assert len(calls) == 0
    async_fire_time_changed(opp, dt_util.utcnow() + timedelta(seconds=4))
    await opp.async_block_till_done()
    assert len(calls) == 0
    opp.states.async_set("test.entity", "hello")
    await opp.async_block_till_done()
    assert len(calls) == 0
    async_fire_time_changed(opp, dt_util.utcnow() + timedelta(seconds=6))
    await opp.async_block_till_done()
    assert len(calls) == 0
async def _check_awaked():
    """Wait for Z-wave awaked state (or timeout) and finalize start."""
    _LOGGER.debug("network state: %d %s", network.state, network.state_str)

    start_time = dt_util.utcnow()
    while True:
        waited = int((dt_util.utcnow() - start_time).total_seconds())

        if network.state >= network.STATE_AWAKED:
            # Need to be in STATE_AWAKED before talking to nodes.
            _LOGGER.info("Z-Wave ready after %d seconds", waited)
            break
        if waited >= const.NETWORK_READY_WAIT_SECS:
            # Wait up to NETWORK_READY_WAIT_SECS seconds for the Z-Wave
            # network to be ready.
            _LOGGER.warning(
                "Z-Wave not ready after %d seconds, continuing anyway", waited
            )
            _LOGGER.info(
                "Final network state: %d %s", network.state, network.state_str
            )
            break

        await asyncio.sleep(1)

    opp.async_add_job(_finalize_start)
async def test_alexa_update_expose_trigger_sync(opp, cloud_prefs):
    """Test Alexa config responds to updating exposed entities."""
    alexa_config.AlexaConfig(opp, ALEXA_SCHEMA({}), cloud_prefs, None)

    with patch_sync_helper() as (to_update, to_remove):
        await cloud_prefs.async_update_alexa_entity_config(
            entity_id="light.kitchen", should_expose=True
        )
        await opp.async_block_till_done()
        async_fire_time_changed(opp, utcnow())
        await opp.async_block_till_done()

    assert to_update == ["light.kitchen"]
    assert to_remove == []

    with patch_sync_helper() as (to_update, to_remove):
        await cloud_prefs.async_update_alexa_entity_config(
            entity_id="light.kitchen", should_expose=False
        )
        await cloud_prefs.async_update_alexa_entity_config(
            entity_id="binary_sensor.door", should_expose=True
        )
        await cloud_prefs.async_update_alexa_entity_config(
            entity_id="sensor.temp", should_expose=True
        )
        await opp.async_block_till_done()
        async_fire_time_changed(opp, utcnow())
        await opp.async_block_till_done()

    assert sorted(to_update) == ["binary_sensor.door", "sensor.temp"]
    assert to_remove == ["light.kitchen"]
async def async_get_access_token(self):
    """Get an access token."""
    if self._token_valid is not None and self._token_valid > utcnow():
        return self._token

    resp = await cloud_api.async_alexa_access_token(self._cloud)
    body = await resp.json()

    if resp.status == HTTP_BAD_REQUEST:
        if body["reason"] in ("RefreshTokenNotFound", "UnknownRegion"):
            if self.should_report_state:
                await self._prefs.async_update(alexa_report_state=False)
                self.opp.components.persistent_notification.async_create(
                    f"There was an error reporting state to Alexa ({body['reason']}). "
                    "Please re-link your Alexa skill via the Alexa app to "
                    "continue using it.",
                    "Alexa state reporting disabled",
                    "cloud_alexa_report",
                )
            raise RequireRelink

        raise alexa_errors.NoTokenAvailable

    self._token = body["access_token"]
    self._endpoint = body["event_endpoint"]
    self._token_valid = utcnow() + timedelta(seconds=body["expires_in"])
    return self._token
async def test_update_interval_not_present(opp, crd_without_update_interval):
    """Test update never happens with no update interval."""
    crd = crd_without_update_interval
    # Test we don't update without subscriber with no update interval
    async_fire_time_changed(opp, utcnow() + DEFAULT_UPDATE_INTERVAL)
    await opp.async_block_till_done()
    assert crd.data is None

    # Add subscriber
    update_callback = Mock()
    crd.async_add_listener(update_callback)

    # Test twice we don't update with subscriber with no update interval
    async_fire_time_changed(opp, utcnow() + DEFAULT_UPDATE_INTERVAL)
    await opp.async_block_till_done()
    assert crd.data is None

    async_fire_time_changed(opp, utcnow() + DEFAULT_UPDATE_INTERVAL)
    await opp.async_block_till_done()
    assert crd.data is None

    # Test removing listener
    crd.async_remove_listener(update_callback)

    async_fire_time_changed(opp, utcnow() + DEFAULT_UPDATE_INTERVAL)
    await opp.async_block_till_done()

    # Test we still don't update after losing the last subscriber
    assert crd.data is None
async def async_service_learn_handler(entity, service):
    """Handle a learn command."""
    device = entity.device

    slot = service.data.get(CONF_SLOT, entity.slot)

    await opp.async_add_executor_job(device.learn, slot)

    timeout = service.data.get(CONF_TIMEOUT, entity.timeout)

    _LOGGER.info("Press the key you want Open Peer Power to learn")
    start_time = utcnow()
    while (utcnow() - start_time) < timedelta(seconds=timeout):
        message = await opp.async_add_executor_job(device.read, slot)
        _LOGGER.debug("Message received from device: '%s'", message)

        if "code" in message and message["code"]:
            log_msg = "Received command is: {}".format(message["code"])
            _LOGGER.info(log_msg)
            opp.components.persistent_notification.async_create(
                log_msg, title="Xiaomi Miio Remote"
            )
            return

        if "error" in message and message["error"]["message"] == "learn timeout":
            await opp.async_add_executor_job(device.learn, slot)

        await asyncio.sleep(1)

    _LOGGER.error("Timeout. No infrared command captured")
    opp.components.persistent_notification.async_create(
        "Timeout. No infrared command captured", title="Xiaomi Miio Remote"
    )
async def test_dynamically_handle_segments(
    opp: OpenPeerPower, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test if a new/deleted segment is dynamically added/removed."""
    await init_integration(opp, aioclient_mock)

    assert opp.states.get("light.wled_rgb_light_master")
    assert opp.states.get("light.wled_rgb_light_segment_0")
    assert opp.states.get("light.wled_rgb_light_segment_1")

    data = json.loads(load_fixture("wled/rgb_single_segment.json"))
    device = WLEDDevice(data)

    # Test removal if segment went missing, including the master entity
    with patch(
        "openpeerpower.components.wled.WLED.update",
        return_value=device,
    ):
        async_fire_time_changed(opp, dt_util.utcnow() + SCAN_INTERVAL)
        await opp.async_block_till_done()
        assert opp.states.get("light.wled_rgb_light_segment_0")
        assert not opp.states.get("light.wled_rgb_light_segment_1")
        assert not opp.states.get("light.wled_rgb_light_master")

    # Test adding if segment shows up again, including the master entity
    async_fire_time_changed(opp, dt_util.utcnow() + SCAN_INTERVAL)
    await opp.async_block_till_done()

    assert opp.states.get("light.wled_rgb_light_master")
    assert opp.states.get("light.wled_rgb_light_segment_0")
    assert opp.states.get("light.wled_rgb_light_segment_1")
async def _async_learn_ir_command(self, command):
    """Learn an infrared command."""
    try:
        await self._device.async_request(self._device.api.enter_learning)
    except (BroadlinkException, OSError) as err:
        _LOGGER.debug("Failed to enter learning mode: %s", err)
        raise

    self.opp.components.persistent_notification.async_create(
        f"Press the '{command}' button.",
        title="Learn command",
        notification_id="learn_command",
    )

    try:
        start_time = utcnow()
        while (utcnow() - start_time) < LEARNING_TIMEOUT:
            await asyncio.sleep(1)
            try:
                code = await self._device.async_request(self._device.api.check_data)
            except (ReadError, StorageError):
                continue
            return b64encode(code).decode("utf8")

        raise TimeoutError(
            "No infrared code received within "
            f"{LEARNING_TIMEOUT.total_seconds()} seconds"
        )

    finally:
        self.opp.components.persistent_notification.async_dismiss(
            notification_id="learn_command"
        )
async def test_no_limit(opp):
    """Test that async_schedule_action always returns None when there is no rate limit."""
    refresh_called = False

    @callback
    def _refresh():
        nonlocal refresh_called
        refresh_called = True
        return

    rate_limiter = ratelimit.KeyedRateLimit(opp)
    rate_limiter.async_triggered("key1", dt_util.utcnow())

    assert (
        rate_limiter.async_schedule_action("key1", None, dt_util.utcnow(), _refresh)
        is None
    )
    assert not refresh_called
    assert not rate_limiter.async_has_timer("key1")

    rate_limiter.async_triggered("key1", dt_util.utcnow())

    assert (
        rate_limiter.async_schedule_action("key1", None, dt_util.utcnow(), _refresh)
        is None
    )
    assert not refresh_called
    assert not rate_limiter.async_has_timer("key1")
    rate_limiter.async_remove()
async def test_hit(opp):
    """Test hitting the rate limit."""
    refresh_called = False

    @callback
    def _refresh():
        nonlocal refresh_called
        refresh_called = True
        return

    rate_limiter = ratelimit.KeyedRateLimit(opp)
    rate_limiter.async_triggered("key1", dt_util.utcnow())

    assert (
        rate_limiter.async_schedule_action(
            "key1", timedelta(seconds=0.001), dt_util.utcnow(), _refresh
        )
        is not None
    )
    assert not refresh_called
    assert rate_limiter.async_has_timer("key1")

    await asyncio.sleep(0.002)
    assert refresh_called

    assert (
        rate_limiter.async_schedule_action(
            "key2", timedelta(seconds=0.001), dt_util.utcnow(), _refresh
        )
        is None
    )
    rate_limiter.async_remove()
async def _add_test_recorder_runs(opp: OpenPeerPower, instance: recorder.Recorder):
    """Add a few recorder_runs for testing."""
    utcnow = dt_util.utcnow()
    five_days_ago = utcnow - timedelta(days=5)
    eleven_days_ago = utcnow - timedelta(days=11)

    await opp.async_block_till_done()
    await async_wait_recording_done(opp, instance)

    with recorder.session_scope(opp=opp) as session:
        for rec_id in range(6):
            if rec_id < 2:
                timestamp = eleven_days_ago
            elif rec_id < 4:
                timestamp = five_days_ago
            else:
                timestamp = utcnow

            session.add(
                RecorderRuns(
                    start=timestamp,
                    created=dt_util.utcnow(),
                    end=timestamp + timedelta(days=1),
                )
            )
async def test_miss(opp):
    """Test missing the rate limit."""
    refresh_called = False

    @callback
    def _refresh():
        nonlocal refresh_called
        refresh_called = True
        return

    rate_limiter = ratelimit.KeyedRateLimit(opp)
    assert (
        rate_limiter.async_schedule_action(
            "key1", timedelta(seconds=0.1), dt_util.utcnow(), _refresh
        )
        is None
    )
    assert not refresh_called
    assert not rate_limiter.async_has_timer("key1")

    assert (
        rate_limiter.async_schedule_action(
            "key1", timedelta(seconds=0.1), dt_util.utcnow(), _refresh
        )
        is None
    )
    assert not refresh_called
    assert not rate_limiter.async_has_timer("key1")
    rate_limiter.async_remove()
def _add_purge_records(opp: OpenPeerPower) -> None:
    with recorder.session_scope(opp=opp) as session:
        # Add states and state_changed events that should be purged
        for days in range(1, 4):
            timestamp = dt_util.utcnow() - timedelta(days=days)
            for event_id in range(1000, 1020):
                _add_state_and_state_changed_event(
                    session,
                    "sensor.purge_entity",
                    "purgeme",
                    timestamp,
                    event_id * days,
                )
            timestamp = dt_util.utcnow() - timedelta(days=days)
            for event_id in range(10000, 10020):
                _add_state_and_state_changed_event(
                    session,
                    "purge_domain.entity",
                    "purgeme",
                    timestamp,
                    event_id * days,
                )
            timestamp = dt_util.utcnow() - timedelta(days=days)
            for event_id in range(100000, 100020):
                _add_state_and_state_changed_event(
                    session,
                    "binary_sensor.purge_glob",
                    "purgeme",
                    timestamp,
                    event_id * days,
                )
def _add_db_entries(opp: OpenPeerPower) -> None:
    with recorder.session_scope(opp=opp) as session:
        # Add states and state_changed events that should be purged
        for days in range(1, 4):
            timestamp = dt_util.utcnow() - timedelta(days=days)
            for event_id in range(1000, 1020):
                _add_state_and_state_changed_event(
                    session,
                    "sensor.excluded",
                    "purgeme",
                    timestamp,
                    event_id * days,
                )
        # Add events that should be kept
        timestamp = dt_util.utcnow() - timedelta(days=1)
        for event_id in range(200, 210):
            session.add(
                Events(
                    event_id=event_id,
                    event_type="EVENT_KEEP",
                    event_data="{}",
                    origin="LOCAL",
                    created=timestamp,
                    time_fired=timestamp,
                )
            )
        # Add states with linked old_state_ids that need to be handled
        timestamp = dt_util.utcnow() - timedelta(days=0)
        state_1 = States(
            entity_id="sensor.linked_old_state_id",
            domain="sensor",
            state="keep",
            attributes="{}",
            last_changed=timestamp,
            last_updated=timestamp,
            created=timestamp,
            old_state_id=1,
        )
        timestamp = dt_util.utcnow() - timedelta(days=4)
        state_2 = States(
            entity_id="sensor.linked_old_state_id",
            domain="sensor",
            state="keep",
            attributes="{}",
            last_changed=timestamp,
            last_updated=timestamp,
            created=timestamp,
            old_state_id=2,
        )
        state_3 = States(
            entity_id="sensor.linked_old_state_id",
            domain="sensor",
            state="keep",
            attributes="{}",
            last_changed=timestamp,
            last_updated=timestamp,
            created=timestamp,
            old_state_id=62,  # keep
        )
        session.add_all((state_1, state_2, state_3))
def _add_db_entries(opp: OpenPeerPower) -> None:
    with recorder.session_scope(opp=opp) as session:
        # Add events that should be purged
        for days in range(1, 4):
            timestamp = dt_util.utcnow() - timedelta(days=days)
            for event_id in range(1000, 1020):
                session.add(
                    Events(
                        event_id=event_id * days,
                        event_type="EVENT_PURGE",
                        event_data="{}",
                        origin="LOCAL",
                        created=timestamp,
                        time_fired=timestamp,
                    )
                )
        # Add states and state_changed events that should be kept
        timestamp = dt_util.utcnow() - timedelta(days=1)
        for event_id in range(200, 210):
            _add_state_and_state_changed_event(
                session,
                "sensor.keep",
                "keep",
                timestamp,
                event_id,
            )
async def test_update_interval(opp, crd):
    """Test update interval works."""
    # Test we don't update without subscriber
    async_fire_time_changed(opp, utcnow() + crd.update_interval)
    await opp.async_block_till_done()
    assert crd.data is None

    # Add subscriber
    update_callback = Mock()
    crd.async_add_listener(update_callback)

    # Test twice we update with subscriber
    async_fire_time_changed(opp, utcnow() + crd.update_interval)
    await opp.async_block_till_done()
    assert crd.data == 1

    async_fire_time_changed(opp, utcnow() + crd.update_interval)
    await opp.async_block_till_done()
    assert crd.data == 2

    # Test removing listener
    crd.async_remove_listener(update_callback)

    async_fire_time_changed(opp, utcnow() + crd.update_interval)
    await opp.async_block_till_done()

    # Test we stop updating after we lose last subscriber
    assert crd.data == 2
async def test_template_with_templated_delay_off(opp):
    """Test binary sensor template with templated delay off."""
    config = {
        "binary_sensor": {
            "platform": "template",
            "sensors": {
                "test": {
                    "friendly_name": "virtual thingy",
                    "value_template": "{{ states.sensor.test_state.state == 'on' }}",
                    "device_class": "motion",
                    "delay_off": '{{ ({ "seconds": 6 / 2 }) }}',
                }
            },
        }
    }
    opp.states.async_set("sensor.test_state", "on")
    await setup.async_setup_component(opp, binary_sensor.DOMAIN, config)
    await opp.async_block_till_done()
    await opp.async_start()

    opp.states.async_set("sensor.test_state", "off")
    await opp.async_block_till_done()

    state = opp.states.get("binary_sensor.test")
    assert state.state == "on"

    future = dt_util.utcnow() + timedelta(seconds=3)
    async_fire_time_changed(opp, future)
    await opp.async_block_till_done()

    state = opp.states.get("binary_sensor.test")
    assert state.state == "off"

    # check with time changes
    opp.states.async_set("sensor.test_state", "on")
    await opp.async_block_till_done()

    state = opp.states.get("binary_sensor.test")
    assert state.state == "on"

    opp.states.async_set("sensor.test_state", "off")
    await opp.async_block_till_done()

    state = opp.states.get("binary_sensor.test")
    assert state.state == "on"

    opp.states.async_set("sensor.test_state", "on")
    await opp.async_block_till_done()

    state = opp.states.get("binary_sensor.test")
    assert state.state == "on"

    future = dt_util.utcnow() + timedelta(seconds=3)
    async_fire_time_changed(opp, future)
    await opp.async_block_till_done()

    state = opp.states.get("binary_sensor.test")
    assert state.state == "on"
def pressed():
    """Handle the press of the LiteJet switch's button."""
    nonlocal cancel_pressed_more_than, pressed_time
    nonlocal held_less_than, held_more_than
    pressed_time = dt_util.utcnow()
    if held_more_than is None and held_less_than is None:
        opp.add_job(call_action)
    if held_more_than is not None and held_less_than is None:
        cancel_pressed_more_than = track_point_in_utc_time(
            opp,
            pressed_more_than_satisfied,
            dt_util.utcnow() + held_more_than,
        )
async def async_refresh_data(self, now):
    """Refresh the leaf data and update the datastore."""
    if self.request_in_progress:
        _LOGGER.debug("Refresh currently in progress for %s", self.leaf.nickname)
        return

    _LOGGER.debug("Updating Nissan Leaf Data")

    self.last_check = datetime.today()
    self.request_in_progress = True

    server_response = await self.async_get_battery()

    if server_response is not None:
        _LOGGER.debug("Server Response: %s", server_response.__dict__)

        if server_response.answer["status"] == HTTP_OK:
            self.data[DATA_BATTERY] = server_response.battery_percent

            # The pycarwings2 library doesn't always provide cruising ranges,
            # so we have to check if they exist before we can use them.
            # Root cause: the Nissan servers don't always send the data.
            if hasattr(server_response, "cruising_range_ac_on_km"):
                self.data[DATA_RANGE_AC] = server_response.cruising_range_ac_on_km
            else:
                self.data[DATA_RANGE_AC] = None

            if hasattr(server_response, "cruising_range_ac_off_km"):
                self.data[DATA_RANGE_AC_OFF] = server_response.cruising_range_ac_off_km
            else:
                self.data[DATA_RANGE_AC_OFF] = None

            self.data[DATA_PLUGGED_IN] = server_response.is_connected
            self.data[DATA_CHARGING] = server_response.is_charging
            async_dispatcher_send(self.opp, SIGNAL_UPDATE_LEAF)
            self.last_battery_response = utcnow()

    # Climate response only updated if battery data updated first.
    if server_response is not None:
        try:
            climate_response = await self.async_get_climate()
            if climate_response is not None:
                _LOGGER.debug(
                    "Got climate data for Leaf: %s", climate_response.__dict__
                )
                self.data[DATA_CLIMATE] = climate_response.is_hvac_running
                self.last_climate_response = utcnow()
        except CarwingsError:
            _LOGGER.error("Error fetching climate info")

    self.request_in_progress = False
    async_dispatcher_send(self.opp, SIGNAL_UPDATE_LEAF)
async def test_toggle_covers(opp, setup_comp):
    """Test toggle cover function."""
    # Start covers in open state
    await opp.services.async_call(
        DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    for _ in range(10):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(opp, future)
        await opp.async_block_till_done()

    state = opp.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN

    # Toggle will close covers
    await opp.services.async_call(
        DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    for _ in range(10):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(opp, future)
        await opp.async_block_till_done()

    state = opp.states.get(COVER_GROUP)
    assert state.state == STATE_CLOSED
    assert state.attributes[ATTR_CURRENT_POSITION] == 0

    assert opp.states.get(DEMO_COVER).state == STATE_CLOSED
    assert opp.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 0
    assert opp.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 0

    # Toggle again will open covers
    await opp.services.async_call(
        DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    for _ in range(10):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(opp, future)
        await opp.async_block_till_done()

    state = opp.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_CURRENT_POSITION] == 100

    assert opp.states.get(DEMO_COVER).state == STATE_OPEN
    assert opp.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 100
    assert opp.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 100
async def test_back_to_back_trigger_with_no_disarm_after_trigger(opp):
    """Test no disarm after back-to-back triggers."""
    assert await async_setup_component(
        opp,
        alarm_control_panel.DOMAIN,
        {
            "alarm_control_panel": {
                "platform": "manual",
                "name": "test",
                "trigger_time": 5,
                "arming_time": 0,
                "delay_time": 0,
                "disarm_after_trigger": False,
            }
        },
    )
    await opp.async_block_till_done()

    entity_id = "alarm_control_panel.test"

    assert opp.states.get(entity_id).state == STATE_ALARM_DISARMED

    await common.async_alarm_arm_away(opp, CODE, entity_id)
    assert opp.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY

    await common.async_alarm_trigger(opp, entity_id=entity_id)
    assert opp.states.get(entity_id).state == STATE_ALARM_TRIGGERED

    future = dt_util.utcnow() + timedelta(seconds=5)
    with patch(
        "openpeerpower.components.manual.alarm_control_panel.dt_util.utcnow",
        return_value=future,
    ):
        async_fire_time_changed(opp, future)
        await opp.async_block_till_done()

    assert opp.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY

    await common.async_alarm_trigger(opp, entity_id=entity_id)
    assert opp.states.get(entity_id).state == STATE_ALARM_TRIGGERED

    future = dt_util.utcnow() + timedelta(seconds=5)
    with patch(
        "openpeerpower.components.manual.alarm_control_panel.dt_util.utcnow",
        return_value=future,
    ):
        async_fire_time_changed(opp, future)
        await opp.async_block_till_done()

    assert opp.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
async def test_chain_history(opp, values, missing=False):
    """Test if filter chaining works."""
    config = {
        "sensor": {
            "platform": "filter",
            "name": "test",
            "entity_id": "sensor.test_monitored",
            "filters": [
                {"filter": "outlier", "window_size": 10, "radius": 4.0},
                {"filter": "lowpass", "time_constant": 10, "precision": 2},
                {"filter": "throttle", "window_size": 1},
            ],
        },
    }
    await async_init_recorder_component(opp)

    t_0 = dt_util.utcnow() - timedelta(minutes=1)
    t_1 = dt_util.utcnow() - timedelta(minutes=2)
    t_2 = dt_util.utcnow() - timedelta(minutes=3)
    t_3 = dt_util.utcnow() - timedelta(minutes=4)

    if missing:
        fake_states = {}
    else:
        fake_states = {
            "sensor.test_monitored": [
                ha.State("sensor.test_monitored", 18.0, last_changed=t_0),
                ha.State("sensor.test_monitored", "unknown", last_changed=t_1),
                ha.State("sensor.test_monitored", 19.0, last_changed=t_2),
                ha.State("sensor.test_monitored", 18.2, last_changed=t_3),
            ]
        }

    with patch(
        "openpeerpower.components.recorder.history.state_changes_during_period",
        return_value=fake_states,
    ), patch(
        "openpeerpower.components.recorder.history.get_last_state_changes",
        return_value=fake_states,
    ):
        with assert_setup_component(1, "sensor"):
            assert await async_setup_component(opp, "sensor", config)
            await opp.async_block_till_done()

        for value in values:
            opp.states.async_set(config["sensor"]["entity_id"], value.state)
            await opp.async_block_till_done()

        state = opp.states.get("sensor.test")
        if missing:
            assert state.state == "18.05"
        else:
            assert state.state == "17.05"
async def test_availability(ismartgateapi_mock, opp: OpenPeerPower) -> None:
    """Test availability."""
    closed_door_response = _mocked_ismartgate_closed_door_response()

    expected_attributes = {
        "device_class": "garage",
        "door_id": 1,
        "friendly_name": "Door1",
        "supported_features": SUPPORT_CLOSE | SUPPORT_OPEN,
    }

    api = MagicMock(ISmartGateApi)
    api.async_info.return_value = closed_door_response
    ismartgateapi_mock.return_value = api

    config_entry = MockConfigEntry(
        domain=DOMAIN,
        source=SOURCE_USER,
        data={
            CONF_DEVICE: DEVICE_TYPE_ISMARTGATE,
            CONF_IP_ADDRESS: "127.0.0.1",
            CONF_USERNAME: "******",
            CONF_PASSWORD: "******",
        },
    )
    config_entry.add_to_opp(opp)

    assert opp.states.get("cover.door1") is None
    assert await opp.config_entries.async_setup(config_entry.entry_id)
    await opp.async_block_till_done()
    assert opp.states.get("cover.door1")
    assert (
        opp.states.get("cover.door1").attributes[ATTR_DEVICE_CLASS]
        == DEVICE_CLASS_GARAGE
    )
    assert (
        opp.states.get("cover.door2").attributes[ATTR_DEVICE_CLASS]
        == DEVICE_CLASS_GATE
    )

    api.async_info.side_effect = Exception("Error")

    async_fire_time_changed(opp, utcnow() + timedelta(hours=2))
    await opp.async_block_till_done()
    assert opp.states.get("cover.door1").state == STATE_UNAVAILABLE

    api.async_info.side_effect = None
    api.async_info.return_value = closed_door_response
    api.async_get_door_statuses_from_info.return_value = {
        1: DoorStatus.CLOSED,
        2: DoorStatus.CLOSED,
    }

    async_fire_time_changed(opp, utcnow() + timedelta(hours=2))
    await opp.async_block_till_done()
    assert opp.states.get("cover.door1").state == STATE_CLOSED
    assert dict(opp.states.get("cover.door1").attributes) == expected_attributes
def test_measure(self):
    """Test the history statistics sensor measure."""
    t0 = dt_util.utcnow() - timedelta(minutes=40)
    t1 = t0 + timedelta(minutes=20)
    t2 = dt_util.utcnow() - timedelta(minutes=10)

    # Start     t0        t1        t2        End
    # |--20min--|--20min--|--10min--|--10min--|
    # |---off---|---on----|---off---|---on----|

    fake_states = {
        "binary_sensor.test_id": [
            op.State("binary_sensor.test_id", "on", last_changed=t0),
            op.State("binary_sensor.test_id", "off", last_changed=t1),
            op.State("binary_sensor.test_id", "on", last_changed=t2),
        ]
    }

    start = Template("{{ as_timestamp(now()) - 3600 }}", self.opp)
    end = Template("{{ now() }}", self.opp)

    sensor1 = HistoryStatsSensor(
        self.opp, "binary_sensor.test_id", "on", start, end, None, "time", "Test"
    )
    sensor2 = HistoryStatsSensor(
        self.opp, "unknown.id", "on", start, end, None, "time", "Test"
    )
    sensor3 = HistoryStatsSensor(
        self.opp, "binary_sensor.test_id", "on", start, end, None, "count", "test"
    )
    sensor4 = HistoryStatsSensor(
        self.opp, "binary_sensor.test_id", "on", start, end, None, "ratio", "test"
    )

    assert sensor1._type == "time"
    assert sensor3._type == "count"
    assert sensor4._type == "ratio"

    with patch(
        "openpeerpower.components.history.state_changes_during_period",
        return_value=fake_states,
    ):
        with patch("openpeerpower.components.history.get_state", return_value=None):
            sensor1.update()
            sensor2.update()
            sensor3.update()
            sensor4.update()

    assert sensor1.state == 0.5
    assert sensor2.state is None
    assert sensor3.state == 2
    assert sensor4.state == 50
def update(self):
    """Get updated stats from API."""
    last_update = dt_util.utcnow() - self._client.last_zone_update
    _LOGGER.debug("Zone: %s ", self._zone)
    if last_update > datetime.timedelta(seconds=1):
        self._client.zones = self._client.list_zones()
        self._client.last_zone_update = dt_util.utcnow()
        _LOGGER.debug("Updated from zone: %s", self._zone["name"])

    if hasattr(self._client, "zones"):
        self._zone = next(
            (x for x in self._client.zones if x["number"] == self._number), None
        )
async def simulate_time(opp, mock_litejet, delta):
    """Simulate a time change in tests."""
    _LOGGER.info(
        "*** simulate time change by %s: %s", delta, mock_litejet.start_time + delta
    )
    mock_litejet.last_delta = delta
    with mock.patch(
        "openpeerpower.helpers.condition.dt_util.utcnow",
        return_value=mock_litejet.start_time + delta,
    ):
        _LOGGER.info("now=%s", dt_util.utcnow())
        async_fire_time_changed(opp, mock_litejet.start_time + delta)
        await opp.async_block_till_done()
        _LOGGER.info("done with now=%s", dt_util.utcnow())
async def check_has_unique_id(entity, ready_callback, timeout_callback):
    """Wait for entity to have unique_id."""
    start_time = dt_util.utcnow()
    while True:
        waited = int((dt_util.utcnow() - start_time).total_seconds())
        if entity.unique_id:
            ready_callback(waited)
            return
        if waited >= const.NODE_READY_WAIT_SECS:
            # Wait up to NODE_READY_WAIT_SECS seconds for unique_id to appear.
            timeout_callback(waited)
            return
        await asyncio.sleep(1)
async def test_device_trackers(opp, legacy_patchable_time):
    """Test device_trackers created by mikrotik."""
    # test devices are added from wireless list only
    hub = await setup_mikrotik_entry(opp)

    device_1 = opp.states.get("device_tracker.device_1")
    assert device_1 is not None
    assert device_1.state == "home"
    assert device_1.attributes["ip"] == "0.0.0.1"
    assert "ip_address" not in device_1.attributes
    assert device_1.attributes["mac"] == "00:00:00:00:00:01"
    assert device_1.attributes["host_name"] == "Device_1"
    assert "mac_address" not in device_1.attributes
    device_2 = opp.states.get("device_tracker.device_2")
    assert device_2 is None

    with patch.object(mikrotik.hub.MikrotikData, "command", new=mock_command):
        # test device_2 is added after connecting to wireless network
        WIRELESS_DATA.append(DEVICE_2_WIRELESS)

        await hub.async_update()
        await opp.async_block_till_done()

        device_2 = opp.states.get("device_tracker.device_2")
        assert device_2 is not None
        assert device_2.state == "home"
        assert device_2.attributes["ip"] == "0.0.0.2"
        assert "ip_address" not in device_2.attributes
        assert device_2.attributes["mac"] == "00:00:00:00:00:02"
        assert "mac_address" not in device_2.attributes
        assert device_2.attributes["host_name"] == "Device_2"

        # test state remains home if last_seen < consider_home_interval
        del WIRELESS_DATA[1]  # device 2 is removed from wireless list
        hub.api.devices["00:00:00:00:00:02"]._last_seen = dt_util.utcnow() - timedelta(
            minutes=4
        )
        await hub.async_update()
        await opp.async_block_till_done()

        device_2 = opp.states.get("device_tracker.device_2")
        assert device_2.state != "not_home"

        # test state changes to away if last_seen > consider_home_interval
        hub.api.devices["00:00:00:00:00:02"]._last_seen = dt_util.utcnow() - timedelta(
            minutes=5
        )
        await hub.async_update()
        await opp.async_block_till_done()

        device_2 = opp.states.get("device_tracker.device_2")
        assert device_2.state == "not_home"