async def test_znp_nvram_writes(znp, event_loop):
    """NVRAM writes must validate their inputs and check the response status."""
    # Raw numerical NVIDs are rejected: overflows can't be checked without
    # knowing which NVRAM item is being written
    with pytest.raises(ValueError):
        await znp.nvram_write(0x0003, t.uint8_t(0xAB))

    # Untyped integers are rejected as well -- the value must be serializable
    with pytest.raises(TypeError):
        await znp.nvram_write(nvids.NwkNvIds.STARTUP_OPTION, 0xAB)

    # A typed value, however, works
    assert nvids.NwkNvIds.STARTUP_OPTION == 0x0003

    # All three accepted forms below must produce this exact frame
    expected_frame = c.SYS.OSALNVWrite.Req(
        Id=nvids.NwkNvIds.STARTUP_OPTION,
        Offset=0x00,
        Value=t.ShortBytes(b"\xAB"),
    ).to_frame()

    event_loop.call_soon(
        znp.frame_received,
        c.SYS.OSALNVWrite.Rsp(Status=t.Status.SUCCESS).to_frame(),
    )
    await znp.nvram_write(nvids.NwkNvIds.STARTUP_OPTION, t.uint8_t(0xAB))
    znp._uart.send.assert_called_once_with(expected_frame)
    znp._uart.send.reset_mock()

    # Explicitly serializing the value to bytes works too
    event_loop.call_soon(
        znp.frame_received,
        c.SYS.OSALNVWrite.Rsp(Status=t.Status.SUCCESS).to_frame(),
    )
    await znp.nvram_write(
        nvids.NwkNvIds.STARTUP_OPTION, t.uint8_t(0xAB).serialize()
    )
    znp._uart.send.assert_called_once_with(expected_frame)
    znp._uart.send.reset_mock()

    # As does passing in bytes directly
    event_loop.call_soon(
        znp.frame_received,
        c.SYS.OSALNVWrite.Rsp(Status=t.Status.SUCCESS).to_frame(),
    )
    await znp.nvram_write(nvids.NwkNvIds.STARTUP_OPTION, b"\xAB")
    znp._uart.send.assert_called_once_with(expected_frame)
    znp._uart.send.reset_mock()

    # A non-SUCCESS SYS_OSAL_NV_WRITE response status must raise
    event_loop.call_soon(
        znp.frame_received,
        c.SYS.OSALNVWrite.Rsp(Status=t.Status.FAILURE).to_frame(),
    )
    with pytest.raises(InvalidCommandResponse):
        await znp.nvram_write(nvids.NwkNvIds.STARTUP_OPTION, t.uint8_t(0xAB))
def _receive_zdo_message(
    self, cluster: ZDOCmd, *, tsn: t.uint8_t, sender, **zdo_kwargs
) -> None:
    """Rebuild a raw ZDO frame from parsed fields and feed it to zigpy.

    The keyword arguments must exactly match the field names declared for
    the cluster in ZDO_CLUSTERS.
    """
    field_names, field_types = ZDO_CLUSTERS[cluster]
    assert set(zdo_kwargs) == set(field_names)

    zdo_args = []

    for name, field_type in zip(field_names, field_types):
        # XXX: bugfix for zigpy optional struct bug
        if field_type.__name__ == "Optional":
            field_type = field_type.__mro__[1]

        zdo_args.append(field_type(zdo_kwargs[name]))

    # A ZDO payload is the TSN followed by the serialized fields
    message = t.serialize_list([t.uint8_t(tsn)] + zdo_args)
    LOGGER.trace("Pretending we received a ZDO message: %s", message)

    self.handle_message(
        sender=sender,
        profile=zigpy.profiles.zha.PROFILE_ID,
        cluster=cluster,
        src_ep=ZDO_ENDPOINT,
        dst_ep=ZDO_ENDPOINT,
        message=message,
    )
async def test_update_network_bad_channel(mocker, caplog, application):
    """A channel outside of the provided channel mask must be rejected."""
    app, znp_server = application

    # Channel 12 is not in the [11, 15, 20] mask
    with pytest.raises(ValueError):
        await app.update_network(
            channel=t.uint8_t(12),
            channels=t.Channels.from_channel_list([11, 15, 20]),
        )
async def _send_zdo_request(
    self, dst_addr, dst_ep, src_ep, cluster, sequence, options, radius, data
):
    """
    Zigpy doesn't send ZDO requests via TI's ZDO_* MT commands,
    so it will never receive a reply because ZNP intercepts ZDO replies, never
    sends a DataConfirm, and instead replies with one of its ZDO_* MT responses.

    This method translates the ZDO_* MT response into one zigpy can handle.
    """
    LOGGER.trace(
        "Intercepted a ZDO request: dst_addr=%s, dst_ep=%s, src_ep=%s, "
        "cluster=%s, sequence=%s, options=%s, radius=%s, data=%s",
        dst_addr,
        dst_ep,
        src_ep,
        cluster,
        sequence,
        options,
        radius,
        data,
    )
    assert dst_ep == ZDO_ENDPOINT

    # Map the zigpy ZDO cluster to the equivalent ZNP MT request, its matching
    # MT callback, and a converter that rebuilds the ZDO response payload
    rsp_cluster, req_factory, callback_factory, converter = ZDO_CONVERTERS[cluster]
    request = req_factory(dst_addr.address, ep=src_ep)
    callback = callback_factory(dst_addr.address)

    LOGGER.debug(
        "Intercepted AP ZDO request and replaced with %s - %s", request, callback
    )

    # Bound the wait for the MT callback so a silent device can't hang us
    async with async_timeout.timeout(ZDO_REQUEST_TIMEOUT):
        response = await self._znp.request_callback_rsp(
            request=request, RspStatus=t.Status.Success, callback=callback
        )

    device = self.get_device(nwk=dst_addr.address)

    # Build up a ZDO response
    message = t.serialize_list(
        [t.uint8_t(sequence), response.Status, response.NWK]
        + converter(response, device)
    )
    LOGGER.trace("Pretending we received a ZDO message: %s", message)

    # We do not get any LQI info here.
    # Note src_ep/dst_ep are swapped relative to the request: this synthetic
    # frame travels device -> coordinator.
    self.handle_message(
        sender=device,
        profile=zigpy.profiles.zha.PROFILE_ID,
        cluster=rsp_cluster,
        src_ep=dst_ep,
        dst_ep=src_ep,
        message=message,
    )

    return response.Status, "Request sent successfully"
def test_serialize_list():
    """serialize_list should concatenate the serialization of each item."""

    class TestList(t.LVList, item_type=t.uint8_t, length_type=t.uint8_t):
        pass

    items = [t.uint8_t(0xF0), t.Bytes(b"asd"), TestList([0xAB, 0xCD])]

    # uint8 + raw bytes + length-prefixed list, back to back
    assert t.serialize_list(items) == b"\xF0asd\x02\xAB\xCD"

    # Serializing nothing produces nothing
    assert t.serialize_list([]) == b""
class TransportFrame:
    """Transport frame."""

    # Start-of-frame delimiter byte
    SOF = t.uint8_t(0xFE)

    payload: GeneralFrame = attr.ib()

    @classmethod
    def deserialize(cls, data: bytes) -> typing.Tuple["TransportFrame", bytes]:
        """Deserialize frame."""
        sof, data = t.uint8_t.deserialize(data)

        if sof != cls.SOF:
            raise InvalidFrame(
                f"Expected frame to start with SOF 0x{cls.SOF:02X}, got 0x{sof:02X}"
            )

        gen_frame, data = GeneralFrame.deserialize(data)
        checksum, data = t.uint8_t.deserialize(data)

        frame = cls(gen_frame)

        # The trailing FCS byte must match the XOR of the payload
        if frame.checksum() != checksum:
            raise InvalidFrame(
                f"Invalid frame checksum for data {gen_frame}: "
                f"expected 0x{frame.checksum():02X}, got 0x{checksum:02X}"
            )

        return frame, data

    def checksum(self) -> t.uint8_t:
        """Calculate FCS on the payload."""
        serialized = self.payload.serialize()
        fcs = functools.reduce(lambda acc, byte: acc ^ byte, serialized)

        return t.uint8_t(fcs)

    def serialize(self) -> bytes:
        """Serialize data."""
        parts = [
            self.SOF.serialize(),
            self.payload.serialize(),
            self.checksum().serialize(),
        ]

        return b"".join(parts)
async def test_update_network(mocker, caplog, application):
    """A full update_network() call must push every setting to the radio."""
    app, znp_server = application
    await app.startup(auto_form=False)

    mocker.patch.object(app, "_reset", new=CoroutineMock())

    # The new network settings we will apply
    channel = t.uint8_t(15)
    pan_id = t.PanId(0x1234)
    extended_pan_id = t.ExtendedPanId(range(8))
    channels = t.Channels.from_channel_list([11, 15, 20])
    network_key = t.KeyData(range(16))

    # Every radio request we expect gets a canned SUCCESS reply
    channels_updated = znp_server.reply_once_to(
        request=c.Util.SetChannels.Req(Channels=channels),
        responses=[c.Util.SetChannels.Rsp(Status=t.Status.SUCCESS)],
    )

    bdb_set_primary_channel = znp_server.reply_once_to(
        request=c.AppConfig.BDBSetChannel.Req(IsPrimary=True, Channel=channels),
        responses=[c.AppConfig.BDBSetChannel.Rsp(Status=t.Status.SUCCESS)],
    )

    bdb_set_secondary_channel = znp_server.reply_once_to(
        request=c.AppConfig.BDBSetChannel.Req(
            IsPrimary=False, Channel=t.Channels.NO_CHANNELS
        ),
        responses=[c.AppConfig.BDBSetChannel.Rsp(Status=t.Status.SUCCESS)],
    )

    set_pan_id = znp_server.reply_once_to(
        request=c.Util.SetPanId.Req(PanId=pan_id),
        responses=[c.Util.SetPanId.Rsp(Status=t.Status.SUCCESS)],
    )

    set_extended_pan_id = znp_server.reply_once_to(
        request=c.SYS.OSALNVWrite.Req(
            Id=NwkNvIds.EXTENDED_PAN_ID,
            Offset=0,
            Value=extended_pan_id.serialize(),
        ),
        responses=[c.SYS.OSALNVWrite.Rsp(Status=t.Status.SUCCESS)],
    )

    set_network_key_util = znp_server.reply_once_to(
        request=c.Util.SetPreConfigKey.Req(PreConfigKey=network_key),
        responses=[c.Util.SetPreConfigKey.Rsp(Status=t.Status.SUCCESS)],
    )

    set_network_key_nvram = znp_server.reply_once_to(
        request=c.SYS.OSALNVWrite.Req(
            Id=NwkNvIds.PRECFGKEYS_ENABLE,
            Offset=0,
            Value=t.Bool(True).serialize(),
        ),
        responses=[c.SYS.OSALNVWrite.Rsp(Status=t.Status.SUCCESS)],
    )

    set_nib_nvram = znp_server.reply_once_to(
        request=c.SYS.OSALNVWrite.Req(Id=NwkNvIds.NIB, Offset=0, partial=True),
        responses=[c.SYS.OSALNVWrite.Rsp(Status=t.Status.SUCCESS)],
    )

    # The update succeeds, with warnings for the unsupported `tc_` kwargs
    with caplog.at_level(logging.WARNING):
        await app.update_network(
            channel=channel,
            channels=channels,
            extended_pan_id=extended_pan_id,
            network_key=network_key,
            pan_id=pan_id,
            tc_address=t.EUI64(range(8)),
            tc_link_key=t.KeyData(range(8)),
            update_id=0,
            reset=True,
        )

    # We should receive a few warnings for `tc_` stuff
    assert len(caplog.records) >= 2

    # Every expected request must have been answered
    await channels_updated
    await bdb_set_primary_channel
    await bdb_set_secondary_channel
    await set_pan_id
    await set_extended_pan_id
    await set_network_key_util
    await set_network_key_nvram
    await set_nib_nvram

    app._reset.assert_called_once_with()

    # Ensure we set everything we could
    assert app.nwk_update_id is None  # We can't use it
    assert app.channel == channel
    assert app.channels == channels
    assert app.pan_id == pan_id
    assert app.extended_pan_id == extended_pan_id
def id(self) -> t.uint8_t:
    """Return CommandHeader id."""
    # The command id lives in the upper byte of the header value
    high_byte = self >> 8
    return t.uint8_t(high_byte)
def cmd0(self) -> t.uint8_t:
    """Return the Cmd0 byte, masked out of the low byte of the header."""
    low_byte = self & 0x00FF
    return t.uint8_t(low_byte)
def test_int_out_of_bounds():
    """uint8_t must reject values outside of its [0, 0xFF] range."""
    for invalid in (-1, 0xFF + 1):
        with pytest.raises(ValueError):
            t.uint8_t(invalid)
async def startup(self, auto_form=False):
    """Perform a complete application startup.

    Connects to the radio, writes baseline NVRAM settings, resets the
    device, optionally forms a network, starts the coordinator, and
    registers our endpoints.
    """
    znp = ZNP(self.config)
    znp.set_application(self)
    self._bind_callbacks(znp)
    await znp.connect()
    self._znp = znp

    # XXX: To make sure we don't switch to the wrong device upon reconnect,
    # update our config to point to the last-detected port.
    if self._config[conf.CONF_DEVICE][conf.CONF_DEVICE_PATH] == "auto":
        self._config[conf.CONF_DEVICE][
            conf.CONF_DEVICE_PATH
        ] = self._znp._uart.transport.serial.name

    # It's better to configure these explicitly than rely on the NVRAM defaults
    await self._znp.nvram_write(NwkNvIds.CONCENTRATOR_ENABLE, t.Bool(True))
    await self._znp.nvram_write(NwkNvIds.CONCENTRATOR_DISCOVERY, t.uint8_t(120))
    await self._znp.nvram_write(NwkNvIds.CONCENTRATOR_RC, t.Bool(True))
    await self._znp.nvram_write(NwkNvIds.SRC_RTG_EXPIRY_TIME, t.uint8_t(255))
    await self._znp.nvram_write(NwkNvIds.NWK_CHILD_AGE_ENABLE, t.Bool(False))

    # XXX: the undocumented `znpBasicCfg` request can do this
    await self._znp.nvram_write(
        NwkNvIds.LOGICAL_TYPE, t.DeviceLogicalType.Coordinator
    )

    # Reset to make the above NVRAM writes take effect.
    # This also ensures any previously-started network joins don't continue.
    await self._reset()

    # HAS_CONFIGURED_ZSTACK3 == b"\x55" marks a previously-formed network;
    # a missing NV item (INVALID_PARAMETER) means we have never configured
    try:
        is_configured = (
            await self._znp.nvram_read(NwkNvIds.HAS_CONFIGURED_ZSTACK3)
        ) == b"\x55"
    except InvalidCommandResponse as e:
        assert e.response.Status == t.Status.INVALID_PARAMETER
        is_configured = False

    if not is_configured and not auto_form:
        raise RuntimeError("Cannot start application, network is not formed")
    elif auto_form and is_configured:
        LOGGER.info("ZNP is already configured, no need to form a network.")
    elif auto_form and not is_configured:
        await self.form_network()

    # Apply the configured TX power, if any
    if self.config[conf.CONF_ZNP_CONFIG][conf.CONF_TX_POWER] is not None:
        dbm = self.config[conf.CONF_ZNP_CONFIG][conf.CONF_TX_POWER]

        await self._znp.request(
            c.SYS.SetTxPower.Req(TXPower=dbm), RspStatus=t.Status.SUCCESS
        )

    device_info = await self._znp.request(
        c.Util.GetDeviceInfo.Req(), RspStatus=t.Status.SUCCESS
    )
    self._ieee = device_info.IEEE

    if device_info.DeviceState != t.DeviceState.StartedAsCoordinator:
        # Start the application and wait until it's ready
        await self._znp.request_callback_rsp(
            request=c.ZDO.StartupFromApp.Req(StartDelay=100),
            RspState=c.zdo.StartupState.RestoredNetworkState,
            callback=c.ZDO.StateChangeInd.Callback(
                State=t.DeviceState.StartedAsCoordinator
            ),
        )

    # Get our active endpoints
    endpoints = await self._znp.request_callback_rsp(
        request=c.ZDO.ActiveEpReq.Req(DstAddr=0x0000, NWKAddrOfInterest=0x0000),
        RspStatus=t.Status.SUCCESS,
        callback=c.ZDO.ActiveEpRsp.Callback(partial=True),
    )

    # Clear out the list of active endpoints
    for endpoint in endpoints.ActiveEndpoints:
        await self._znp.request(
            c.AF.Delete.Req(Endpoint=endpoint), RspStatus=t.Status.SUCCESS
        )

    # Register our endpoints
    await self._register_endpoint(endpoint=1)
    await self._register_endpoint(
        endpoint=8,
        device_id=zigpy.profiles.zha.DeviceType.IAS_CONTROL,
        output_clusters=[clusters.security.IasZone.cluster_id],
    )
    await self._register_endpoint(endpoint=11)
    await self._register_endpoint(endpoint=12)
    await self._register_endpoint(
        endpoint=13, input_clusters=[clusters.general.Ota.cluster_id]
    )
    await self._register_endpoint(
        endpoint=100, profile_id=zigpy.profiles.zll.PROFILE_ID, device_id=0x0005
    )

    # Structure is in `zstack/stack/nwk/nwk.h`
    # NOTE(review): offsets 24 and 40:44 presumably correspond to the NIB's
    # logical channel and channel mask fields -- confirm against nwk.h
    nib = await self._znp.nvram_read(NwkNvIds.NIB)
    self._channel = nib[24]
    self._channels = t.Channels.deserialize(nib[40:44])[0]
def checksum(self) -> t.uint8_t:
    """Calculate FCS on the payload."""
    serialized = self.payload.serialize()
    fcs = functools.reduce(lambda acc, byte: acc ^ byte, serialized)

    return t.uint8_t(fcs)
def length(self) -> t.uint8_t:
    """Length of the frame."""
    data_length = len(self.data)
    return t.uint8_t(data_length)