def unpack_and_splice(cls, version, buf):
    """Parse one record of the given *version* from *buf*.

    Returns the remaining buffer and the populated instance.
    """
    result = cls()
    if version < 3:
        # v2 frames carry only the timeout counter; the configure
        # status field was introduced with v3, so default it to 0.
        buf, (result.timeouts,) = unpack_and_splice(buf, cls._v2)
        result.configure_status = 0x00
    else:
        buf, (result.configure_status, result.timeouts) = \
            unpack_and_splice(buf, cls._v3)
    return buf, result
def from_buf(cls, rtc_timestamp, buf):
    """Construct an instance from the wire format in *buf*.

    The remaining buffer after the fixed-size header is discarded.
    """
    buf, fields = unpack_and_splice(buf, cls._v1)
    return cls(rtc_timestamp, *fields)
def _handle_data(self, remainder, valid_connection, **kwargs):
    """Process a DATA packet.

    Splits the packet body into (serial number, payload) entries,
    buffers them, then delivers every buffered payload which is
    consecutive up to ``_rx_max_consecutive_sn`` and emits an ACK.
    """
    if not valid_connection:
        self.logger.debug("ignoring DATA from unknown connection")
        return

    first_sn = None
    # split the packet body into its individual data entries
    while remainder:
        remainder, (raw_sn, length) = unpack_and_splice(
            remainder, data_entry_header_fmt)
        entry_sn = SerialNumber(self.SERIAL_BITS, raw_sn)
        if first_sn is None:
            first_sn = entry_sn
        entry_payload = remainder[:length]
        remainder = remainder[length:]
        self._handle_data_entry(entry_sn, entry_payload)

    # emit everything which is consecutive; stop at the first gap
    delete_up_to = 0
    for index, (recvd_sn, payload) in enumerate(self._rx_buffer):
        delete_up_to = index
        if recvd_sn > self._rx_max_consecutive_sn:
            break
        self.logger.debug("emitting event for %r", payload)
        self.on_data_received(payload)
    else:
        # loop ran to completion: the whole buffer was delivered
        delete_up_to = len(self._rx_buffer)

    self.logger.debug(
        "rx max = %s, delete_up_to = %d, rx buffer = %r",
        self._rx_max_consecutive_sn, delete_up_to, self._rx_buffer)
    del self._rx_buffer[:delete_up_to]
    self._rx_last_sn = first_sn
    self._emit_ack()
def unpack_and_splice(cls, version, buf):
    """Parse the transaction-overrun counter from *buf*."""
    result = cls()
    buf, (result.transaction_overruns,) = unpack_and_splice(buf, cls._v2)
    return buf, result
def unpack_and_splice(cls, version, buf):
    """Parse sequence number, timestamp and period (ms) from *buf*."""
    buf, (seq, ts, period_ms) = unpack_and_splice(buf, cls._v1)
    # the wire format carries the period in milliseconds
    return buf, cls(seq, ts, timedelta(milliseconds=period_ms))
def _handle_app_req(self, remainder, addr, **kwargs):
    """Process an APP_REQ packet and send an APP_RESP back to *addr*.

    The payload after the request header is handed to
    ``app_request_handler``; its return value becomes the response
    body. Without a handler the request is logged and dropped.
    """
    remainder, (request_id, type_) = unpack_and_splice(
        remainder,
        app_req_header_fmt,
    )
    self.logger.debug("app request 0x%08x: request received (type=%r)",
                      request_id, type_)
    if self.app_request_handler is None:
        self.logger.warning(
            "app request 0x%08x: received app request, but no handler "
            "installed. assign a callable to the app_request_handler "
            "attribute",
            request_id,
        )
        return
    try:
        response = self.app_request_handler(type_, remainder)
    except:  # NOQA
        self.logger.exception(
            "app request 0x%08x: app request handler failed on payload "
            "(type=%r) %r",
            request_id,
            type_,
            remainder,
        )
        # bug fix: bail out here -- otherwise `response` is unbound
        # below and composing the reply raises NameError
        return
    packet = b"".join([
        self._compose_common_header(PacketType.APP_RESP),
        app_resp_header_fmt.pack(request_id),
        response,
    ])
    self._tx(packet, dest=addr)
def from_buf(cls, type_, buf):
    """Decode a batch of (ts, sqavg, min, max) samples from *buf*.

    The squared average is transmitted as a scaled fixed-point value
    and is normalised using the *factor* from the header.
    """
    buf, (factor,) = unpack_and_splice(buf, cls._header)
    full_scale = 2**24 - 1
    # keep the original division order for bit-identical float results
    samples = [
        (ts, sqavg / full_scale / factor, min_, max_)
        for ts, sqavg, min_, max_ in unpack_all(buf, cls._sample)
    ]
    return cls(samples, type_=type_)
def unpack_and_splice(cls, version, buf):
    """Parse the buffer-usage counters from *buf*."""
    buf, counters = unpack_and_splice(buf, cls._v1)
    most_allocated, allocated, ready, total = counters
    return buf, cls(most_allocated, allocated, ready, total)
def _handle_dack(self, remainder, valid_connection, **kwargs):
    """Process a DACK packet.

    Each entry acknowledges the inclusive serial-number range
    [first, last]; every SN in the range is marked as received by
    the remote side.
    """
    if not valid_connection:
        self.logger.debug("ignoring DACK from unknown connection")
        return
    while remainder:
        remainder, (first, last) = unpack_and_splice(
            remainder,
            dack_entry_fmt,
        )
        sn = first
        while sn <= last:
            self._mark_received_remotely_single(sn)
            sn += 1
def from_buf(cls, type_, buf):
    """Decode a timestamped batch of (id, value) samples from *buf*.

    Values are transmitted as fixed-point numbers with 4 fractional
    bits, hence the division by 16.
    """
    buf, (timestamp,) = unpack_and_splice(buf, cls._header)
    samples = (
        (id_, raw / 16)
        for id_, raw in unpack_all(buf, cls._sample)
    )
    return cls(timestamp, samples, type_=type_)
def from_buf(cls, type_, buf):
    """Decode a compressed sample stream message from *buf*.

    The bytes after the header are decompressed relative to the
    *reference* value carried in the header.
    """
    buf, (seq, reference) = unpack_and_splice(buf, cls._header)
    samples = sensor_stream.decompress(reference, buf)
    return cls(type_, seq, samples)
def unpack_and_splice(cls, version, buf):
    """Parse per-category CPU tick counters from *buf*.

    The flat counter array is split into idle/scheduler ticks, the
    named interrupt counters and the per-task tail.
    """
    buf, raw = unpack_and_splice(buf, cls._v1)
    values = list(raw)
    idle = values[lib.CPU_IDLE]
    sched = values[lib.CPU_SCHED]
    interrupts = {
        name: values[index]
        for index, name in cls.INTERRUPT_MAP.items()
    }
    tasks = values[lib.CPU_TASK_BASE:]
    return buf, cls(idle, sched, interrupts, tasks)
def unpack_and_splice(cls, version, buf):
    """Parse the idle tick counter and *count* task metric records."""
    buf, (count, idle_ticks) = unpack_and_splice(buf, cls._v1)
    tasks = []
    for _ in range(count):
        buf, task = cls.TaskMetrics.unpack_and_splice(version, buf)
        tasks.append(task)
    return buf, cls(idle_ticks, tuple(tasks))
def _handle_app_resp(self, remainder, **kwargs):
    """Process an APP_RESP packet.

    Resolves the future registered for the request id, if any; a
    response without a pending future is assumed to be late and is
    only logged.
    """
    remainder, (request_id,) = unpack_and_splice(
        remainder,
        app_resp_header_fmt,
    )
    self.logger.debug("app request 0x%08x: response received",
                      request_id)
    try:
        future = self._rx_app_requests[request_id]
    except KeyError:
        self.logger.debug(
            "app request 0x%08x: no response future. "
            "late response?", request_id)
    else:
        future.set_result(remainder)
def _on_datagram(self, remainder):
    """Decode a data frame and dispatch the decoded object.

    Decoding failures are logged and the frame is dropped; decoded
    objects are forwarded via on_message together with the frame's
    RTC timestamp.
    """
    remainder, (raw_ts, type_raw) = unpack_and_splice(
        remainder,
        data_frame_header_fmt,
    )
    # NOTE(review): utcfromtimestamp returns a naive UTC datetime and
    # is deprecated since Python 3.12; kept for identical behaviour.
    rtc_timestamp = datetime.utcfromtimestamp(raw_ts)
    try:
        type_ = DataFrameType(type_raw)
    except ValueError:
        self.logger.error("invalid data frame type: %r", type_raw)
        return
    if type_ == DataFrameType.SBX:
        try:
            obj = decode_sbx_message(remainder)
        except Exception:  # NOQA
            self.logger.warning("failed to decode SBX message",
                                exc_info=True)
        else:
            self.on_message(rtc_timestamp, obj)
    elif type_ == DataFrameType.ESP_STATUS:
        try:
            obj = ESPStatusMessage.from_buf(rtc_timestamp, remainder)
        except Exception:
            self.logger.warning("failed to decode ESP status message %r",
                                remainder, exc_info=True)
        else:
            self.on_message(rtc_timestamp, obj)
    else:
        self.logger.debug("no handler for %s data frame", type_)
def from_buf(cls, type_, buf):
    """Decode a BME280 readout message from *buf*.

    The message carries the sensor's raw calibration blocks together
    with the raw readout; compensation is applied here to obtain
    temperature, pressure and humidity.

    Raises ValueError when trailing bytes remain after the message.
    """
    buf, (timestamp, instance, dig88, dige1, readout) = \
        unpack_and_splice(buf, cls._message)
    if buf:
        raise ValueError("too much data in buffer")
    calibration = bme280.get_calibration(dig88, dige1)
    temp_raw, pressure_raw, humidity_raw = bme280.get_readout(readout)
    # temperature must be compensated first: pressure and humidity
    # compensation both depend on it
    temperature = bme280.compensate_temperature(calibration, temp_raw)
    pressure = bme280.compensate_pressure(
        calibration, pressure_raw, temperature)
    humidity = bme280.compensate_humidity(
        calibration, humidity_raw, temperature)
    return cls(
        timestamp,
        temperature,
        pressure,
        humidity,
        type_=type_,
        instance=instance,
    )
def datagram_received(self, data, addr):
    """Entry point for every received datagram.

    Validates the common header, updates TX-side acknowledgement
    state, performs auto-handshake on unknown connections if enabled,
    and dispatches the payload to the ``_handle_<type>`` method
    matching the packet type.
    """
    # optional artificial packet loss for testing
    if (self._rx_loss_emulation and
            random.random() < self._rx_loss_emulation):
        self.logger.debug("dropping datagram for packet loss emulation")
        return

    if len(data) < common_header_fmt.size:
        self.logger.warning(
            "dropping short datagram (len %d < header size %d)",
            len(data), common_header_fmt.size,
        )
        return

    data, common_hdr = unpack_and_splice(data, common_header_fmt)
    version, packet_type, connection_id, min_avail_sn, max_recvd_sn, \
        last_recvd_sn = common_hdr

    if version != 0x00:
        self.logger.warning(
            "dropping datagram with unsupported version (%d)",
            version,
        )
        return

    try:
        packet_type = PacketType(packet_type)
    except ValueError:
        self.logger.warning(
            "dropping datagram with unknown packet type (%d)",
            packet_type,
        )
        return

    min_avail_sn = SerialNumber(self.SERIAL_BITS, min_avail_sn)
    max_recvd_sn = SerialNumber(self.SERIAL_BITS, max_recvd_sn)
    last_recvd_sn = SerialNumber(self.SERIAL_BITS, last_recvd_sn)

    self.logger.debug(
        "datagram: packet_type = %s, connection_id = 0x%08x, "
        "min_avail_sn = %s, max_recvd_sn = %s, last_recvd_sn = %s",
        packet_type, connection_id,
        min_avail_sn, max_recvd_sn, last_recvd_sn,
    )

    valid_connection = (connection_id and
                        connection_id == self._connection_id)
    if valid_connection:
        # the peer acknowledged everything up to max_recvd_sn plus
        # the single last_recvd_sn
        self._mark_received_remotely_up_to(max_recvd_sn)
        self._mark_received_remotely_single(last_recvd_sn)
        self._tx_last_acked_sn = last_recvd_sn
        self.logger.debug("tx buffer is now: %r", self._tx_buffer)
    else:
        self.logger.debug(
            "datagram does not belong to handshaked connection")

    # auto-handshake: adopt the peer's connection id on DATA/DACK
    # from an unknown connection
    if ((packet_type == PacketType.DATA or
            packet_type == PacketType.DACK) and
            not valid_connection and self._autohandshake):
        if not connection_id:
            connection_id = random.getrandbits(32)
            self.logger.info(
                "uninitialized connection id received, syncing with 0x%08x",
                connection_id,
            )
        else:
            self.logger.info("unknown connection id received, syncing")
        self._connection_id = connection_id
        self._flush_rx_buffer()
        self._rx_out_of_order.clear()
        self._rx_max_consecutive_sn = min_avail_sn - 1
        self._tx_last_acked_sn = self._tx_sn.current
        self._tx_dest_addr = addr
        self.synchronized.set()
        self.on_resync()
        valid_connection = True

    handler_name = "_handle_{}".format(packet_type.name.lower())
    try:
        handler = getattr(self, handler_name)
    except AttributeError:
        self.logger.debug(
            "no handler to handle type %s",
            packet_type,
        )
    else:
        try:
            handler(data, valid_connection=valid_connection, addr=addr)
        except:  # NOQA
            self.logger.exception(
                "failed to process packet: %r",
                data,
            )

    if valid_connection:
        # discard state for everything up to min_avail_sn
        self._rx_out_of_order.discard_up_to(min_avail_sn)
        if self._rx_max_consecutive_sn < min_avail_sn:
            self.logger.debug("giving up on receiving frames")
            self._rx_max_consecutive_sn = min_avail_sn
def unpack_and_splice(cls, version, buf):
    """Parse the CPU tick counter from *buf*."""
    buf, (cpu_ticks,) = unpack_and_splice(buf, cls._v1)
    return buf, cls(cpu_ticks)
def from_buf(cls, type_, buf):
    """Decode a versioned status message from *buf*.

    Supports protocol version 1 with status versions up to 6; each
    status version appends further metric sections, and newer layouts
    are mapped back onto the older attribute names for compatibility.

    Raises ValueError for unsupported protocol or status versions.
    """
    result = cls()
    result.type_ = type_
    buf, (rtc, uptime, protocol_version, status_version) = \
        unpack_and_splice(buf, cls._base_header)
    if protocol_version != 1:
        raise ValueError("unsupported protocol")
    if status_version > 6:
        raise ValueError("unsupported status version")
    # NOTE(review): the decoded rtc value is intentionally discarded
    # here — confirm whether result.rtc is filled in by the caller.
    result.rtc = None
    result.uptime = uptime

    if status_version >= 1:
        buf, result.v1_accel_stream_state = \
            cls.IMUStreamState.unpack_and_splice(status_version, buf)
        buf, result.v1_compass_stream_state = \
            cls.IMUStreamState.unpack_and_splice(status_version, buf)

    if status_version >= 2:
        result.v2_i2c_metrics = []
        for _bus in range(2):
            buf, metrics = cls.I2CMetrics.unpack_and_splice(
                status_version, buf,
            )
            result.v2_i2c_metrics.append(metrics)
        if status_version >= 4:
            # v4 carries metrics for two BME280 instances; instance 0
            # doubles as the legacy v2 view
            result.v4_bme280_metrics = []
            for _instance in range(2):
                buf, metrics = cls.BME280Metrics.unpack_and_splice(
                    status_version, buf,
                )
                result.v4_bme280_metrics.append(metrics)
            result.v2_bme280_metrics = result.v4_bme280_metrics[0]
        else:
            # pre-v4: a single BME280 block; pad the v4 view with an
            # empty second instance
            buf, result.v2_bme280_metrics = \
                cls.BME280Metrics.unpack_and_splice(status_version, buf)
            result.v4_bme280_metrics = [
                result.v2_bme280_metrics,
                cls.BME280Metrics(),
            ]

    if status_version >= 5:
        buf, result.v5_tx_metrics = cls.TXMetrics.unpack_and_splice(
            status_version, buf,
        )
    # task metrics existed only in v5; v6 replaced them with CPU metrics
    if 5 <= status_version < 6:
        buf, result.v5_task_metrics = cls.TasksMetrics.unpack_and_splice(
            status_version, buf,
        )
    if status_version >= 6:
        buf, result.v6_cpu_metrics = cls.CPUMetrics.unpack_and_splice(
            status_version, buf,
        )
    return result