def extract(self, byte_stream: BytesIO):
        """
        Parse one subpacket from the stream and return its decoded fields.

        Endianness must have been configured via the endianness setters before
        this call; both settings are cleared again on every invocation so a
        stale configuration can never leak into the next packet.

        :param byte_stream: stream positioned at the start of a subpacket
        :type byte_stream: BytesIO
        :return: mapping of data-entry IDs to parsed values; always includes
            the header timestamp under DataEntryIds.TIME
        :rtype: Dict[Any, Any]
        """
        if self.big_endian_ints is None or self.big_endian_floats is None:
            raise Exception("Endianness not set before parsing")

        # The header comes first; it carries the timestamp and drives parsing.
        header: Header = self.header(byte_stream)

        parsed_data: Dict[Any, Any] = {}
        try:
            parsed_data = self.parse_data(byte_stream, header)
        except Exception:
            # Log with full traceback but keep going; the caller still gets a
            # dict containing at least the timestamp.
            LOGGER.exception("Error parsing data")

        parsed_data[DataEntryIds.TIME] = header.timestamp

        # Force endianness to be re-declared before the next extract() call.
        self.big_endian_ints = None
        self.big_endian_floats = None
        return parsed_data
    def _run(self):
        """SIM connection loop: read one packet ID byte at a time from the
        simulator subprocess and dispatch it to the registered handler.

        Exits on protocol violation (unknown packet ID), on EOF/stream error,
        or when shutdown has been requested.
        """
        LOGGER.debug(
            f"SIM connection started (device_address={self.device_address})")

        try:
            while True:
                # Renamed from `id` to avoid shadowing the builtin.
                packet_id = self.stdout.read(1)[0]  # Returns 0 if process was killed

                # Membership test directly on the dict (no .keys() needed).
                if packet_id not in SimConnection.packetHandlers:
                    LOGGER.error(
                        f"SIM protocol violation!!! Shutting down. (device_address={self.device_address})"
                    )
                    # Dump recent byte history to aid debugging the violation.
                    for b in self.stdout.getHistory():
                        LOGGER.error(hex(b[0]))
                    LOGGER.error("^^^^ violation.")
                    return

                # Call packet handler
                SimConnection.packetHandlers[packet_id](self)

        except Exception:
            with self._shutdown_lock:
                # Only log if this was not an expected shutdown-induced error.
                if not self._is_shutting_down:
                    LOGGER.exception(
                        f"Error in SIM connection. (device_address={self.device_address})"
                    )

        LOGGER.warning(
            f"SIM connection thread shut down (device_address={self.device_address})"
        )
    def timer(self):
        """Auto-save loop: periodically persist the session until shutdown.

        Wakes every AUTOSAVE_INTERVAL_S seconds — or immediately when the
        shutdown flag is raised via the condition variable — and saves the
        current session.
        """
        LOGGER.debug("Auto-save thread started")

        while True:

            with self.as_cv:
                # wait_for releases the lock while waiting; it returns early
                # if the shutdown predicate becomes true.
                self.as_cv.wait_for(lambda: self._as_is_shutting_down,
                                    timeout=AUTOSAVE_INTERVAL_S)

                if self._as_is_shutting_down:
                    break

            try:
                t1 = time.perf_counter()
                self.save(self.session_name)
                t2 = time.perf_counter()
                LOGGER.debug(f"Successfully Auto-Saved in {t2 - t1} seconds.")
            except Exception:
                # Log with traceback and keep the thread alive; one failed
                # save must not prevent future autosaves.
                LOGGER.exception("Exception in autosave thread")

        LOGGER.warning("Auto save thread shut down")
    def run(self):
        """Send-thread loop.

        First broadcasts a config request to every known connection, then
        blocks on the command queue, parsing and forwarding each command to
        the addressed device until shutdown is signalled (a None message
        while the shutdown flag is set).
        """
        LOGGER.debug("Send thread started")

        # TODO : Once we have multiple connections, we will loop over and send a config request to each
        # Starting up, request hello/handshake/identification
        for connection in self.connections.values():
            try:
                connection.broadcast(self.command_parser.broadcast_data(CommandType.CONFIG))
            except Exception as ex:
                LOGGER.exception("Exception in send thread while sending config requests")

        while True:
            try:
                message = self.commandQueue.get(block=True, timeout=None)  # Block until something new
                self.commandQueue.task_done()

                if message is None:  # Either received None or woken up for shutdown
                    with self._shutdown_lock:
                        if self._is_shutting_down:
                            break
                        else:
                            continue

                # NOTE(review): `pase_command` looks like a typo for
                # `parse_command` — confirm against the command parser's API
                # before renaming; it may genuinely be named this way.
                try:
                    (device, command, data) = self.command_parser.pase_command(message)
                except CommandParsingError as ex:
                    LOGGER.error(f"Error parsing command: {str(ex)}")
                    continue

                # Device must have registered itself before we can route to it.
                full_address = self.device_manager.get_full_address(device)
                if full_address is None:
                    LOGGER.error(f"Device not yet connected: {device.name}")
                    continue

                connection = self.connections[full_address.connection_name]

                LOGGER.info(f"Sending command {command.name} to device {device.name} ({full_address})")

                connection.send(full_address.device_address, data)

                LOGGER.info("Sent command!")
                COMMAND_SENT_EVENT.increment()

            except TimeoutException:  # TODO: Connection should have converted this to a generic exception for decoupling
                LOGGER.error("Message timed-out!")

            except queue.Empty:
                pass

            except Exception as ex:
                LOGGER.exception("Unexpected error while sending!")  # Automatically grabs and prints exception info

        LOGGER.warning("Send thread shut down")
Example #5
0
    def run(self) -> None:
        """Background loop that regenerates the map whenever new data arrives.

        Waits on the condition variable, then re-plots only when location or
        desired size actually changed, throttling updates to avoid spam.
        """
        LOGGER.debug("Mapping thread started")

        prev_state = (None, None, None)  # (latitude, longitude, desired size)
        prev_update_time = 0

        while True:
            with self.cv:
                self.cv.wait()  # CV lock is released while waiting
                if self._is_shutting_down:
                    break

            try:
                # Throttle: back off briefly if the last update was recent.
                now = time.time()
                if now - prev_update_time < 0.5:
                    time.sleep(0.5)

                # Snapshot location values so adjacent reads stay consistent.
                lat = self.rocket_data.last_value_by_device(
                    self.device, DataEntryIds.LATITUDE)
                lon = self.rocket_data.last_value_by_device(
                    self.device, DataEntryIds.LONGITUDE)
                size = self.getDesiredMapSize()

                # No location received yet -> nothing to draw.
                if lat is None or lon is None:
                    continue

                # Inputs unchanged -> skip the redundant re-plot.
                if (lat, lon, size) == prev_state:
                    continue

                if not self.plotMap(lat, lon, DEFAULT_RADIUS, DEFAULT_ZOOM):
                    continue

                # Notify the UI that a freshly plotted map is available.
                self.sig_received.emit()

                prev_state = (lat, lon, size)
                prev_update_time = now

            except Exception:
                LOGGER.exception(
                    "Error in map thread loop"
                )  # Automatically grabs and prints exception info

        LOGGER.warning("Mapping thread shut down")
Example #6
0
    def receive_data(self) -> None:
        """
        This is called when new data is available to be displayed.

        Refreshes every label declared by the rocket profile from the latest
        rocket data, then increments the update event counter.
        :return:
        :rtype:
        """

        for label in self.rocket_profile.labels:
            try:
                getattr(self, label.name + "Label").setText(
                    label.update(self.rocket_data))
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # are not swallowed; one broken label must not block the rest.
                LOGGER.exception(f'Failed to update {label.name}Label:')

        LABLES_UPDATED_EVENT.increment()
Example #7
0
    def _run_self_test(self):
        """Automated smoke test: let the app run briefly, verify that label
        and map update events fired, then shut everything down.

        Terminates the process with exit code 0 on success, 1 on failure.
        """
        try:
            LOGGER.info("SELF TEST STARTED")
            snapshot = get_event_stats_snapshot()

            # Give the running app time to produce label/map updates.
            sleep(20)

            # Dont wait, check difference now all at once
            # Add any other common events here
            assert LABLES_UPDATED_EVENT.wait(snapshot, timeout=0) >= 2
            assert MAP_UPDATED_EVENT.wait(snapshot, timeout=0) >= 2

            LOGGER.info("SELF TEST PASSED")
            exit_code = 0

        except AssertionError:
            LOGGER.exception("SELF TEST FAILED")
            exit_code = 1

        self.main_app.shutdown()
        # Hard exit: skip interpreter cleanup so lingering threads can't hang us.
        os._exit(exit_code)
def processMap(requestQueue, resultQueue):
    """To be run in a new process as the stitching and resizing is a CPU bound task

    Requests are tuples (p0, p1, p2, zoom, desiredSize): p0 is the point of
    interest the crop is centred on, p1/p2 are opposite corners of the region,
    zoom is the tile zoom level and desiredSize an optional (width, height)
    target in pixels. A None request shuts the process down. Each request puts
    one result on resultQueue: (image, x_min, x_max, y_min, y_max), or None if
    processing failed.

    :param requestQueue:
    :type requestQueue: Queue
    :param resultQueue:
    :type resultQueue: Queue
    """

    # On Windows, process forking does not copy globals and thus all packages are re-imported. Not for threads
    # though.
    # Note: This means that on Windows the logger will create one log file per process because the session ID
    # is based on the import time
    # https://docs.python.org/3/library/multiprocessing.html#logging
    # TODO: Fix by creating .session file which contains session ID and other
    #  process-global constants. Look into file-locks to make this multiprocessing safe. This is an OS feature

    LOGGER.debug("Mapping process started")
    while True:
        try:
            request = requestQueue.get()

            if request is None:  # Shutdown request
                break

            (p0, p1, p2, zoom, desiredSize) = request

            # Download and stitch the tiles covering the p1-p2 region.
            location = mapbox_utils.TileGrid(p1, p2, zoom)
            location.downloadArrayImages()

            largeMapImage = location.genStitchedMap()
            x_min, x_max, y_min, y_max = location.xMin, location.xMax, location.yMin, location.yMax

            if desiredSize is None:
                # No target size: return the full stitched map untouched.
                resizedMapImage = largeMapImage
            else:

                # Pick the crop dimensions: match the requested aspect ratio
                # while staying as large as possible inside the p1-p2 box.
                if desiredSize[0] / desiredSize[1] > abs(p1.x - p2.x) / abs(
                        p1.y - p2.y):  # Wider aspect ratio
                    x_crop_size = (abs(p1.x - p2.x) * largeMapImage.shape[1]
                                   ) / (location.xMax - location.xMin)
                    y_crop_size = (x_crop_size *
                                   desiredSize[1]) / desiredSize[0]
                else:  # Taller aspect ratio
                    y_crop_size = (abs(p1.y - p2.y) * largeMapImage.shape[0]
                                   ) / (location.yMax - location.yMin)
                    x_crop_size = (y_crop_size *
                                   desiredSize[0]) / desiredSize[1]

                # Map p0 (point of interest) into stitched-image pixel coords.
                center_x = (
                    (p0.x - location.xMin) *
                    largeMapImage.shape[1]) / (location.xMax - location.xMin)
                center_y = (
                    (p0.y - location.yMin) *
                    largeMapImage.shape[0]) / (location.yMax - location.yMin)

                # Crop image centered around p0 (point of interest) and at the desired aspect ratio.
                # Crop is largest possible within rectangle defined by p1 & p2
                x_crop_start = round(center_x - x_crop_size / 2)
                x_crop_end = round(x_crop_start + x_crop_size)
                y_crop_start = round(center_y - y_crop_size / 2)
                y_crop_end = round(y_crop_start + y_crop_size)
                croppedMapImage = largeMapImage[y_crop_start:y_crop_end,
                                                x_crop_start:x_crop_end]

                # Check obtained desired aspect ratio (within one pixel)
                assert abs(x_crop_size / y_crop_size -
                           desiredSize[0] / desiredSize[1]) < 1 / max(
                               croppedMapImage.shape[0:2])
                assert croppedMapImage.shape[1] == round(x_crop_size)
                assert croppedMapImage.shape[0] == round(y_crop_size)

                x_min, x_max, y_min, y_max = min(p1.x, p2.x), max(
                    p1.x, p2.x), min(p1.y, p2.y), max(p1.y, p2.y)

                if croppedMapImage.shape[1] < desiredSize[0]:
                    # Dont scale up the image. Waste of memory.
                    resizedMapImage = croppedMapImage
                else:
                    # Downsizing the map here to the ideal size for the plot reduces the amount of work required in the
                    # main thread and thus reduces stuttering
                    resizedMapImage = np.array(
                        Image.fromarray(croppedMapImage).resize(
                            (desiredSize[0], desiredSize[1]
                             )))  # x,y order is opposite for resize

            resultQueue.put((resizedMapImage, x_min, x_max, y_min, y_max))
        except Exception as ex:
            LOGGER.exception("Exception in processMap process"
                             )  # Automatically grabs and prints exception info
            # Signal failure so the consumer never blocks waiting for a result.
            resultQueue.put(None)

    # cancel_join_thread lets this process exit without flushing the queues.
    resultQueue.cancel_join_thread()
    requestQueue.cancel_join_thread()
    resultQueue.close()
    requestQueue.close()
    LOGGER.warning("Mapping process shut down")
Example #9
0
    def run(self):
        """Send-thread loop.

        First broadcasts a config request to every known connection, then
        blocks on the command queue, parsing "<device>.<command>" strings and
        forwarding the encoded command to the addressed device until shutdown
        is signalled (a None message while the shutdown flag is set).
        """
        LOGGER.debug("Send thread started")

        # TODO : Once we have multiple connections, we will loop over and send a config request to each
        # Starting up, request hello/handshake/identification
        for connection in self.connections.values():
            try:
                connection.broadcast(bytes([CommandType.CONFIG.value]))
            except Exception:
                LOGGER.exception(
                    "Exception in send thread while sending config requests")

        while True:
            try:
                message = self.commandQueue.get(
                    block=True, timeout=None)  # Block until something new
                self.commandQueue.task_done()

                if message is None:  # Either received None or woken up for shutdown
                    with self._shutdown_lock:
                        if self._is_shutting_down:
                            break
                        else:
                            continue

                # Commands arrive as "<device>.<command>" strings.
                message_parts = message.split('.')

                if len(message_parts) != 2:
                    LOGGER.error("Bad command format")
                    continue

                (device_str, command_str) = message_parts

                try:
                    device = DeviceType[device_str.upper()]
                except KeyError:
                    LOGGER.error(f"Unknown device: {device_str}")
                    continue

                # Device must have registered itself before we can route to it.
                full_address = self.device_manager.get_full_address(device)
                if full_address is None:
                    LOGGER.error(f"Device not yet connected: {device.name}")
                    continue

                connection = self.connections[full_address.connection_name]

                try:
                    command = CommandType[command_str.upper()]
                except KeyError:
                    LOGGER.error(f"Unknown command {command_str}")
                    continue

                # Log command.name (not the enum repr) for consistency with
                # the parser-based send-thread implementation.
                LOGGER.info(
                    f"Sending command {command.name} to device {device.name} ({full_address})"
                )

                data = bytes([command.value])
                connection.send(full_address.device_address, data)

                LOGGER.info("Sent command!")
                COMMAND_SENT_EVENT.increment()

            except TimeoutException:  # TODO: Connection should have converted this to a generic exception for decoupling
                LOGGER.error("Message timed-out!")

            except queue.Empty:
                pass

            except Exception:
                LOGGER.exception(
                    "Unexpected error while sending!"
                )  # Automatically grabs and prints exception info

        LOGGER.warning("Send thread shut down")
    def run(self):
        """This thread loop waits for new data and processes it when available"""
        LOGGER.debug("Read thread started")
        while True:

            connection_message = self.dataQueue.get(
                block=True, timeout=None)  # Block until something new
            self.dataQueue.task_done()

            if connection_message is None:  # Either received None or woken up for shutdown
                with self._shutdown_lock:
                    if self._is_shutting_down:
                        break
                    else:
                        continue

            # Identify which connection/device this payload came from.
            connection = connection_message.connection
            full_address = FullAddress(
                connection_name=self.connection_to_name[connection],
                device_address=connection_message.device_address)
            data = connection_message.data

            byte_stream: BytesIO = BytesIO(data)

            # Get length of bytes (without using len(data) for decoupling)
            byte_stream.seek(0, SEEK_END)
            end = byte_stream.tell()
            byte_stream.seek(0)

            # Iterate over stream to extract subpackets where possible
            while byte_stream.tell() < end:
                try:
                    # Endianness can differ per connection, so (re)set it
                    # before every extraction.
                    self.packet_parser.set_endianness(
                        connection.isIntBigEndian(),
                        connection.isFloatBigEndian())
                    # Annotation fixed: typing.Any, not the builtin any().
                    parsed_data: Dict[DataEntryIds,
                                      Any] = self.packet_parser.extract(
                                          byte_stream)

                    # A device registers only when both its type and version
                    # arrive in the same bundle; warn on partial identification.
                    if DataEntryIds.DEVICE_TYPE in parsed_data and DataEntryIds.VERSION_ID in parsed_data:
                        self.device_manager.register_device(
                            parsed_data[DataEntryIds.DEVICE_TYPE],
                            parsed_data[DataEntryIds.VERSION_ID], full_address)
                    elif DataEntryIds.DEVICE_TYPE in parsed_data:
                        LOGGER.warning(
                            'Received DEVICE_TYPE but not VERSION_ID')
                    elif DataEntryIds.VERSION_ID in parsed_data:
                        LOGGER.warning(
                            'Received VERSION_ID but not DEVICE_TYPE')

                    self.rocket_data.add_bundle(full_address, parsed_data)

                    # notify UI that new data is available to be displayed
                    self.sig_received.emit()
                except Exception as e:
                    LOGGER.exception("Error decoding new packet! %s", e)
                    # Just discard rest of data TODO Review policy on handling remaining data or problem packets. Consider data errors too
                    byte_stream.seek(0, SEEK_END)

            CONNECTION_MESSAGE_READ_EVENT.increment()

        LOGGER.warning("Read thread shut down")