class deviceSideProcessor():
    """
    Packages the image rectification and AprilTag detection for images.
    """
    def __init__(self, options):
        self.ImageRectifier = None
        self.opt_beautify = options.get("beautify", False)
        self.aprilTagProcessor = aprilTagProcessor(options.get("tag_size", 0.065))

    def process(self, raw_image, cameraMatrix, distCoeffs):

        # 0. Initialize the image rectifier if it hasn't been already (that's done so that we don't recompute the remapping)
        if self.ImageRectifier is None:
            self.ImageRectifier = ImageRectifier(raw_image, cameraMatrix, distCoeffs)

        # 1. Rectify the raw image and get the new camera matrix
        rect_image, newCameraMatrix = self.ImageRectifier.rectify(raw_image)

        # 2. Extract AprilTag data
        if len(rect_image.shape) == 3:
            rect_image_gray = cv2.cvtColor(rect_image, cv2.COLOR_BGR2GRAY)
        else:
            rect_image_gray = rect_image

        # Beautify if wanted:
        if self.opt_beautify and self.ImageRectifier:
            raw_image = self.ImageRectifier.beautify(raw_image)

        aprilTags = self.aprilTagProcessor.configureDetector(newCameraMatrix).detect(rect_image_gray, return_image=False)

        # 3. Extract poses from the AprilTag detections
        aprilTags = self.aprilTagProcessor.resultsToPose(aprilTags)

        # 4. Package output
        outputDict = dict()
        outputDict["rect_image"] = rect_image
        outputDict["new_camera_matrix"] = newCameraMatrix
        outputDict["apriltags"] = list()
        for atag in aprilTags:
            outputDict["apriltags"].append({'tag_id': atag.tag_id,
                                            'goodness': atag.goodness,
                                            'corners': atag.corners,
                                            'qvec': atag.qvec,
                                            'tvec': atag.tvec })
        return outputDict
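
# A minimal usage sketch for deviceSideProcessor above, assuming cv2/numpy are
# imported and that ImageRectifier and aprilTagProcessor come from this
# codebase. The frame and calibration values are synthetic placeholders.
def _example_device_side_usage():
    import numpy as np
    processor = deviceSideProcessor({'beautify': False, 'tag_size': 0.065})
    frame = np.zeros((480, 640), dtype=np.uint8)   # synthetic grayscale frame
    K = np.array([[320.0, 0.0, 320.0],             # [fx, 0, cx]
                  [0.0, 320.0, 240.0],             # [0, fy, cy]
                  [0.0, 0.0, 1.0]])
    D = np.zeros(5)                                # assume no lens distortion
    result = processor.process(frame, K, D)
    for tag in result['apriltags']:
        print(tag['tag_id'], tag['tvec'])
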
class ImageProcessor(multiprocessing.Process):
    """
    Packages the image rectification and AprilTag detection for images.
    """
    def __init__(self, options, logger, publishers, aprilTagProcessor,
                 publish_queue, config, image_queue):
        super(ImageProcessor, self).__init__()
        self.logger = logger
        self.ImageRectifier = None
        self.publish_queue = publish_queue
        self.image_queue = image_queue
        self.bridge = CvBridge()
        self.aprilTagProcessor = aprilTagProcessor
        self.publishers = publishers
        self.opt_beautify = options.get('beautify', False)
        self.tag_size = options.get('tag_size', 0.065)

        self.ACQ_DEVICE_NAME = config['ACQ_DEVICE_NAME']
        self.ACQ_TEST_STREAM = config['ACQ_TEST_STREAM']

        self.raw_image = None
        self.camera_info = None
        self.seq_stamper = None
        self.total_time = 0.0
        self.min_time = float('inf')
        self.max_time = -1.0
        self.iterations = 0.0

    def run(self):
        while True:

            (self.raw_image, self.camera_info,
             self.seq_stamper) = self.image_queue.get(block=True)

            t0 = time.time()

            cv_image = self.bridge.compressed_imgmsg_to_cv2(
                self.raw_image, desired_encoding='mono8')

            # Scale the K matrix if the image resolution is not the same as in the calibration
            currRawImage_height = cv_image.shape[0]
            currRawImage_width = cv_image.shape[1]

            scale_matrix = np.ones(9)
            if self.camera_info.height != currRawImage_height or self.camera_info.width != currRawImage_width:
                scale_width = float(currRawImage_width) / \
                    self.camera_info.width
                scale_height = float(currRawImage_height) / \
                    self.camera_info.height

                scale_matrix[0] *= scale_width
                scale_matrix[2] *= scale_width
                scale_matrix[4] *= scale_height
                scale_matrix[5] *= scale_height

            # Process the image and extract the apriltags
            outputDict = self.process(
                cv_image, currRawImage_width, currRawImage_height,
                (np.array(self.camera_info.K) * scale_matrix).reshape(
                    (3, 3)), self.camera_info.D)
            outputDict['header'] = self.raw_image.header

            # Add the time stamp and source of the input image to the output
            for tag in outputDict['apriltags']:
                tag['timestamp_secs'] = self.raw_image.header.stamp.secs
                tag['timestamp_nsecs'] = self.raw_image.header.stamp.nsecs
                tag['source'] = self.ACQ_DEVICE_NAME

            # Generate a diagnostic image
            if self.ACQ_TEST_STREAM == 1:
                image = np.copy(outputDict['rect_image'])

                # Draw the AprilTag bounding boxes and tag IDs on the image
                for tag in outputDict['apriltags']:
                    for idx in range(len(tag['corners'])):
                        cv2.line(image,
                                 tuple(tag['corners'][idx - 1, :].astype(int)),
                                 tuple(tag['corners'][idx, :].astype(int)),
                                 (0, 255, 0))
                    cv2.putText(image,
                                str(tag['tag_id']),
                                org=(tag['corners'][0, 0].astype(int) + 10,
                                     tag['corners'][0, 1].astype(int) + 10),
                                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=1.0,
                                thickness=2,
                                color=(255, 0, 0))

                # Overlay the device name and timestamp on the image
                cv2.putText(image,
                            'device: ' + self.ACQ_DEVICE_NAME +
                            ', timestamp: ' +
                            str(self.raw_image.header.stamp.secs) + '+' +
                            str(self.raw_image.header.stamp.nsecs),
                            org=(30, 30),
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=1.0,
                            thickness=2,
                            color=(255, 0, 0))

                # Add the original and diagnostic info to the outputDict
                outputDict['test_stream_image'] = self.bridge.cv2_to_compressed_imgmsg(
                    image, dst_format='png')
                outputDict['test_stream_image'].header.stamp.secs = self.raw_image.header.stamp.secs
                outputDict['test_stream_image'].header.stamp.nsecs = self.raw_image.header.stamp.nsecs
                outputDict['test_stream_image'].header.frame_id = self.ACQ_DEVICE_NAME

                outputDict['raw_image'] = self.raw_image
                outputDict['raw_camera_info'] = self.camera_info

                outputDict['rectified_image'] = self.bridge.cv2_to_compressed_imgmsg(
                    outputDict['rect_image'], dst_format='png')
                outputDict['rectified_image'].header.stamp.secs = self.raw_image.header.stamp.secs
                outputDict['rectified_image'].header.stamp.nsecs = self.raw_image.header.stamp.nsecs
                outputDict['rectified_image'].header.frame_id = self.ACQ_DEVICE_NAME

            t1 = time.time()
            self.total_time += t1 - t0
            self.iterations += 1
            self.min_time = min(t1 - t0, self.min_time)
            self.max_time = max(t1 - t0, self.max_time)
            # print("%05d  %5.4f  %5.4f  %5.4f  %5.4f  %5.4f" %
            #       (self.iterations, self.total_time, self.min_time, self.max_time, self.total_time / self.iterations, t1 - t0))

            # Publish the results
            self.publish(outputDict)

    def process(self, raw_image, width, height, cameraMatrix, distCoeffs):
        """
        Processes an image.
        """

        try:
            # 0. Initialize the image rectifier if it hasn't been already (that's done so that we don't recompute the remapping)
            if self.ImageRectifier is None:
                self.ImageRectifier = ImageRectifier(raw_image, cameraMatrix,
                                                     distCoeffs)

            # 1. Rectify the raw image and get the new camera matrix
            rect_image, newCameraMatrix = self.ImageRectifier.rectify(
                raw_image)

            # 2. Extract AprilTag data
            if len(rect_image.shape) == 3:
                rect_image_gray = cv2.cvtColor(rect_image, cv2.COLOR_BGR2GRAY)
            else:
                rect_image_gray = rect_image

            if len(raw_image.shape) == 3:
                raw_image_gray = cv2.cvtColor(raw_image, cv2.COLOR_BGR2GRAY)
            else:
                raw_image_gray = raw_image

            # Beautify if wanted:
            if self.opt_beautify and self.ImageRectifier:
                raw_image = self.ImageRectifier.beautify(raw_image)

            # 3. Extract poses from the AprilTag detections
            tags = self.aprilTagProcessor.detect(raw_image_gray, width, height,
                                                 cameraMatrix, distCoeffs)

            # 4. Package output
            outputDict = dict()
            outputDict['rect_image'] = rect_image
            outputDict['new_camera_matrix'] = newCameraMatrix
            outputDict['apriltags'] = list()
            for atag in tags:
                outputDict['apriltags'].append({
                    'tag_id': atag.tag_id,
                    'corners': atag.corners,
                    'qvec': self.rvec2quat(atag.pose_R),
                    'tvec': atag.pose_t,
                    'tag_family': atag.tag_family,
                    'hamming': atag.hamming,
                    'decision_margin': atag.decision_margin,
                    'homography': atag.homography,
                    'center': atag.center,
                    'pose_error': atag.pose_err
                })
            return outputDict

        except Exception as e:
            self.logger.warning('ImageProcessor process failed: %s' % str(e))

    def rvec2quat(self, rvec):
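        # Convert an axis-angle (Rodrigues) rotation vector to a quaternion.
        # The vector's magnitude is the rotation angle and its direction the
        # axis. Note: the component ordering and signs below follow this
        # codebase's own convention rather than the conventional (x, y, z, w).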
        x = rvec[0]
        y = rvec[1]
        z = rvec[2]
        r = math.sqrt(x * x + y * y + z * z)
        if r < 0.00001:
            return np.array([1, 0, 0, 0])
        c = math.cos(r / 2)
        s = math.sin(r / 2)
        quat_x = c
        quat_y = s * z / r
        quat_z = -s * y / r
        quat_w = -s * x / r
        return np.array([quat_x, quat_y, quat_z, quat_w])

    def publish(self, incomingData):
        if "apriltags" in incomingData:
            for tag in incomingData["apriltags"]:
                # Publish the relative pose
                newApriltagDetectionMsg = AprilTagExtended()
                newApriltagDetectionMsg.header.stamp.secs = int(
                    tag["timestamp_secs"])
                newApriltagDetectionMsg.header.stamp.nsecs = int(
                    tag["timestamp_nsecs"])
                newApriltagDetectionMsg.header.frame_id = str(tag["source"])
                newApriltagDetectionMsg.transform.translation.x = float(
                    tag["tvec"][0])
                newApriltagDetectionMsg.transform.translation.y = float(
                    tag["tvec"][1])
                newApriltagDetectionMsg.transform.translation.z = float(
                    tag["tvec"][2])
                newApriltagDetectionMsg.transform.rotation.x = float(
                    tag["qvec"][0])
                newApriltagDetectionMsg.transform.rotation.y = float(
                    tag["qvec"][1])
                newApriltagDetectionMsg.transform.rotation.z = float(
                    tag["qvec"][2])
                newApriltagDetectionMsg.transform.rotation.w = float(
                    tag["qvec"][3])
                newApriltagDetectionMsg.tag_id = int(tag["tag_id"])
                newApriltagDetectionMsg.tag_family = tag["tag_family"]
                newApriltagDetectionMsg.hamming = int(tag["hamming"])
                newApriltagDetectionMsg.decision_margin = float(
                    tag["decision_margin"])
                newApriltagDetectionMsg.homography = tag["homography"].flatten()
                newApriltagDetectionMsg.center = tag["center"]
                newApriltagDetectionMsg.corners = tag["corners"].flatten()
                newApriltagDetectionMsg.pose_error = tag["pose_error"]

                self.publish_queue.put({
                    "topic": "apriltags",
                    "message": newApriltagDetectionMsg
                })
                # self.publishers["apriltags"].publish(newApriltagDetectionMsg)
                # self.logger.info("Published pose for tag %d in sequence %d" % (
                #     tag["tag_id"], self.seq_stamper))

        # Publish the test and raw data if submitted and requested:
        if self.ACQ_TEST_STREAM:
            if "test_stream_image" in incomingData:
                imgMsg = incomingData["test_stream_image"]
                imgMsg.header.seq = self.seq_stamper
                self.publish_queue.put({
                    "topic": "test_stream_image",
                    "message": imgMsg
                })
                # self.publishers["test_stream_image"].publish(imgMsg)

            if "raw_image" in incomingData:
                imgMsg = incomingData["raw_image"]
                imgMsg.header.seq = self.seq_stamper
                self.publish_queue.put({
                    "topic": "raw_image",
                    "message": imgMsg
                })
                # self.publishers["raw_image"].publish(imgMsg)

            if "rectified_image" in incomingData:
                imgMsg = incomingData["rectified_image"]
                imgMsg.header.seq = self.seq_stamper
                self.publish_queue.put({
                    "topic": "rectified_image",
                    "message": imgMsg
                })
                # self.publishers["rectified_image"].publish(imgMsg)

            if "raw_camera_info" in incomingData:
                self.publish_queue.put({
                    "topic": "raw_camera_info",
                    "message": incomingData["raw_camera_info"]
                })

                # self.publishers["raw_camera_info"].publish(
                #     incomingData["raw_camera_info"])

            if "new_camera_matrix" in incomingData:
                new_camera_info = CameraInfo()
                try:
                    new_camera_info.header = incomingData[
                        "raw_camera_info"].header
                    new_camera_info.height = incomingData["raw_image"].shape[0]
                    new_camera_info.width = incomingData["raw_image"].shape[1]
                    new_camera_info.distortion_model = incomingData[
                        "raw_camera_info"].distortion_model
                    new_camera_info.D = [0.0, 0.0, 0.0, 0.0, 0.0]
                except Exception:
                    # Not all of these fields are present when the test stream is off
                    pass
                new_camera_info.K = incomingData["new_camera_matrix"].flatten()
                self.publish_queue.put({
                    "topic": "new_camera_matrix",
                    "message": new_camera_info
                })
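
# Standalone sketch of the K-matrix scaling step in ImageProcessor.run() above.
# When the live resolution differs from the calibration resolution, fx and cx
# scale with the width ratio and fy and cy with the height ratio; the rest of
# K is unchanged. The function name is illustrative, not from the codebase.
import numpy as np

def _scale_camera_matrix(K_flat, calib_wh, image_wh):
    """Rescale a flattened 3x3 K from the calibration to the image resolution."""
    (calib_w, calib_h), (img_w, img_h) = calib_wh, image_wh
    scale = np.ones(9)
    if (calib_w, calib_h) != (img_w, img_h):
        scale[0] = scale[2] = float(img_w) / calib_w    # fx, cx
        scale[4] = scale[5] = float(img_h) / calib_h    # fy, cy
    return (np.array(K_flat) * scale).reshape((3, 3))

# e.g. a 640x480 calibration applied to 320x240 frames halves fx, cx, fy, cy:
# _scale_camera_matrix([320, 0, 320, 0, 320, 240, 0, 0, 1], (640, 480), (320, 240))
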
class ImageProcessor(multiprocessing.Process):
    """
    Packages the image rectification and AprilTag detection for images.
    """
    def __init__(self, options, logger, publishers, aprilTagProcessor,
                 publish_queue, config, image_queue):
        super(ImageProcessor, self).__init__()
        self.logger = logger
        self.ImageRectifier = None
        self.publish_queue = publish_queue
        self.image_queue = image_queue
        self.bridge = CvBridge()
        self.aprilTagProcessor = aprilTagProcessor
        self.publishers = publishers
        self.opt_beautify = options.get('beautify', False)
        self.tag_size = options.get('tag_size', 0.065)

        self.ACQ_DEVICE_NAME = config['ACQ_DEVICE_NAME']
        self.ACQ_TEST_STREAM = config['ACQ_TEST_STREAM']

        self.raw_image = None
        self.camera_info = None
        self.seq_stamper = None

    def run(self):
        while True:

            (self.raw_image, self.camera_info,
             self.seq_stamper) = self.image_queue.get(block=True)

            cv_image = self.bridge.compressed_imgmsg_to_cv2(
                self.raw_image, desired_encoding='mono8')

            # Scale the K matrix if the image resolution is not the same as in the calibration
            currRawImage_height = cv_image.shape[0]
            currRawImage_width = cv_image.shape[1]

            scale_matrix = np.ones(9)
            if self.camera_info.height != currRawImage_height or self.camera_info.width != currRawImage_width:
                scale_width = float(currRawImage_width) / \
                    self.camera_info.width
                scale_height = float(currRawImage_height) / \
                    self.camera_info.height

                scale_matrix[0] *= scale_width
                scale_matrix[2] *= scale_width
                scale_matrix[4] *= scale_height
                scale_matrix[5] *= scale_height

            # Process the image and extract the apriltags
            outputDict = self.process(cv_image, (np.array(self.camera_info.K) *
                                                 scale_matrix).reshape((3, 3)),
                                      self.camera_info.D)
            outputDict['header'] = self.raw_image.header

            # Add the time stamp and source of the input image to the output
            for tag in outputDict['apriltags']:
                tag['timestamp_secs'] = self.raw_image.header.stamp.secs
                tag['timestamp_nsecs'] = self.raw_image.header.stamp.nsecs
                tag['source'] = self.ACQ_DEVICE_NAME

            # Generate a diagnostic image
            if self.ACQ_TEST_STREAM == 1:
                image = np.copy(outputDict['rect_image'])

                # Draw the AprilTag bounding boxes and tag IDs on the image
                for tag in outputDict['apriltags']:
                    for idx in range(len(tag['corners'])):
                        cv2.line(image,
                                 tuple(tag['corners'][idx - 1, :].astype(int)),
                                 tuple(tag['corners'][idx, :].astype(int)),
                                 (0, 255, 0))
                    cv2.putText(image,
                                str(tag['tag_id']),
                                org=(tag['corners'][0, 0].astype(int) + 10,
                                     tag['corners'][0, 1].astype(int) + 10),
                                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=1.0,
                                thickness=2,
                                color=(255, 0, 0))

                # Overlay the device name and timestamp on the image
                cv2.putText(image,
                            'device: ' + self.ACQ_DEVICE_NAME +
                            ', timestamp: ' +
                            str(self.raw_image.header.stamp.secs) + '+' +
                            str(self.raw_image.header.stamp.nsecs),
                            org=(30, 30),
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=1.0,
                            thickness=2,
                            color=(255, 0, 0))

                # Add the original and diagnostic info to the outputDict
                outputDict['test_stream_image'] = self.bridge.cv2_to_compressed_imgmsg(
                    image, dst_format='png')
                outputDict['test_stream_image'].header.stamp.secs = self.raw_image.header.stamp.secs
                outputDict['test_stream_image'].header.stamp.nsecs = self.raw_image.header.stamp.nsecs
                outputDict['test_stream_image'].header.frame_id = self.ACQ_DEVICE_NAME

                outputDict['raw_image'] = self.raw_image
                outputDict['raw_camera_info'] = self.camera_info

                outputDict['rectified_image'] = self.bridge.cv2_to_compressed_imgmsg(
                    outputDict['rect_image'], dst_format='png')
                outputDict['rectified_image'].header.stamp.secs = self.raw_image.header.stamp.secs
                outputDict['rectified_image'].header.stamp.nsecs = self.raw_image.header.stamp.nsecs
                outputDict['rectified_image'].header.frame_id = self.ACQ_DEVICE_NAME

            # Publish the results
            self.publish(outputDict)

    def process(self, raw_image, cameraMatrix, distCoeffs):
        """
        Processes an image.
        """

        try:
            # 0. Initialize the image rectifier if it hasn't been already (that's done so that we don't recompute the remapping)
            if self.ImageRectifier is None:
                self.ImageRectifier = ImageRectifier(raw_image, cameraMatrix,
                                                     distCoeffs)

            # 1. Rectify the raw image and get the new camera matrix
            rect_image, newCameraMatrix = self.ImageRectifier.rectify(
                raw_image)

            # 2. Extract AprilTag data
            if len(rect_image.shape) == 3:
                rect_image_gray = cv2.cvtColor(rect_image, cv2.COLOR_BGR2GRAY)
            else:
                rect_image_gray = rect_image

            # Beautify if wanted:
            if self.opt_beautify and self.ImageRectifier:
                raw_image = self.ImageRectifier.beautify(raw_image)

            # 3. Extract poses from the AprilTag detections
            camera_params = (newCameraMatrix[0, 0], newCameraMatrix[1, 1],
                             newCameraMatrix[0, 2], newCameraMatrix[1, 2])
            tags = self.aprilTagProcessor.detect(rect_image_gray, True,
                                                 camera_params, self.tag_size)

            # 4. Package output
            outputDict = dict()
            outputDict['rect_image'] = rect_image
            outputDict['new_camera_matrix'] = newCameraMatrix
            outputDict['apriltags'] = list()
            for atag in tags:
                outputDict['apriltags'].append({
                    'tag_id': atag.tag_id,
                    'corners': atag.corners,
                    'qvec': self.mat2quat(atag.pose_R),
                    'tvec': atag.pose_t,
                    'tag_family': atag.tag_family,
                    'hamming': atag.hamming,
                    'decision_margin': atag.decision_margin,
                    'homography': atag.homography,
                    'center': atag.center,
                    'pose_error': atag.pose_err
                })
            return outputDict

        except Exception as e:
            self.logger.warning('ImageProcessor process failed: %s' % str(e))

    def mat2quat(self, M):
        """
        Helper function that converts rotation matrices to quaternions.
        """
        try:
            Qxx, Qyx, Qzx, Qxy, Qyy, Qzy, Qxz, Qyz, Qzz = M.flat
            K = np.array([[Qxx - Qyy - Qzz, 0, 0, 0],
                          [Qyx + Qxy, Qyy - Qxx - Qzz, 0, 0],
                          [Qzx + Qxz, Qzy + Qyz, Qzz - Qxx - Qyy, 0],
                          [Qyz - Qzy, Qzx - Qxz, Qxy - Qyx, Qxx + Qyy + Qzz]
                          ]) / 3.0
            # Use Hermitian eigenvectors, values for speed
            vals, vecs = np.linalg.eigh(K)
            # Select largest eigenvector, reorder to x,y,z,w quaternion
            q = vecs[[0, 1, 2, 3], np.argmax(vals)]
            # Prefer quaternion with positive w
            # (q * -1 corresponds to same rotation as q)
            if q[3] < 0:
                q *= -1
            return q
        except Exception as e:
            self.logger.warning('ImageProcessor mat2quat failed: %s' % str(e))

    def publish(self, incomingData):
        if "apriltags" in incomingData:
            for tag in incomingData["apriltags"]:
                # Publish the relative pose
                newApriltagDetectionMsg = AprilTagExtended()
                newApriltagDetectionMsg.header.stamp.secs = int(
                    tag["timestamp_secs"])
                newApriltagDetectionMsg.header.stamp.nsecs = int(
                    tag["timestamp_nsecs"])
                newApriltagDetectionMsg.header.frame_id = str(tag["source"])
                newApriltagDetectionMsg.transform.translation.x = float(
                    tag["tvec"][0])
                newApriltagDetectionMsg.transform.translation.y = float(
                    tag["tvec"][1])
                newApriltagDetectionMsg.transform.translation.z = float(
                    tag["tvec"][2])
                newApriltagDetectionMsg.transform.rotation.x = float(
                    tag["qvec"][0])
                newApriltagDetectionMsg.transform.rotation.y = float(
                    tag["qvec"][1])
                newApriltagDetectionMsg.transform.rotation.z = float(
                    tag["qvec"][2])
                newApriltagDetectionMsg.transform.rotation.w = float(
                    tag["qvec"][3])
                newApriltagDetectionMsg.tag_id = int(tag["tag_id"])
                newApriltagDetectionMsg.tag_family = tag["tag_family"]
                newApriltagDetectionMsg.hamming = int(tag["hamming"])
                newApriltagDetectionMsg.decision_margin = float(
                    tag["decision_margin"])
                newApriltagDetectionMsg.homography = tag["homography"].flatten()
                newApriltagDetectionMsg.center = tag["center"]
                newApriltagDetectionMsg.corners = tag["corners"].flatten()
                newApriltagDetectionMsg.pose_error = tag["pose_error"]

                self.publish_queue.put({
                    "topic": "apriltags",
                    "message": newApriltagDetectionMsg
                })
                # self.publishers["apriltags"].publish(newApriltagDetectionMsg)
                self.logger.info("Published pose for tag %d in sequence %d" %
                                 (tag["tag_id"], self.seq_stamper))

        # Publish the test and raw data if submitted and requested:
        if self.ACQ_TEST_STREAM:
            if "test_stream_image" in incomingData:
                imgMsg = incomingData["test_stream_image"]
                imgMsg.header.seq = self.seq_stamper
                self.publish_queue.put({
                    "topic": "test_stream_image",
                    "message": imgMsg
                })
                # self.publishers["test_stream_image"].publish(imgMsg)

            if "raw_image" in incomingData:
                imgMsg = incomingData["raw_image"]
                imgMsg.header.seq = self.seq_stamper
                self.publish_queue.put({
                    "topic": "raw_image",
                    "message": imgMsg
                })
                # self.publishers["raw_image"].publish(imgMsg)

            if "rectified_image" in incomingData:
                imgMsg = incomingData["rectified_image"]
                imgMsg.header.seq = self.seq_stamper
                self.publish_queue.put({
                    "topic": "rectified_image",
                    "message": imgMsg
                })
                # self.publishers["rectified_image"].publish(imgMsg)

            if "raw_camera_info" in incomingData:
                self.publish_queue.put({
                    "topic": "raw_camera_info",
                    "message": incomingData["raw_camera_info"]
                })

                # self.publishers["raw_camera_info"].publish(
                #     incomingData["raw_camera_info"])

            if "new_camera_matrix" in incomingData:
                new_camera_info = CameraInfo()
                try:
                    new_camera_info.header = incomingData[
                        "raw_camera_info"].header
                    new_camera_info.height = incomingData["raw_image"].shape[0]
                    new_camera_info.width = incomingData["raw_image"].shape[1]
                    new_camera_info.distortion_model = incomingData[
                        "raw_camera_info"].distortion_model
                    new_camera_info.D = [0.0, 0.0, 0.0, 0.0, 0.0]
                except Exception:
                    # Not all of these fields are present when the test stream is off
                    pass
                new_camera_info.K = incomingData["new_camera_matrix"].flatten()
                self.publish_queue.put({
                    "topic": "new_camera_matrix",
                    "message": new_camera_info
                })
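
# A quick standalone check of the eigenvalue-based rotation-matrix-to-quaternion
# conversion used in mat2quat() above (Bar-Itzhack's method): a 90-degree
# rotation about z should map to q = (0, 0, sin(pi/4), cos(pi/4)). Names here
# are illustrative, not from the codebase.
import numpy as np

def _check_mat2quat():
    theta = np.pi / 2
    M = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                  [np.sin(theta),  np.cos(theta), 0.0],
                  [0.0,            0.0,           1.0]])
    Qxx, Qyx, Qzx, Qxy, Qyy, Qzy, Qxz, Qyz, Qzz = M.flat
    K = np.array([[Qxx - Qyy - Qzz, 0, 0, 0],
                  [Qyx + Qxy, Qyy - Qxx - Qzz, 0, 0],
                  [Qzx + Qxz, Qzy + Qyz, Qzz - Qxx - Qyy, 0],
                  [Qyz - Qzy, Qzx - Qxz, Qxy - Qyx, Qxx + Qyy + Qzz]]) / 3.0
    vals, vecs = np.linalg.eigh(K)                 # Hermitian eigendecomposition
    q = vecs[[0, 1, 2, 3], np.argmax(vals)]        # (x, y, z, w)
    if q[3] < 0:                                   # prefer positive w
        q *= -1
    assert np.allclose(q, [0, 0, np.sin(theta / 2), np.cos(theta / 2)])
    return q
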
class deviceSideProcessor():
    """
    Packages the image rectification and AprilTag detection for images.
    """
    def __init__(self, options, logger):
        self.logger = logger
        self.ImageRectifier = None
        self.opt_beautify = options.get('beautify', False)
        self.tag_size = options.get('tag_size', 0.065)
        self.aprilTagProcessor = apriltags3.Detector(
            searchpath=[ACQ_APRILTAG_SO],
            families='tag36h11',
            nthreads=4,
            quad_decimate=1.0,
            quad_sigma=0.0,
            refine_edges=1,
            decode_sharpening=0.25,
            debug=0)

    def process(self, raw_image, cameraMatrix, distCoeffs):
        """
        Processes an image.
        """

        try:
            # 0. Initialize the image rectifier if it hasn't been already (that's done so that we don't recompute the remapping)
            if self.ImageRectifier is None:
                self.ImageRectifier = ImageRectifier(raw_image, cameraMatrix, distCoeffs)

            # 1. Rectify the raw image and get the new camera matrix
            rect_image, newCameraMatrix = self.ImageRectifier.rectify(raw_image)

            # 2. Extract AprilTag data
            if len(rect_image.shape) == 3:
                rect_image_gray = cv2.cvtColor(rect_image, cv2.COLOR_BGR2GRAY)
            else:
                rect_image_gray = rect_image

            # Beautify if wanted:
            if self.opt_beautify and self.ImageRectifier:
                raw_image = self.ImageRectifier.beautify(raw_image)

            # 3. Extract poses from the AprilTag detections
            camera_params = (newCameraMatrix[0, 0], newCameraMatrix[1, 1],
                             newCameraMatrix[0, 2], newCameraMatrix[1, 2])
            tags = self.aprilTagProcessor.detect(rect_image_gray, True, camera_params, self.tag_size)

            # 4. Package output
            outputDict = dict()
            outputDict['rect_image'] = rect_image
            outputDict['new_camera_matrix'] = newCameraMatrix
            outputDict['apriltags'] = list()
            for atag in tags:
                outputDict['apriltags'].append({'tag_id': atag.tag_id,
                                                'corners': atag.corners,
                                                'qvec': self.mat2quat(atag.pose_R),
                                                'tvec': atag.pose_t,
                                                'tag_family': atag.tag_family,
                                                'hamming': atag.hamming,
                                                'decision_margin': atag.decision_margin,
                                                'homography': atag.homography,
                                                'center': atag.center,
                                                'pose_error': atag.pose_err})
            return outputDict

        except Exception as e:
            self.logger.warning('deviceSideProcessor process failed: %s' % str(e))

    def mat2quat(self, M):
        """
        Helper function that converts rotation matrices to quaternions.
        """
        try:
            Qxx, Qyx, Qzx, Qxy, Qyy, Qzy, Qxz, Qyz, Qzz = M.flat
            K = np.array([[Qxx - Qyy - Qzz, 0, 0, 0],
                          [Qyx + Qxy, Qyy - Qxx - Qzz, 0, 0],
                          [Qzx + Qxz, Qzy + Qyz, Qzz - Qxx - Qyy, 0],
                          [Qyz - Qzy, Qzx - Qxz, Qxy - Qyx, Qxx + Qyy + Qzz]]) / 3.0
            # Use Hermitian eigenvectors, values for speed
            vals, vecs = np.linalg.eigh(K)
            # Select largest eigenvector, reorder to x,y,z,w quaternion
            q = vecs[[0, 1, 2, 3], np.argmax(vals)]
            # Prefer quaternion with positive w
            # (q * -1 corresponds to same rotation as q)
            if q[3] < 0:
                q *= -1
            return q
        except Exception as e:
            self.logger.warning('deviceSideProcessor mat2quat failed: %s' % str(e))
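
# A minimal usage sketch for the apriltags3-based deviceSideProcessor above.
# It assumes the duckietown apriltags3 bindings, ImageRectifier, numpy, and a
# module-level ACQ_APRILTAG_SO path are available; the frame and calibration
# values are synthetic placeholders.
def _example_apriltags3_usage():
    import logging
    import numpy as np
    logger = logging.getLogger('device_side_processor')
    processor = deviceSideProcessor({'beautify': False, 'tag_size': 0.065}, logger)
    frame = np.zeros((480, 640), dtype=np.uint8)       # synthetic grayscale frame
    K = np.array([[320.0, 0.0, 320.0],
                  [0.0, 320.0, 240.0],
                  [0.0, 0.0, 1.0]])
    D = np.zeros(5)
    result = processor.process(frame, K, D)
    if result is not None:                             # process() returns None on failure
        for tag in result['apriltags']:
            print(tag['tag_id'], tag['tvec'].ravel(), tag['qvec'])
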
class OdometryProcessor():
    """
    Processes the data coming from a remote device (Duckiebot or watchtower).
    """
    def __init__(self, logger):

        # Get the environment variables
        self.ACQ_DEVICE_NAME = os.getenv('ACQ_DEVICE_NAME', 'watchtower10')
        self.ACQ_TOPIC_RAW = os.getenv('ACQ_TOPIC_RAW',
                                       'camera_node/image/compressed')
        self.ACQ_TOPIC_CAMERAINFO = os.getenv('ACQ_TOPIC_CAMERAINFO',
                                              'camera_node/camera_info')
        self.ACQ_TOPIC_VELOCITY_TO_POSE = os.getenv(
            'ACQ_TOPIC_VELOCITY_TO_POSE', None)
        self.ACQ_STATIONARY_ODOMETRY = bool(
            int(os.getenv('ACQ_STATIONARY_ODOMETRY', 0)))
        self.ACQ_ODOMETRY_POST_VISUAL_ODOMETRY_FEATURES = os.getenv(
            'ACQ_ODOMETRY_POST_VISUAL_ODOMETRY_FEATURES', 'SURF')
        self.ACQ_ODOMETRY_TOPIC = os.getenv('ACQ_ODOMETRY_TOPIC', "odometry")
        self.logger = logger

        # Initialize ROS nodes and subscribe to topics
        rospy.init_node('acquisition_processor',
                        anonymous=True,
                        disable_signals=True)

        self.logger.info(
            str('/' + self.ACQ_DEVICE_NAME + '/' + self.ACQ_TOPIC_RAW))

        self.subscriberCompressedImage = rospy.Subscriber(
            '/' + self.ACQ_DEVICE_NAME + '/' + self.ACQ_TOPIC_RAW,
            CompressedImage,
            self.camera_image_callback,
            queue_size=1)
        self.subscriberCameraInfo = rospy.Subscriber(
            '/' + self.ACQ_DEVICE_NAME + '/' + self.ACQ_TOPIC_CAMERAINFO,
            CameraInfo,
            self.camera_info_callback,
            queue_size=1)

        # Only if set (probably not for watchtowers)
        # self.subscriberCameraInfo = rospy.Subscriber('/'+self.ACQ_DEVICE_NAME+'/'+self.ACQ_TOPIC_VELOCITY_TO_POSE, Pose2DStamped,
        #                                              self.odometry_callback,  queue_size=1)

        self.odometry_publisher = rospy.Publisher("/poses_acquisition/" +
                                                  self.ACQ_ODOMETRY_TOPIC,
                                                  TransformStamped,
                                                  queue_size=1)

        # CvBridge is necessary for the image processing
        self.bridge = CvBridge()

        # Initialize attributes
        self.lastCameraInfo = None

        self.ImageRectifier = None

        self.img_id = 0
        # The visual odometry estimator is initialized lazily on the first frame
        self.vo = None
        self.clahe = None
        self.logger.info('Acquisition processor is set up.')

    def visual_odometry(self, outputdict):
        if self.vo is None:
            K = outputdict['new_camera_matrix'].reshape((3, 3))

            # Setting up the visual odometry
            self.vo = VisualOdometry(
                K,
                self.ACQ_ODOMETRY_POST_VISUAL_ODOMETRY_FEATURES,
                pitch_adjust=np.deg2rad(10.0))
            self.clahe = cv2.createCLAHE(clipLimit=5.0)

        img = outputdict['rect_image']
        img = self.clahe.apply(img)

        self.img_id += 1
        self.logger.info("Inside visual odometry")

        if self.vo.update(img, self.img_id):
            self.logger.info("inside if update")

            # The scaling is rather arbitrary: VO seems to return centimeters, but in general the scale is unreliable
            odometry = TransformStamped()
            odometry.header = outputdict['header']
            odometry.header.frame_id = self.ACQ_DEVICE_NAME
            odometry.child_frame_id = self.ACQ_DEVICE_NAME

            cy = math.cos(self.vo.relative_pose_theta * 0.5)
            sy = math.sin(self.vo.relative_pose_theta * 0.5)
            cp = 1.0
            sp = 0.0
            cr = 1.0
            sr = 0.0

            q_w = cy * cp * cr + sy * sp * sr
            q_x = cy * cp * sr - sy * sp * cr
            q_y = sy * cp * sr + cy * sp * cr
            q_z = sy * cp * cr - cy * sp * sr

            # Save the results to the new odometry relative pose message
            odometry.transform.translation.x = self.vo.relative_pose_x
            odometry.transform.translation.y = self.vo.relative_pose_y
            odometry.transform.translation.z = 0
            odometry.transform.rotation.x = q_x
            odometry.transform.rotation.y = q_y
            odometry.transform.rotation.z = q_z
            odometry.transform.rotation.w = q_w
            self.logger.info("publishing odometry")

            self.odometry_publisher.publish(odometry)

    def camera_image_process(self, currRawImage, currCameraInfo):
        """
        Contains the necessary camera image processing.
        """

        # Convert from ROS image message to numpy array
        cv_image = self.bridge.compressed_imgmsg_to_cv2(
            currRawImage, desired_encoding='mono8')

        # Scale the K matrix if the image resolution is not the same as in the calibration
        currRawImage_height = cv_image.shape[0]
        currRawImage_width = cv_image.shape[1]

        scale_matrix = np.ones(9)
        if currCameraInfo.height != currRawImage_height or currCameraInfo.width != currRawImage_width:
            scale_width = float(currRawImage_width) / currCameraInfo.width
            scale_height = float(currRawImage_height) / currCameraInfo.height

            scale_matrix[0] *= scale_width
            scale_matrix[2] *= scale_width
            scale_matrix[4] *= scale_height
            scale_matrix[5] *= scale_height

        outputDict = dict()

        # Process the image and extract the apriltags

        cameraMatrix = (np.array(currCameraInfo.K) * scale_matrix).reshape(
            (3, 3))

        if self.ImageRectifier is None:
            self.ImageRectifier = ImageRectifier(cv_image, cameraMatrix,
                                                 currCameraInfo.D)

        # Rectify the raw image and get the new camera matrix
        rect_image, newCameraMatrix = self.ImageRectifier.rectify(cv_image)

        # Package output
        outputDict['rect_image'] = rect_image
        outputDict['new_camera_matrix'] = newCameraMatrix
        outputDict['header'] = currRawImage.header
        self.logger.info("end of camera process")

        return outputDict

    def camera_info_callback(self, ros_data):
        """
        Callback function that is executed upon reception of new camera info data.
        """
        self.lastCameraInfo = ros_data

    def camera_image_callback(self, ros_data):
        """
        Callback function that is executed upon reception of a new camera image.
        """
        self.logger.info("received image")
        if self.lastCameraInfo is not None:
            self.visual_odometry(
                self.camera_image_process(ros_data, self.lastCameraInfo))
            self.logger.info("processed image")