class AugmentedRealityBasicsNode(DTROS):
    """
    Augmented reality basics: project line segments with predefined end points onto the camera image.
    """
    def __init__(self, node_name):
        # Initialize the DTROS parent class
        super(AugmentedRealityBasicsNode,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)

        self.robot_name = rospy.get_namespace().strip('/')
        self._map_file_name = os.environ.get("DT_MAP_NAME", "hud")
        self._repo_path = os.environ.get("DT_REPO_PATH")

        # load camera intrinsics
        calibration_data = self.read_yaml_file(
            f"/data/config/calibrations/camera_intrinsic/{self.robot_name}.yaml"
        )
        camera_info = self.camera_info_from_yaml(calibration_data)

        # load camera extrinsics
        extrinsics = self.read_yaml_file(
            f"/data/config/calibrations/camera_extrinsic/{self.robot_name}.yaml"
        )
        homography = np.array(extrinsics["homography"]).reshape(3, 3)
        homography = np.linalg.inv(homography)  # map ground to image plane

        # initialize augmenter utility class
        self.augmenter = Augmenter(camera_info, homography, debug=False)

        # for cv2 and msg conversions
        self.bridge = CvBridge()

        # load map file
        self.map_dict = self.read_yaml_file(
            f"{self._repo_path}/packages/augmented_reality_basics/maps/{self._map_file_name}.yaml"
        )

        # subscribe to camera stream
        self.sub_camera_img = rospy.Subscriber("camera_node/image/compressed",
                                               CompressedImage,
                                               self.callback,
                                               queue_size=1)

        # publish modified image
        self.pub_modified_img = rospy.Publisher(
            f"~{self._map_file_name}/image/compressed",
            CompressedImage,
            queue_size=1)

        # map point coordinates in map_dict to image plane
        self.project_map_points(self.map_dict["points"])

        self.log("AugmentedRealityBasicsNode initialized.")

    def callback(self, msg):
        """
        On camera image callback, rectify image, project line segments onto it and publish it.
        """
        img = self.bridge.compressed_imgmsg_to_cv2(msg)  # convert to cv2 img

        img_rectified = self.augmenter.process_image(img)  # rectify img

        img_modified = self.augmenter.render_segments(
            img_rectified, self.map_dict)  # draw stuff on img

        # publish modified img
        img_out = self.bridge.cv2_to_compressed_imgmsg(img_modified)
        img_out.header = msg.header
        img_out.format = msg.format
        self.pub_modified_img.publish(img_out)

    def read_yaml_file(self, filename):
        """
        Reads the YAML file at the path given by 'filename' and returns its contents as a dict.
        E.g. a calibration file is located at:
            `/data/config/calibrations/<calibration_type>/DUCKIEBOT_NAME.yaml`
        """
        with open(filename, 'r') as in_file:
            try:
                yaml_dict = yaml.safe_load(in_file)
                return yaml_dict
            except yaml.YAMLError as exc:
                self.log("YAML syntax error. File: %s fname. Exc: %s" %
                         (filename, exc),
                         type='fatal')
                rospy.signal_shutdown()
                return

    @staticmethod
    def camera_info_from_yaml(calib_data):
        """
        Express calibration data (intrinsics) as a CameraInfo instance.

        :param calib_data: dict, loaded from yaml file
        :return: intrinsics as CameraInfo instance
        """
        cam_info = CameraInfo()
        cam_info.width = calib_data['image_width']
        cam_info.height = calib_data['image_height']
        cam_info.K = calib_data['camera_matrix']['data']
        cam_info.D = calib_data['distortion_coefficients']['data']
        cam_info.R = calib_data['rectification_matrix']['data']
        cam_info.P = calib_data['projection_matrix']['data']
        cam_info.distortion_model = calib_data['distortion_model']
        return cam_info
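
    # The intrinsic calibration dict consumed above is assumed to follow the
    # standard ROS camera calibration YAML layout (values below are placeholders):
    #   image_width: 640
    #   image_height: 480
    #   camera_matrix:           {rows: 3, cols: 3, data: [fx, 0, cx, 0, fy, cy, 0, 0, 1]}
    #   distortion_coefficients: {rows: 1, cols: 5, data: [k1, k2, p1, p2, k3]}
    #   rectification_matrix:    {rows: 3, cols: 3, data: [...]}
    #   projection_matrix:       {rows: 3, cols: 4, data: [...]}
    #   distortion_model: plumb_bob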

    def project_map_points(self, points_dict):
        """
        Project points in 'points_dict' into image frame. Modifies original dict.
        """
        for item in points_dict.values():
            point = [item[1][0], item[1][1]]
            frame = item[0]
            if frame == "image01":
                transformed_point = self.augmenter.image01_to_pixel(point)
            elif frame == "axle":
                transformed_point = self.augmenter.ground2pixel(point)
            elif frame == "image":
                transformed_point = point
            else:
                raise Exception(
                    f"[AugmentedRealityBasicsNode.project_map_points] Invalid frame: {frame}"
                )
            item[0] = "image"
            item[1] = transformed_point
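
A minimal sketch of how this node is typically launched, assuming the standard rospy entry-point pattern; the node name below is illustrative, not taken from the example:

if __name__ == "__main__":
    # Instantiate the node (name is an assumption) and let ROS process callbacks.
    ar_node = AugmentedRealityBasicsNode(node_name="augmented_reality_basics_node")
    # All work happens in the camera image callback; just keep the process alive.
    rospy.spin()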

Example 2

class AugmentedRealityBasicsNode(DTROS):
    def __init__(self, node_name):
        # initialize the DTROS parent class
        super(AugmentedRealityBasicsNode,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)
        # bridge between opencv and ros
        self.bridge = CvBridge()

        # get map params
        self.map_file = rospy.get_param('~map_file')
        self.map_file_basename = self.map_file.split('.')[0]

        # construct subscriber for images
        self.camera_sub = rospy.Subscriber('camera_node/image/compressed',
                                           CompressedImage, self.callback)
        # construct publisher for AR images
        self.pub = rospy.Publisher('~' + self.map_file_basename +
                                   '/image/compressed',
                                   CompressedImage,
                                   queue_size=10)

        # get camera calibration parameters (homography, camera matrix, distortion parameters)
        self.intrinsics_file = '/data/config/calibrations/camera_intrinsic/' + \
            rospy.get_namespace().strip("/") + ".yaml"
        self.extrinsics_file = '/data/config/calibrations/camera_extrinsic/' + \
            rospy.get_namespace().strip("/") + ".yaml"
        rospy.loginfo('Reading camera intrinsics from {}'.format(
            self.intrinsics_file))
        rospy.loginfo('Reading camera extrinsics from {}'.format(
            self.extrinsics_file))

        intrinsics = readYamlFile(self.intrinsics_file)
        extrinsics = readYamlFile(self.extrinsics_file)

        camera_params = {}
        camera_params['image_height'] = intrinsics['image_height']
        camera_params['image_width'] = intrinsics['image_width']
        camera_params['camera_matrix'] = np.array(
            intrinsics['camera_matrix']['data']).reshape(3, 3)
        camera_params['distortion_coefficients'] = np.array(
            intrinsics['distortion_coefficients']['data'])
        camera_params['homography'] = np.array(
            extrinsics['homography']).reshape(3, 3)

        # initialize augmenter with camera calibration parameters
        self.augmenter = Augmenter(camera_params)

        # read mapfile as a dictionary
        rospack = rospkg.RosPack()
        map_path = rospack.get_path(
            'augmented_reality_basics') + '/maps/' + self.map_file
        rospy.loginfo('Reading map file from {}'.format(map_path))
        self.map_dict = readYamlFile(map_path)

        # make sure map is in the right coordinates
        for _, val in self.map_dict['points'].items():
            if val[0] == 'axle':
                val[0] = 'image_undistorted'
                val[-1] = self.augmenter.ground2pixel(val[-1])

    def callback(self, data):
        raw_img = self.bridge.compressed_imgmsg_to_cv2(
            data, desired_encoding="passthrough")

        undistorted_img = self.augmenter.process_image(raw_img)
        ar_img = self.augmenter.render_segments(undistorted_img, self.map_dict)
        ar_img = self.augmenter.crop_to_roi(ar_img)

        msg = self.bridge.cv2_to_compressed_imgmsg(ar_img, dst_format='jpeg')
        self.pub.publish(msg)
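
Example 2 calls a readYamlFile helper that is not shown in the snippet. A minimal sketch of what it is assumed to do, based on how it is used above (the real helper may differ, e.g. in its error handling):

import yaml

def readYamlFile(fname):
    # Load a calibration or map YAML file and return its contents as a dict.
    with open(fname, "r") as f:
        return yaml.safe_load(f)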

Example 3

class AugmentedRealityBasics(DTROS):
    def __init__(self, node_name):
        """Wheel Encoder Node
        This implements basic functionality with the wheel encoders.
        """

        # Initialize the DTROS parent class
        super(AugmentedRealityBasics,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)

        self.veh_name = rospy.get_namespace().strip("/")
        rospy.loginfo("[AugmentedRealityBasics]: Vehicle Name = %s" %
                      self.veh_name)

        # Intrinsics
        rospy.loginfo(
            "[AugmentedRealityBasics]: Loading Camera Calibration Intrinsics ..."
        )

        if (not os.path.isfile(
                f'/data/config/calibrations/camera_intrinsic/{self.veh_name}.yaml'
        )):
            rospy.logwarn(
                f'[AugmentedRealityBasics]: Could not find {self.veh_name}.yaml. Loading default.yaml'
            )
            camera_intrinsic = self.read_yaml_file(
                f'/data/config/calibrations/camera_intrinsic/default.yaml')
        else:
            camera_intrinsic = self.read_yaml_file(
                f'/data/config/calibrations/camera_intrinsic/{self.veh_name}.yaml'
            )
        camera_info = self.camera_info_from_yaml(camera_intrinsic)
        #rospy.loginfo(f"[AugmentedRealityBasics]: camera_info = {camera_info}") #debug

        # Extrinsics
        rospy.loginfo(
            "[AugmentedRealityBasics]: Loading Camera Calibration Extrinsics ..."
        )

        if (not os.path.isfile(
                f'/data/config/calibrations/camera_extrinsic/{self.veh_name}.yaml'
        )):
            rospy.logwarn(
                f'[AugmentedRealityBasics]: Could not find {self.veh_name}.yaml. Loading default.yaml'
            )
            extrinsics = self.read_yaml_file(
                f'/data/config/calibrations/camera_extrinsic/default.yaml')
        else:
            extrinsics = self.read_yaml_file(
                f'/data/config/calibrations/camera_extrinsic/{self.veh_name}.yaml'
            )

        # the calibrated homography maps image pixel coordinates to axle (ground) coordinates
        homography = np.array(extrinsics["homography"]).reshape(3, 3)
        # invert it so that it maps axle (ground) coordinates to image pixel coordinates
        homography = np.linalg.inv(homography)
        #rospy.loginfo(f"[AugmentedRealityBasics]: homography: {homography}") #debug

        # Augmenter class
        rospy.loginfo("[AugmentedRealityBasics]: Initializing Subscribers ...")
        self.augmenter = Augmenter(camera_info, homography, debug=False)

        # Load Map
        rospy.loginfo("[AugmentedRealityBasics]: Loading Map ...")
        self.map_name = os.environ.get('MAP_FILE', 'hud')
        rospy.loginfo("[AugmentedRealityBasics]: Map Name: %s" % self.map_name)
        self.map_dict = self.read_yaml_file(
            os.environ.get('DT_REPO_PATH', '/') +
            '/packages/augmented_reality_basics/maps/' + self.map_name +
            '.yaml')

        # Remap points in map_dict from their reference frame to image frame
        self.remap_points(self.map_dict["points"])

        # CV bridge
        self.cv_bridge = CvBridge()

        # Subscribers
        rospy.loginfo("[AugmentedRealityBasics]: Initializing Subscribers ...")
        self.image_subscriber = rospy.Subscriber(
            'camera_node/image/compressed', CompressedImage, self.callback)

        # Publishers
        rospy.loginfo("[AugmentedRealityBasics]: Initializing Publishers ...")
        self.mod_img_pub = rospy.Publisher(
            f'augmented_reality_basics_node/{self.map_name}/image/compressed',
            CompressedImage,
            queue_size=1)

        rospy.loginfo("[AugmentedRealityBasics]: Initialized.")

    def callback(self, imgmsg):

        # Convert msg to cv2
        img = self.cv_bridge.compressed_imgmsg_to_cv2(imgmsg)

        # Process image
        undistorted_image = self.augmenter.process_image(img)

        # Project points to img pixels
        # (already done during node init, doesn't need to be redone)

        # Render modified image with segments
        modified_image = self.augmenter.render_segments(
            undistorted_image, self.map_dict)

        # Create ROS msg
        modified_image_msg = self.cv_bridge.cv2_to_compressed_imgmsg(
            modified_image)
        modified_image_msg.header.stamp = rospy.Time.now()

        self.mod_img_pub.publish(modified_image_msg)

        return

    def remap_points(self, points_dict):

        for item in points_dict.values():
            frame = item[0]
            point = item[1]

            if (frame == "axle"):
                item[0] = "image"
                item[1] = self.augmenter.ground2pixel(point)
            elif (frame == "image01"):
                item[0] = "image"
                item[1] = self.augmenter.image01_to_pixel(point)
            elif (frame == "image"):
                pass
            else:
                raise Exception(
                    f"[AugmentedRealityBasics.remap_points]: Invalid frame: {frame}"
                )

        #rospy.loginfo(f"[AugmentedRealityBasics]: Remapped points: {points_dict}")
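
    # The map dict consumed by remap_points/render_segments is assumed to follow
    # the Duckietown exercise map format: each named point is [reference_frame, [x, y]],
    # and segments reference points by name (names and values here are placeholders):
    #   points:
    #     TL: [image01, [0, 0]]
    #     TR: [image01, [0, 1]]
    #     BR: [image01, [1, 1]]
    #     BL: [image01, [1, 0]]
    #   segments:
    #     - points: [TL, TR]
    #       color: red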

    def read_yaml_file(self, fname):
        """
        Reads the YAML file at the path given by 'fname' and returns its contents as a dict.
        E.g. a calibration file is located at:
            `/data/config/calibrations/<calibration_type>/DUCKIEBOT_NAME.yaml`
        """
        if (not os.path.isfile(fname)):
            rospy.logwarn(
                "[AugmentedRealityBasics]: Could not find file in %s" % fname)
            return

        with open(fname, 'r') as in_file:
            try:
                yaml_dict = yaml.safe_load(in_file)
                return yaml_dict
            except yaml.YAMLError as exc:
                self.log("YAML syntax error. File: %s fname. Exc: %s" %
                         (fname, exc),
                         type='fatal')
                rospy.signal_shutdown()
                return

    @staticmethod
    def camera_info_from_yaml(calib_data):
        """
        Express calibration data (intrinsics) as a CameraInfo instance.
        input: calib_data: dict, loaded from yaml file
        output: intrinsics as CameraInfo instance
        """
        cam_info = CameraInfo()
        cam_info.width = calib_data['image_width']
        cam_info.height = calib_data['image_height']
        cam_info.K = calib_data['camera_matrix']['data']
        cam_info.D = calib_data['distortion_coefficients']['data']
        cam_info.R = calib_data['rectification_matrix']['data']
        cam_info.P = calib_data['projection_matrix']['data']
        cam_info.distortion_model = calib_data['distortion_model']
        return cam_info

    def run(self):
        rate = rospy.Rate(2.0)
        while not rospy.is_shutdown():
            # no periodic work needed; all processing happens in the image callback
            rate.sleep()

    @staticmethod
    def extract_camera_data(data):
        k = np.array(data['camera_matrix']['data']).reshape(3, 3)
        d = np.array(data['distortion_coefficients']['data'])
        r = np.array(data['rectification_matrix']['data']).reshape(3, 3)
        p = np.array(data['projection_matrix']['data']).reshape(3, 4)
        width = data['image_width']
        height = data['image_height']
        distortion_model = data['distortion_model']
        return k, d, r, p, width, height, distortion_model
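
An illustrative entry point for the third example, assuming the same launch pattern as above; the node name is a placeholder:

if __name__ == "__main__":
    # Instantiate the node; __init__ sets up the subscriber, publisher and map.
    node = AugmentedRealityBasics(node_name="augmented_reality_basics_node")
    # run() blocks until shutdown, sleeping at 2 Hz between iterations.
    node.run()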