def publish_pose(self, rvecs, tvecs, blf=True):
    msg_pose = geometry_msgs.msg.PoseStamped()
    msg_pose.header.stamp = rospy.Time.now()
    T_lm_to_camo = cv_u.T_of_t_r(tvecs[0], rvecs[0])
    if blf:
        # express the pose in the reference (base_link_footprint) frame
        msg_pose.header.frame_id = self.ref_frame
        T_lm_to_blf = np.dot(self.cam.cam_to_world_T, T_lm_to_camo)
        cv_u._position_and_orientation_from_T(msg_pose.pose.position, msg_pose.pose.orientation, T_lm_to_blf)
    else:
        # express the pose in the camera optical frame
        msg_pose.header.frame_id = self.cam.camo_frame
        cv_u._position_and_orientation_from_T(msg_pose.pose.position, msg_pose.pose.orientation, T_lm_to_camo)
    self.pose_pub.publish(msg_pose)
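# cv_u.T_of_t_r is not shown in this snippet. A minimal sketch of what it is
# assumed to do: build a 4x4 homogeneous transform from an OpenCV Rodrigues
# rotation vector and a translation vector (helper name suffixed to mark it
# as an illustration, not the actual implementation).
import numpy as np
import cv2

def T_of_t_r_sketch(t, r):
    T = np.eye(4)
    T[:3, :3], _jac = cv2.Rodrigues(np.asarray(r, dtype=np.float64))  # rotation vector -> 3x3 matrix
    T[:3, 3] = np.asarray(t, dtype=np.float64).squeeze()              # translation column
    return T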
def publish_landmarks(self, rvecs, tvecs, ids):
    _msg = cartographer_ros_msgs.msg.LandmarkList()
    _msg.header.stamp = rospy.Time.now()
    _msg.header.frame_id = self.ref_frame  # e.g. "caroline/base_link_footprint"
    for _r, _t, _id in zip(rvecs, tvecs, ids):
        lme = cartographer_ros_msgs.msg.LandmarkEntry()
        # string id
        lme.id = '{}'.format(_id)
        # geometry_msgs/Pose tracking_from_landmark_transform
        T_lm_to_camo = cv_u.T_of_t_r(_t, _r)
        T_lm_to_blf = np.dot(self.cam.cam_to_world_T, T_lm_to_camo)
        cv_u._position_and_orientation_from_T(lme.tracking_from_landmark_transform.position,
                                              lme.tracking_from_landmark_transform.orientation, T_lm_to_blf)
        # float64 translation_weight / rotation_weight
        lme.translation_weight = 0.1
        lme.rotation_weight = 0.1
        _msg.landmark.append(lme)
    self.lm_pub.publish(_msg)
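# self.lm_pub and the other publishers are created elsewhere in the class;
# an assumed minimal setup (the pose and marker topic names are illustrative,
# while "landmark" is Cartographer ROS's default LandmarkList topic):
import rospy
import geometry_msgs.msg, visualization_msgs.msg
import cartographer_ros_msgs.msg

def _setup_publishers_sketch(self):
    self.pose_pub = rospy.Publisher('aruco_pose', geometry_msgs.msg.PoseStamped, queue_size=1)
    # Cartographer ROS subscribes to cartographer_ros_msgs/LandmarkList messages
    self.lm_pub = rospy.Publisher('landmark', cartographer_ros_msgs.msg.LandmarkList, queue_size=1)
    self.marker_pub = rospy.Publisher('aruco_markers', visualization_msgs.msg.MarkerArray, queue_size=1)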
def publish_markers(self, rvecs, tvecs, ids, rgba=(1., 1., 0., 1.), blf=False):
    msg = visualization_msgs.msg.MarkerArray()
    for _r, _t, _id in zip(rvecs, tvecs, ids):
        marker = visualization_msgs.msg.Marker()
        marker.type = marker.LINE_STRIP
        marker.action = marker.ADD
        marker.id = _id
        s = marker.scale; s.x, s.y, s.z = 0.01, 0, 0  # only scale.x (line width) is used for LINE_STRIP
        c = marker.color; c.r, c.g, c.b, c.a = rgba
        T_lm_to_camo = cv_u.T_of_t_r(_t, _r)
        if blf:
            marker.header.frame_id = self.ref_frame
            T_lm_to_blf = np.dot(self.cam.cam_to_world_T, T_lm_to_camo)
            cv_u._position_and_orientation_from_T(marker.pose.position, marker.pose.orientation, T_lm_to_blf)
        else:
            marker.header.frame_id = self.cam.camo_frame
            cv_u._position_and_orientation_from_T(marker.pose.position, marker.pose.orientation, T_lm_to_camo)
        # outline of the square marker; msgPoint is assumed to alias geometry_msgs.msg.Point
        d = self.pipeline.marker_size
        marker.points = [msgPoint(0, 0, 0), msgPoint(d, 0, 0), msgPoint(d, d, 0), msgPoint(0, d, 0), msgPoint(0, 0, 0)]
        msg.markers.append(marker)
    self.marker_pub.publish(msg)
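# A hedged usage sketch: producing the (rvecs, tvecs, ids) the three
# publishers consume from a single image, using the legacy cv2.aruco API
# that the rest of this example relies on (the dictionary choice is an
# assumption):
import cv2

def detect_and_publish_sketch(self, img):
    ar_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
    corners, ids, _rejected = cv2.aruco.detectMarkers(img, ar_dict)
    if ids is None:
        return  # no marker in view
    rvecs, tvecs, _obj_pts = cv2.aruco.estimatePoseSingleMarkers(
        corners, self.pipeline.marker_size, self.cam.K, self.cam.D)
    self.publish_pose(rvecs, tvecs)
    self.publish_landmarks(rvecs, tvecs, ids.flatten())
    self.publish_markers(rvecs, tvecs, ids.flatten())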
Example #4
def _detect_and_localize_board(self, img, cam):
    self.img = img
    # detect markers
    self.corners, self.ids, rejectedImgPoints = cv2.aruco.detectMarkers(self.img, self.ar_dict)
    # refine marker detection using the board description
    res = cv2.aruco.refineDetectedMarkers(self.img, self.gridboard, self.corners, self.ids,
                                          rejectedImgPoints, cam.K, cam.D)
    detectedCorners, detectedIds, rejectedCorners, recoveredIdxs = res
    # interpolate chessboard corners from the detected markers
    response, charuco_corners, charuco_ids = cv2.aruco.interpolateCornersCharuco(
        markerCorners=detectedCorners,
        markerIds=detectedIds,
        image=self.img,
        board=self.gridboard)
    # estimate the board pose in the camera optical frame
    ret, self.rvec, self.tvec = cv2.aruco.estimatePoseCharucoBoard(charuco_corners, charuco_ids, self.gridboard,
                                                                   cam.K, cam.D, None, None, False)
    self.board_2_cam_T = cv_u.T_of_t_r(self.tvec.squeeze(), self.rvec)
    self.ref_2_board_t = [0., 0., -0.005]  # ref (track frame) to ArUco board translation
    self.ref_2_board_R = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    self.ref_2_board_T = cv_u.T_of_t_R(self.ref_2_board_t, self.ref_2_board_R)
    self.ref_2_cam_T = np.dot(self.board_2_cam_T, self.ref_2_board_T)
    cam.set_pose_T(self.ref_2_cam_T)
    rospy.loginfo('found world to cam pose')
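# cam.set_pose_T is not shown here. The publishing code in the previous
# example reads self.cam.cam_to_world_T, so a plausible sketch (an
# assumption) is that it stores the world-to-camera transform together
# with its inverse:
import numpy as np

def set_pose_T_sketch(self, world_to_cam_T):
    self.world_to_cam_T = world_to_cam_T
    self.cam_to_world_T = np.linalg.inv(world_to_cam_T)  # inverse homogeneous transform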
Example #5
# -*- coding: utf-8 -*-
import logging

import common_vision.utils as cv_u
import calibrate_extrinsics as calib_ext

LOG = logging.getLogger('calibrate_extrinsic')
logging.basicConfig(level=logging.INFO)

if __name__ == '__main__':
    intr_file = '/home/poine/.ros/camera_info/julie_short_range.yaml'
    img_file = '/home/poine/work/robot_data/julie/julie_extr_calib_1.png'
    calib_pts_file = '/home/poine/work/robot_data/julie/ext_calib_pts_1.yaml'
    LOG.info("Loading image {}".format(img_file))
    LOG.info("Loading pts {}".format(calib_pts_file))
    # compute extrinsics
    rotation_vector, translation_vector, pts_name, pts_img, pts_world, rep_pts_img = calib_ext.calibrate_extrinsics(
        intr_file, calib_pts_file)
    world_to_camo_T = cv_u.T_of_t_r(translation_vector.squeeze(),
                                    rotation_vector)
    calib_ext.print_caml_transforms(world_to_camo_T)
    calib_ext.write_yaml(
        '/home/poine/work/robot_data/julie/julie_short_range_extr.yaml',
        world_to_camo_T,
        'extrinsics (base footprint to camera optical frame transform)')
    calib_ext.draw_result_image(img_file, pts_name, pts_img, pts_world,
                                rep_pts_img)
    calib_ext.draw_result_3D(world_to_camo_T, pts_world, pts_name)
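# calib_ext.calibrate_extrinsics is external to this snippet. A hedged
# sketch of the PnP solve it is assumed to perform from the world/image
# point correspondences loaded from the YAML files (function name and
# signature are illustrative):
import numpy as np
import cv2

def calibrate_extrinsics_sketch(K, D, pts_world, pts_img):
    # world-to-camera pose from 3D/2D point correspondences
    ok, rvec, tvec = cv2.solvePnP(np.asarray(pts_world, dtype=np.float64),
                                  np.asarray(pts_img, dtype=np.float64), K, D)
    # reproject the world points to assess the fit (the rep_pts_img above)
    rep_pts_img, _jac = cv2.projectPoints(np.asarray(pts_world, dtype=np.float64),
                                          rvec, tvec, K, D)
    return rvec, tvec, rep_pts_img.squeeze()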