Example #1
    def run(self):
        # left_cam_info = self.yaml_to_camera_info(self.left_yaml_file)
        # right_cam_info = self.yaml_to_camera_info(self.right_yaml_file)

        left_cam_info = CameraInfo()
        left_cam_info.width = 640
        left_cam_info.height = 480
        left_cam_info.K = [742.858255, 0.000000, 342.003337, 0.000000, 752.915532, 205.211518, 0.000000, 0.000000, 1.000000]
        left_cam_info.D = [0.098441, 0.046414, -0.039316, 0.011202, 0.000000]
        left_cam_info.R = [0.983638, 0.047847, -0.173686, -0.048987, 0.998797, -0.002280, 0.173368, 0.010751, 0.984798]
        left_cam_info.P = [905.027641, 0.000000, 500.770557, 0.000000, 0.000000, 905.027641, 180.388927, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000]
        left_cam_info.distortion_model = 'plumb_bob'

        right_cam_info = CameraInfo()
        right_cam_info.width = 640
        right_cam_info.height = 480
        right_cam_info.K = [747.026840, 0.000000, 356.331849, 0.000000, 757.336986, 191.248883, 0.000000, 0.000000, 1.000000]
        right_cam_info.D = [0.127580, -0.050428, -0.035857, 0.018986, 0.000000]
        right_cam_info.R = [0.987138, 0.047749, -0.152571, -0.046746, 0.998855, 0.010153, 0.152881, -0.002890, 0.988240]
        right_cam_info.P = [905.027641, 0.000000, 500.770557, -42.839515, 0.000000, 905.027641, 180.388927, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000]
        right_cam_info.distortion_model = 'plumb_bob'
        
        rate = rospy.Rate(30)
        while not rospy.is_shutdown():
            ret, frame = self.cam.read()
            if not ret:
                print('[ERROR]: frame error')
                break
            expand_frame = cv2.resize(frame, None, fx=2, fy=1)

            left_image = expand_frame[0:480, 0:640]
            right_image = expand_frame[0:480, 640:1280]
            

            self.msg_header.frame_id = 'stereo_image'
            self.msg_header.stamp = rospy.Time.now()
            left_cam_info.header = self.msg_header
            right_cam_info.header = self.msg_header
            self.left_image_info_pub.publish(left_cam_info)
            self.right_image_info_pub.publish(right_cam_info)
            # self.pub_image(self.left_image_pub, left_image, self.msg_header )
            # self.pub_image(self.right_image_pub, right_image, self.msg_header )

            try:
                thread.start_new_thread(self.pub_image, (self.left_image_pub, left_image, self.msg_header))
                thread.start_new_thread(self.pub_image, (self.right_image_pub, right_image, self.msg_header))
            except Exception as e:
                print("[ERROR]: unable to start thread: {}".format(e))
            rate.sleep()
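For context, a minimal sketch of what a pub_image helper like the one started in the threads above might look like; the use of cv_bridge and the self.bridge attribute are assumptions, not shown in the original node.

    def pub_image(self, publisher, cv_image, header):
        # Hypothetical helper: convert the OpenCV BGR image to a sensor_msgs/Image
        # and publish it with the same header used for the CameraInfo messages.
        # Assumes self.bridge = CvBridge() was created in __init__.
        msg = self.bridge.cv2_to_imgmsg(cv_image, encoding='bgr8')
        msg.header = header
        publisher.publish(msg)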
Example #2
    def __init__(self):
        rospy.init_node('image_converter', anonymous=True)
        self.filtered_face_locations = rospy.get_param('camera_topic')
        self.image_pub = rospy.Publisher(self.filtered_face_locations, Image, queue_size=10)
        self.image_info = rospy.Publisher('camera_info', CameraInfo, queue_size=10)
        self.bridge = CvBridge()
        cap = cv2.VideoCapture('/home/icog-labs/Videos/video.mp4')
        print(cap.get(cv2.CAP_PROP_FPS))
        path = rospkg.RosPack().get_path("robots_config")
        path = path + "/robot/camera.yaml"
        with open(path, 'r') as stream:
            calib_data = yaml.safe_load(stream)
        cam_info = CameraInfo()
        cam_info.width = calib_data['image_width']
        cam_info.height = calib_data['image_height']
        cam_info.K = calib_data['camera_matrix']['data']
        cam_info.D = calib_data['distortion_coefficients']['data']
        cam_info.R = calib_data['rectification_matrix']['data']
        cam_info.P = calib_data['projection_matrix']['data']
        cam_info.distortion_model = calib_data['distortion_model']
        while (not rospy.is_shutdown()) and (cap.isOpened()):
            self.keystroke = cv2.waitKey(1000 // 30)
            ret, frame = cap.read()
            if not ret:
                break

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # cv2.imshow('frame', gray)
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(gray, "mono8"))
            self.image_info.publish(cam_info)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
Example #3
def save_camera_data(bag, kitti, camera, timestamps):
    print("Exporting camera {}".format(camera.nr))

    camera_info = CameraInfo()
    camera_info.header.frame_id = rectified_camera_frame_id
    camera_info.K = list(
        getattr(kitti.calib, 'K_cam{}'.format(camera.nr)).flat)
    camera_info.P = list(
        getattr(kitti.calib, 'P_rect_{}0'.format(camera.nr)).flat)
    # We do not include the D and R parameters from the calibration data since the images are
    # already undistorted and rectified to the camera #0 frame.
    camera_info.R = list(np.eye(3).flat)

    cv_bridge = CvBridge()

    image_paths = getattr(kitti, 'cam{}_files'.format(camera.nr))
    for timestamp, image_path in tqdm(list(zip(timestamps, image_paths))):
        cv_image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
        camera_info.height, camera_info.width = cv_image.shape[:2]
        encoding = 'bgr8' if camera.is_rgb else 'mono8'
        image_message = cv_bridge.cv2_to_imgmsg(cv_image, encoding=encoding)
        image_message.header.frame_id = rectified_camera_frame_id
        t = to_rostime(timestamp)
        image_message.header.stamp = t
        camera_info.header.stamp = t
        # Follow the naming conventions from
        # http://docs.ros.org/melodic/api/sensor_msgs/html/msg/CameraInfo.html
        image_topic_ext = '/image_rect_color' if camera.is_rgb else '/image_rect'
        bag.write(camera.topic_id + image_topic_ext, image_message, t=t)
        bag.write(camera.topic_id + '/camera_info', camera_info, t=t)
def yaml_to_CameraInfo(calib_yaml):
    """
    Parse a yaml file containing camera calibration data (as produced by
    rosrun camera_calibration cameracalibrator.py) into a
    sensor_msgs/CameraInfo msg.

    Parameters
    ----------
    calib_yaml : str
        YAML string containing the camera calibration data

    Returns
    -------
    camera_info_msg : sensor_msgs.msg.CameraInfo
        A sensor_msgs.msg.CameraInfo message containing the camera calibration
        data
    """
    # Load data from the YAML string
    calib_data = yaml.safe_load(calib_yaml)
    # Parse
    camera_info_msg = CameraInfo()
    camera_info_msg.width = calib_data["image_width"]
    camera_info_msg.height = calib_data["image_height"]
    camera_info_msg.K = calib_data["camera_matrix"]["data"]
    camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
    camera_info_msg.R = calib_data["rectification_matrix"]["data"]
    camera_info_msg.P = calib_data["projection_matrix"]["data"]
    camera_info_msg.distortion_model = calib_data["distortion_model"]
    return camera_info_msg
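A minimal usage sketch for the parser above, assuming the calibration YAML is read from a file; the file path and topic name are illustrative.

if __name__ == "__main__":
    rospy.init_node("camera_info_publisher")
    with open("camera.yaml", "r") as f:  # hypothetical path
        info_msg = yaml_to_CameraInfo(f.read())
    pub = rospy.Publisher("camera_info", CameraInfo, queue_size=1)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        info_msg.header.stamp = rospy.Time.now()
        pub.publish(info_msg)
        rate.sleep()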
Example #5
def get_camera_info(pipeline, array):
    profile = pipeline.get_active_profile()
    if "color" in array:
        stream_profile = rs.video_stream_profile(
            profile.get_stream(rs.stream.color))
        stream_intrinsics = stream_profile.get_intrinsics()
    elif "depth" in array:
        stream_profile = rs.video_stream_profile(
            profile.get_stream(rs.stream.depth))
        stream_intrinsics = stream_profile.get_intrinsics()

    camera_info = CameraInfo()
    camera_info.width = stream_intrinsics.width
    camera_info.height = stream_intrinsics.height
    camera_info.distortion_model = 'plumb_bob'
    cx = stream_intrinsics.ppx
    cy = stream_intrinsics.ppy
    fx = stream_intrinsics.fx
    fy = stream_intrinsics.fy
    camera_info.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1]
    camera_info.D = [0, 0, 0, 0, 0]
    camera_info.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]
    camera_info.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1.0, 0]

    return camera_info
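A short usage sketch for get_camera_info above, assuming pyrealsense2 is available as rs and a RealSense device is connected; the stream settings are illustrative.

import pyrealsense2 as rs

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
pipeline.start(config)
try:
    color_info = get_camera_info(pipeline, "color")
finally:
    pipeline.stop()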
def yaml_to_CameraInfo(yaml_fname):
    """
    Parse a yaml file containing camera calibration data (as produced by 
    rosrun camera_calibration cameracalibrator.py) into a 
    sensor_msgs/CameraInfo msg.
    
    Parameters
    ----------
    yaml_fname : str
        Path to yaml file containing camera calibration data
    Returns
    -------
    camera_info_msg : sensor_msgs.msg.CameraInfo
        A sensor_msgs.msg.CameraInfo message containing the camera calibration
        data
    """
    # Load data from file
    with open(yaml_fname, "r") as file_handle:
        calib_data = yaml.load(file_handle)
    # Parse
    camera_info_msg = CameraInfo()
    camera_info_msg.width = calib_data["image_width"]
    camera_info_msg.height = calib_data["image_height"]
    camera_info_msg.K = calib_data["camera_matrix"]["data"]
    camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
    camera_info_msg.R = calib_data["rectification_matrix"]["data"]
    camera_info_msg.P = calib_data["projection_matrix"]["data"]
    camera_info_msg.distortion_model = calib_data["distortion_model"]
    return camera_info_msg
Example #8
    def get_camera_info(self, yaml_fname):
        """
        Parse a yaml file containing camera calibration data
        Parameters
        ----------
        yaml_fname : str
            Path to yaml file containing camera calibration data

        Returns
        -------
        camera_info_msg : sensor_msgs.msg.CameraInfo
            A sensor_msgs.msg.CameraInfo message containing the camera calibration
            data
        """
        # Load data from file
        with open(yaml_fname, "r") as file_handle:
            calib_data = yaml.load(file_handle)
        # Parse
        camera_info_msg = CameraInfo()
        camera_info_msg.width = calib_data["image_width"]
        camera_info_msg.height = calib_data["image_height"]
        camera_info_msg.K = calib_data["intrinsics"]["data"]
        camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
        camera_info_msg.R = calib_data["rectification_matrix"]["data"]
        camera_info_msg.P = calib_data["projection_matrix"]["data"]
        camera_info_msg.distortion_model = calib_data["distortion_model"]
        camera_info_msg.header.frame_id = calib_data["frame_id"]

        return camera_info_msg
 def publish(self, event):
     if self.imgmsg is None:
         return
     now = rospy.Time.now()
     # setup ros message and publish
     with self.lock:
         self.imgmsg.header.stamp = now
         self.imgmsg.header.frame_id = self.frame_id
     self.pub.publish(self.imgmsg)
     if self.publish_info:
         info = CameraInfo()
         info.header.stamp = now
         info.header.frame_id = self.frame_id
         info.width = self.imgmsg.width
         info.height = self.imgmsg.height
         if self.fovx is not None and self.fovy is not None:
             fx = self.imgmsg.width / 2.0 / \
                 np.tan(np.deg2rad(self.fovx / 2.0))
             fy = self.imgmsg.height / 2.0 / \
                 np.tan(np.deg2rad(self.fovy / 2.0))
             cx = self.imgmsg.width / 2.0
             cy = self.imgmsg.height / 2.0
             info.K = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1.0])
             info.P = np.array([fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1, 0])
             info.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
         self.pub_info.publish(info)
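The fx/fy values above come from the pinhole relation between image size and field of view; a small sketch of that relation and its inverse, assuming the FOV is given in degrees:

import numpy as np

def focal_from_fov(size_px, fov_deg):
    # f = (size / 2) / tan(fov / 2), the same formula used in the publisher above.
    return size_px / 2.0 / np.tan(np.deg2rad(fov_deg / 2.0))

def fov_from_focal(size_px, f):
    # Inverse relation, handy for sanity-checking a calibrated K against a nominal FOV.
    return np.rad2deg(2.0 * np.arctan(size_px / (2.0 * f)))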
Example #10
    def _build_camera_info(self):
        """
        Private function to compute camera info

        camera info doesn't change over time
        """
        camera_info = CameraInfo()
        # store info with header
        camera_info.header = self.get_msg_header()
        camera_info.width = int(self.carla_actor.attributes['image_size_x'])
        camera_info.height = int(self.carla_actor.attributes['image_size_y'])
        camera_info.distortion_model = 'plumb_bob'
        cx = camera_info.width / 2.0
        cy = camera_info.height / 2.0
        fx = camera_info.width / (
            2.0 * math.tan(float(self.carla_actor.attributes['fov']) * math.pi / 360.0))
        fy = fx
        if ROS_VERSION == 1:
            camera_info.K = [fx, 0.0, cx, 0.0, fy, cy, 0.0, 0.0, 1.0]
            camera_info.D = [0.0, 0.0, 0.0, 0.0, 0.0]
            camera_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
            camera_info.P = [fx, 0.0, cx, 0.0, 0.0, fy, cy, 0.0, 0.0, 0.0, 1.0, 0.0]
        elif ROS_VERSION == 2:
            # pylint: disable=assigning-non-slot
            camera_info.k = [fx, 0.0, cx, 0.0, fy, cy, 0.0, 0.0, 1.0]
            camera_info.d = [0.0, 0.0, 0.0, 0.0, 0.0]
            camera_info.r = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
            camera_info.p = [fx, 0.0, cx, 0.0, 0.0, fy, cy, 0.0, 0.0, 0.0, 1.0, 0.0]
        self._camera_info = camera_info
Example #11
    def __init__(self):
        # initialize this code as a ROS node named stream_to_ROS_node:
        rospy.init_node("stream_to_ROS_node", anonymous=True)

        # create publisher to publish frames from the video stream:
        self.image_pub = rospy.Publisher("/balrog_camera/image_raw", Image, queue_size = 1)

        # create publisher to publish camera info (calibration params):
        self.camera_info_pub = rospy.Publisher("/balrog_camera/camera_info", CameraInfo, queue_size = 1)

        # initialize cv_bridge for conversion between openCV and ROS images:
        self.cv_bridge = CvBridge()

        # the most recently read camera frame:
        self.latest_frame =  None

        # start a thread constantly reading frames from the RPI video stream:
        self.thread_video = Thread(target = self.video_thread)
        self.thread_video.start()

        # camera info and calibration parameters:
        camera_info_msg = CameraInfo()
        camera_info_msg.height = 360
        camera_info_msg.width = 640
        camera_info_msg.distortion_model = "plumb_bob"
        camera_info_msg.D = [1.6541397736403166e-01, -3.1762679367786140e-01, 0.0, 0.0, -1.4358526468495059e-01] # ([k1, k2, t1, t2, k3])
        camera_info_msg.K = [4.8488441935486514e+02, 0, 320, 0, 4.8488441935486514e+02, 180, 0, 0, 1]  # ([fx, 0, cx, 0, fy, cy, 0, 0, 1])
        camera_info_msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        self.camera_info_msg = camera_info_msg
Example #12
 def load_camera_info(self):
     '''Load camera intrinsics'''
     filename = (sys.path[0] + "/calibrations/camera_intrinsic/" +
                 self.robot_name + ".yaml")
     if not os.path.isfile(filename):
         logger.warn(
             "no intrinsic calibration parameters for {}, trying default".
             format(self.robot_name))
         filename = (sys.path[0] +
                     "/calibrations/camera_intrinsic/default.yaml")
         if not os.path.isfile(filename):
             logger.error("can't find default either, something's wrong")
     calib_data = yaml_load_file(filename)
     #     logger.info(yaml_dump(calib_data))
     cam_info = CameraInfo()
     cam_info.width = calib_data['image_width']
     cam_info.height = calib_data['image_height']
     cam_info.K = np.array(calib_data['camera_matrix']['data']).reshape(
         (3, 3))
     cam_info.D = np.array(
         calib_data['distortion_coefficients']['data']).reshape((1, 5))
     cam_info.R = np.array(
         calib_data['rectification_matrix']['data']).reshape((3, 3))
     cam_info.P = np.array(calib_data['projection_matrix']['data']).reshape(
         (3, 4))
     cam_info.distortion_model = calib_data['distortion_model']
     logger.info(
         "Loaded camera calibration parameters for {} from {}".format(
             self.robot_name, os.path.basename(filename)))
     return cam_info
Example #13
    def rosmsg(self):
        """:obj:`sensor_msgs.CamerInfo` : Returns ROS CamerInfo msg 
        """
        msg_header = Header()
        msg_header.frame_id = self._frame

        msg_roi = RegionOfInterest()
        msg_roi.x_offset = 0
        msg_roi.y_offset = 0
        msg_roi.height = 0
        msg_roi.width = 0
        msg_roi.do_rectify = 0

        msg = CameraInfo()
        msg.header = msg_header
        msg.height = self._height
        msg.width = self._width
        msg.distortion_model = 'plumb_bob'
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.K = [
            self._fx, 0.0, self._cx, 0.0, self._fy, self._cy, 0.0, 0.0, 1.0
        ]
        msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg.P = [
            self._fx, 0.0, self._cx, 0.0, 0.0, self._fx, self._cy, 0.0, 0.0,
            0.0, 1.0, 0.0
        ]
        msg.binning_x = 0
        msg.binning_y = 0
        msg.roi = msg_roi

        return msg
def info_720p():

    data = CameraInfo()

    data.height = 720
    data.width = 1280

    data.distortion_model = "plumb_bob"

    data.D = [-0.266169, 0.056566, 0.003569, 0.002922, 0.000000]

    data.K = [
        550.515474, 0.000000, 560.486530, 0.000000, 550.947091, 334.377088,
        0.000000, 0.000000, 1.000000
    ]

    data.R = [
        1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000,
        0.000000, 1.000000
    ]

    data.P = [
        395.931458, 0.000000, 570.158643, 0.000000, 0.000000, 474.049377,
        329.846866, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000
    ]

    data.binning_x = 0
    data.binning_y = 0
    data.roi.x_offset = 0
    data.roi.y_offset = 0
    data.roi.height = 0
    data.roi.width = 0
    data.roi.do_rectify = False

    return data
def info_480p():

    data = CameraInfo()

    data.height = 480
    data.width = 640

    data.distortion_model = "plumb_bob"

    data.D = [-0.297117, 0.070173, -0.000390, 0.005232, 0.000000]

    data.K = [
        342.399061, 0.000000, 284.222700, 0.000000, 343.988302, 227.555237,
        0.000000, 0.000000, 1.000000
    ]

    data.R = [
        1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000,
        0.000000, 1.000000
    ]

    data.P = [
        247.555710, 0.000000, 290.282918, 0.000000, 0.000000, 281.358612,
        220.776009, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000
    ]

    data.binning_x = 0
    data.binning_y = 0
    data.roi.x_offset = 0
    data.roi.y_offset = 0
    data.roi.height = 0
    data.roi.width = 0
    data.roi.do_rectify = False

    return data
Example #16
 def test_CameraFOV(self):
     msg = CameraInfo()
     msg.height = 1024
     msg.width = 1280
     msg.distortion_model = 'plumb_bob'
     msg.D = [
         -0.5506515572367885, 0.16918149333674903, -0.0005494252446900035,
         -0.003574460971943457, 0.08824797068343779
     ]
     msg.K = [
         1547.3611792786492, 0.0, 645.7946620597459, 0.0,
         1546.5965535476455, 512.489834878375, 0.0, 0.0, 1.0
     ]
     msg.R = [
         0.9976063902119301, 0.0008462845042432227, 0.06914314145929477,
         -0.0007608300534628908, 0.9999989139538485, -0.0012622316559149822,
         -0.06914413457374326, 0.0012066041858555048, 0.997605950644034
     ]
     msg.P = [
         1445.628365834274, 0.0, 521.7993656184378, 0.0, 0.0,
         1445.628365834274, 514.7422812912976, 0.0, 0.0, 0.0, 1.0, 0.0
     ]
     camfov = ru.camera.CameraFOV(msg, maxdist=2.)
     corners = camfov.get_corners()
     vertices, faces = camfov.get_trimesh()
     self.assertTrue(camfov.contains(corners))
     points_inside = [camfov.random_point_inside() for _ in range(100)]
     self.assertTrue(camfov.contains(points_inside))
Example #17
File: image.py Project: lucasw/vimjay
    def __init__(self):
        rospy.init_node('image_publish')
        name = sys.argv[1]
        image = cv2.imread(name)
        #cv2.imshow("im", image)
        #cv2.waitKey(5)

        hz = rospy.get_param("~rate", 1)
        frame_id = rospy.get_param("~frame_id", "map")
        use_image = rospy.get_param("~use_image", True)
        rate = rospy.Rate(hz)

        self.ci_in = None
        ci_sub = rospy.Subscriber('camera_info_in', CameraInfo,
                                  self.camera_info_callback, queue_size=1)

        if use_image:
            pub = rospy.Publisher('image', Image, queue_size=1)
        ci_pub = rospy.Publisher('camera_info', CameraInfo, queue_size=1)

        msg = Image()
        msg.header.stamp = rospy.Time.now()
        msg.header.frame_id = frame_id
        msg.encoding = 'bgr8'
        msg.height = image.shape[0]
        msg.width = image.shape[1]
        msg.step = image.shape[1] * 3
        msg.data = image.tobytes()
        if use_image:
            pub.publish(msg)

        ci = CameraInfo()
        ci.header = msg.header
        ci.height = msg.height
        ci.width = msg.width
        ci.distortion_model = "plumb_bob"
        # TODO(lucasw) need a way to set these values- have this node
        # subscribe to an input CameraInfo?
        ci.D = [0.0, 0.0, 0.0, 0, 0]
        ci.K = [500.0, 0.0, msg.width/2, 0.0, 500.0, msg.height/2, 0.0, 0.0, 1.0]
        ci.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        ci.P = [500.0, 0.0, msg.width/2, 0.0, 0.0, 500.0, msg.height/2, 0.0,  0.0, 0.0, 1.0, 0.0]
        # ci_pub.publish(ci)

        # TODO(lwalter) only run this if hz is positive,
        # otherwise wait for an input trigger message to publish an image
        while not rospy.is_shutdown():
            if self.ci_in is not None:
                ci = self.ci_in

            msg.header.stamp = rospy.Time.now()
            ci.header = msg.header
            if use_image:
                pub.publish(msg)
            ci_pub.publish(ci)

            if hz <= 0:
                rospy.sleep(1.0)
            else:
                rate.sleep()
Example #18
 def publish( self ):
     # Get the image.
     image = self.__videoDeviceProxy.getImageRemote( self.__videoDeviceProxyName );
         
     # Create Image message.
     ImageMessage = Image();
     ImageMessage.header.stamp.secs = image[ 5 ];
     ImageMessage.width = image[ 0 ];
     ImageMessage.height = image[ 1 ];
     ImageMessage.step = image[ 2 ] * image[ 0 ];
     ImageMessage.is_bigendian = False;
     ImageMessage.encoding = 'bgr8';
     ImageMessage.data = image[ 6 ];
     
     self.__imagePublisher.publish( ImageMessage );
     
     # Create CameraInfo message.
     # Data from the calibration phase is hard coded for now.
     CameraInfoMessage = CameraInfo();
     CameraInfoMessage.header.stamp.secs = image[ 5 ];
     CameraInfoMessage.width = image[ 0 ];
     CameraInfoMessage.height = image[ 1 ];
     CameraInfoMessage.D = [ -0.0769218451517258, 0.16183180613612602, 0.0011626049774280595, 0.0018733894100460534, 0.0 ];
     CameraInfoMessage.K = [ 581.090096189648, 0.0, 341.0926325830606, 0.0, 583.0323248080421, 241.02441593704128, 0.0, 0.0, 1.0 ];
     CameraInfoMessage.R = [ 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0 ];
     CameraInfoMessage.P = [ 580.5918359588198, 0.0, 340.76398441334106, 0.0, 0.0, 582.4042541534321, 241.04182225157172, 0.0, 0.0, 0.0, 1.0, 0.0] ;
     CameraInfoMessage.distortion_model = 'plumb_bob';
     
     #CameraInfoMessage.roi.x_offset = self.__ROIXOffset;
     #CameraInfoMessage.roi.y_offset = self.__ROIYOffset;
     #CameraInfoMessage.roi.width = self.__ROIWidth;
     #CameraInfoMessage.roi.height = self.__ROIHeight;
     #CameraInfoMessage.roi.do_rectify = self.__ROIDoRectify;
     
     self.__cameraInfoPublisher.publish( CameraInfoMessage );
Example #19
def run():
    rospy.init_node('publish_constant', anonymous=True)
    pub = rospy.Publisher(args.topic, CameraInfo, queue_size=10)
    rate = rospy.Rate(30)

    filename = args.camera_info
    with open(filename, 'r') as f:
        camera_data = yaml.load(f)

    camera_info = CameraInfo()
    camera_info.height = camera_data['height']
    camera_info.width = camera_data['width']
    camera_info.K = camera_data['K']
    camera_info.D = camera_data['D']
    camera_info.R = camera_data['R']
    camera_info.P = camera_data['P']
    camera_info.distortion_model = camera_data['distortion_model']

    camera_info.header.frame_id = args.frame
    # Setting this to zero means 'take the newest camera pose in TF'.
    camera_info.header.stamp = rospy.Time(0)

    while not rospy.is_shutdown():
        pub.publish(camera_info)
        rate.sleep()
    def send_test_messages(self, filename):
        self.msg_received = False
        # Publish the camera info TODO make this a field in the annotations file to dictate the source calibration file
        msg_info = CameraInfo()
        msg_info.height = 480
        msg_info.width = 640
        msg_info.distortion_model = "plumb_bob"
        msg_info.D = [-0.28048157543793056, 0.05674481026365553, -0.000988764087143394, -0.00026869128565781613, 0.0]
        msg_info.K = [315.128501, 0.0, 323.069638, 0.0, 320.096636, 218.012581, 0.0, 0.0, 1.0]
        msg_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg_info.P = [217.48876953125, 0.0, 321.3154072384932, 0.0, 0.0, 250.71084594726562, 202.30416165274983, 0.0,
                      0.0, 0.0, 1.0, 0.0]
        msg_info.roi.do_rectify = False

        # Publish the test image
        img = cv2.imread(filename)
        cvb = CvBridge()
        msg_raw = cvb.cv2_to_imgmsg(img, encoding="bgr8")
        self.pub_info.publish(msg_info)
        self.pub_raw.publish(msg_raw)

        # Wait for the message to be received
        timeout = rospy.Time.now() + rospy.Duration(10)  # Wait at most 10 seconds for the node to reply
        while not self.msg_received and not rospy.is_shutdown() and rospy.Time.now() < timeout:
            rospy.sleep(0.1)
        self.assertLess(rospy.Time.now(), timeout, "Waiting for apriltag detection timed out.")
Example #21
    def camera_info(self):
        msg_header = Header()
        msg_header.frame_id = "f450/robot_camera_down"
        msg_roi = RegionOfInterest()
        msg_roi.x_offset = 0
        msg_roi.y_offset = 0
        msg_roi.height = 0
        msg_roi.width = 0
        msg_roi.do_rectify = 0

        msg = CameraInfo()
        msg.header = msg_header
        msg.height = 480
        msg.width = 640
        msg.distortion_model = 'plumb_bob'
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.K = [1.0, 0.0, 320.5, 0.0, 1.0, 240.5, 0.0, 0.0, 1.0]
        msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg.P = [
            1.0, 0.0, 320.5, -0.0, 0.0, 1.0, 240.5, 0.0, 0.0, 0.0, 1.0, 0.0
        ]
        msg.binning_x = 0
        msg.binning_y = 0
        msg.roi = msg_roi
        return msg
Example #22
def yaml_to_CameraInfo():
    rospy.init_node("camera_info_publisher", anonymous=True)
    info_publisher = rospy.Publisher("camera_info", CameraInfo, queue_size=10)
    # yaml_publisher = rospy.Publisher("yaml_filename", String, queue_size=10)

    rate = rospy.Rate(10)  # 10hz
    yaml_fname = rospy.get_param('~camera_yaml')
    # yaml_fname = rospy.get_param('camera_yaml')

    with open(yaml_fname, "r") as file_handle:
        calib_data = yaml.load(file_handle)
    # Parse
    camera_info_msg = CameraInfo()
    camera_info_msg.width = calib_data["image_width"]
    camera_info_msg.height = calib_data["image_height"]
    camera_info_msg.K = calib_data["camera_matrix"]["data"]
    camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
    camera_info_msg.R = calib_data["rectification_matrix"]["data"]
    camera_info_msg.P = calib_data["projection_matrix"]["data"]
    camera_info_msg.distortion_model = calib_data["distortion_model"]

    while not rospy.is_shutdown():
        # yaml_publisher.publish(yaml_fname)
        info_publisher.publish(camera_info_msg)
        rate.sleep()
def publishing(pub_image, pub_camera, image, type_of_camera):
    if type_of_camera == 1:
        image.convert(carla.ColorConverter.Depth)
    elif type_of_camera == 2:
        image.convert(carla.ColorConverter.CityScapesPalette)
    array = np.frombuffer(image.raw_data, dtype=np.uint8)
    array = np.reshape(array, (image.height, image.width, 4))
    array = array[:, :, :3]
    array = array[:, :, ::-1]
    img = Image()
    infomsg = CameraInfo()

    img.header.stamp = rospy.Time.now()
    img.header.frame_id = 'base'
    img.height = infomsg.height = image.height
    img.width = infomsg.width = image.width
    img.encoding = "rgb8"
    img.step = img.width * 3 * 1
    st1 = array.tobytes()
    img.data = st1

    cx = infomsg.width / 2.0
    cy = infomsg.height / 2.0
    fx = fy = infomsg.width / (2.0 * math.tan(image.fov * math.pi / 360.0))
    infomsg.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1]
    infomsg.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1, 0]
    infomsg.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]
    infomsg.D = [0, 0, 0, 0, 0]
    infomsg.binning_x = 0
    infomsg.binning_y = 0
    infomsg.distortion_model = "plumb_bob"
    infomsg.header = img.header

    pub_image.publish(img)
    pub_camera.publish(infomsg)
Example #25
def load_camera_info_3(robot):
    # Load camera information
    filename = (os.environ['DUCKIEFLEET_ROOT'] +
                "/calibrations/camera_intrinsic/" + robot + ".yaml")
    if not os.path.isfile(filename):
        dtu.logger.warn(
            "no intrinsic calibration parameters for {}, trying default".
            format(robot))
        filename = (os.environ['DUCKIEFLEET_ROOT'] +
                    "/calibrations/camera_intrinsic/default.yaml")
        if not os.path.isfile(filename):
            dtu.logger.error("can't find default either, something's wrong")
    calib_data = dtu.yaml_wrap.yaml_load_file(filename)
    cam_info = CameraInfo()
    cam_info.width = calib_data['image_width']
    cam_info.height = calib_data['image_height']
    cam_info.K = calib_data['camera_matrix']['data']
    cam_info.D = calib_data['distortion_coefficients']['data']
    cam_info.R = calib_data['rectification_matrix']['data']
    cam_info.P = calib_data['projection_matrix']['data']
    cam_info.distortion_model = calib_data['distortion_model']
    dtu.logger.info(
        "Loaded camera calibration parameters for {} from {}".format(
            robot, os.path.basename(filename)))
    return cam_info
Example #26
def make_camera_msg(cam, rgb_time_sec, rgb_time_nsec, test_image):
    camera_info_msg = CameraInfo()
    camera_info_msg.header.seq = test_image

    if rgb_time_nsec + 20000000 > 999999999:
        camera_info_msg.header.stamp.secs = rgb_time_sec + 1
        camera_info_msg.header.stamp.nsecs = rgb_time_nsec + 20000000 - 1000000000
    else:
        camera_info_msg.header.stamp.secs = rgb_time_sec
        camera_info_msg.header.stamp.nsecs = rgb_time_nsec + 20000000
    # camera_info_msg.header.stamp.secs = rgb_time_sec
    # camera_info_msg.header.stamp.nsecs = rgb_time_nsec + 20000000

    camera_info_msg.header.frame_id = "/openni_rgb_optical_frame"
    camera_info_msg.distortion_model = "plumb_bob"
    width, height = cam[0], cam[1]
    fx, fy = cam[2], cam[3]
    cx, cy = cam[4], cam[5]
    camera_info_msg.width = width
    camera_info_msg.height = height
    camera_info_msg.K = [fx, 0, cx,
                         0, fy, cy,
                         0, 0, 1]

    camera_info_msg.D = [0, 0, 0, 0, 0]

    camera_info_msg.R = [1, 0, 0,
                         0, 1, 0,
                         0, 0, 1]

    camera_info_msg.P = [fx, 0, cx, 0,
                         0, fy, cy, 0,
                         0, 0, 1, 0]

    return camera_info_msg
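The manual nanosecond rollover above can also be delegated to rospy's time types, which normalize seconds and nanoseconds automatically; a small sketch of that alternative (the 20 ms offset matches the constant used above):

import rospy

def stamp_with_offset(sec, nsec, offset_nsec=20000000):
    # rospy.Time + rospy.Duration handles nanosecond overflow internally.
    return rospy.Time(sec, nsec) + rospy.Duration(0, offset_nsec)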
Example #27
File: kitti2bag.py Project: zxw610/LIO-SAM
def save_camera_data(bag, kitti_type, kitti, util, bridge, camera,
                     camera_frame_id, topic, initial_time):
    print("Exporting camera {}".format(camera))
    if kitti_type.find("raw") != -1:
        camera_pad = '{0:02d}'.format(camera)
        image_dir = os.path.join(kitti.data_path,
                                 'image_{}'.format(camera_pad))
        image_path = os.path.join(image_dir, 'data')
        image_filenames = sorted(os.listdir(image_path))
        with open(os.path.join(image_dir, 'timestamps.txt')) as f:
            image_datetimes = map(
                lambda x: datetime.strptime(x[:-4], '%Y-%m-%d %H:%M:%S.%f'),
                f.readlines())

        calib = CameraInfo()
        calib.header.frame_id = camera_frame_id
        calib.width, calib.height = tuple(
            util['S_rect_{}'.format(camera_pad)].tolist())
        calib.distortion_model = 'plumb_bob'
        calib.K = util['K_{}'.format(camera_pad)]
        calib.R = util['R_rect_{}'.format(camera_pad)]
        calib.D = util['D_{}'.format(camera_pad)]
        calib.P = util['P_rect_{}'.format(camera_pad)]

    elif kitti_type.find("odom") != -1:
        camera_pad = '{0:01d}'.format(camera)
        image_path = os.path.join(kitti.sequence_path,
                                  'image_{}'.format(camera_pad))
        image_filenames = sorted(os.listdir(image_path))
        image_datetimes = map(lambda x: initial_time + x.total_seconds(),
                              kitti.timestamps)

        calib = CameraInfo()
        calib.header.frame_id = camera_frame_id
        calib.P = util['P{}'.format(camera_pad)]

    iterable = zip(image_datetimes, image_filenames)
    bar = progressbar.ProgressBar()
    for dt, filename in bar(iterable):
        image_filename = os.path.join(image_path, filename)
        cv_image = cv2.imread(image_filename)
        calib.height, calib.width = cv_image.shape[:2]
        if camera in (0, 1):
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        encoding = "mono8" if camera in (0, 1) else "bgr8"
        image_message = bridge.cv2_to_imgmsg(cv_image, encoding=encoding)
        image_message.header.frame_id = camera_frame_id
        if kitti_type.find("raw") != -1:
            image_message.header.stamp = rospy.Time.from_sec(
                float(datetime.strftime(dt, "%s.%f")))
            topic_ext = "/image_raw"
        elif kitti_type.find("odom") != -1:
            image_message.header.stamp = rospy.Time.from_sec(dt)
            topic_ext = "/image_rect"
        calib.header.stamp = image_message.header.stamp
        bag.write(topic + topic_ext,
                  image_message,
                  t=image_message.header.stamp)
        bag.write(topic + '/camera_info', calib, t=calib.header.stamp)
Example #28
def param2Msg(K, RT, width, height):
    param = CameraInfo()
    param.height = height
    param.width = width
    param.K = K
    param.R = RT

    return param
Example #29
    def default(self, ci="unused"):
        if not self.component_instance.capturing:
            return  # press [Space] key to enable capturing

        image_local = self.data["image"]

        image = Image()
        image.header = self.get_ros_header()
        image.header.frame_id += "/base_image"
        image.height = self.component_instance.image_height
        image.width = self.component_instance.image_width
        image.encoding = "rgba8"
        image.step = image.width * 4

        # VideoTexture.ImageRender implements the buffer interface
        image.data = bytes(image_local)

        # fill these 3 parameters to get a correct image with a stereo camera
        Tx = 0
        Ty = 0
        R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

        intrinsic = self.data["intrinsic_matrix"]

        camera_info = CameraInfo()
        camera_info.header = image.header
        camera_info.height = image.height
        camera_info.width = image.width
        camera_info.distortion_model = "plumb_bob"
        camera_info.K = [
            intrinsic[0][0],
            intrinsic[0][1],
            intrinsic[0][2],
            intrinsic[1][0],
            intrinsic[1][1],
            intrinsic[1][2],
            intrinsic[2][0],
            intrinsic[2][1],
            intrinsic[2][2],
        ]
        camera_info.R = R
        camera_info.P = [
            intrinsic[0][0],
            intrinsic[0][1],
            intrinsic[0][2],
            Tx,
            intrinsic[1][0],
            intrinsic[1][1],
            intrinsic[1][2],
            Ty,
            intrinsic[2][0],
            intrinsic[2][1],
            intrinsic[2][2],
            0,
        ]

        self.publish(image)
        self.topic_camera_info.publish(camera_info)
Example #30
def fill_camera_info(calib):
    ci = CameraInfo()
    ci.width = int(calib['image_width'])
    ci.height = int(calib['image_height'])
    ci.distortion_model = calib['distortion_model']
    ci.D = calib['distortion_coefficients']['data']
    ci.K = calib['camera_matrix']['data']
    ci.R = calib['rectification_matrix']['data']
    ci.P = calib['projection_matrix']['data']
    return ci
Example #31
    def __init__(self, topic_name):
        # Set publisher.
        self._pub = rospy.Publisher(topic_name, CameraInfo, queue_size=5)

        # Create default camera info:
        camera_info = CameraInfo()  # This is ROS camera info. Not mine.
        camera_info.distortion_model = "plumb_bob"
        camera_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        camera_info.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        self._default_camera_info = camera_info
Example #32
def yaml_to_camera_info(yaml_data):
    camera_info = CameraInfo()
    camera_info.header.frame_id = yaml_data['camera_name']
    camera_info.D = yaml_data['distortion_coefficients']['data']
    camera_info.K = yaml_data['camera_matrix']['data']
    camera_info.P = yaml_data['projection_matrix']['data']
    camera_info.R = yaml_data['rectification_matrix']['data']
    camera_info.distortion_model = yaml_data['distortion_model']
    camera_info.height = yaml_data['image_height']
    camera_info.width = yaml_data['image_width']
    return camera_info
Example #33
def callback(msg):
    info = CameraInfo()
    info.header = msg.header
    info.height = data['image_height']
    info.width = data['image_width']
    info.distortion_model = data['distortion_model']
    info.D = data['distortion_coefficients']['data']
    info.K = data['camera_matrix']['data']
    info.P = data['projection_matrix']['data']
    info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    info_pub.publish(info)
Example #34
    def write_camera(self, folder, topic, frame, ext):

        print(' Writing camera: ' + topic)

        records = self.read_csv(self.path + self.name + '/' + folder + '/' +
                                'timestamps.txt')
        bridge = cv_bridge.CvBridge()

        seq = 0
        for row in tqdm(records):

            filename = row[0] + ext
            timestamp = row[1]

            image_path = self.path + self.name + '/' + folder + '/' + filename
            img = cv2.imread(image_path)

            encoding = "bgr8"
            image_message = bridge.cv2_to_imgmsg(img, encoding=encoding)

            image_message.header.frame_id = frame
            image_message.header.stamp = self.timestamp_to_stamp(timestamp)
            image_message.header.seq = seq
            seq += 1

            self.bag.write(topic + '/camera',
                           image_message,
                           t=image_message.header.stamp)

            camera_info = CameraInfo()
            camera_info.header = image_message.header
            camera_info.height = img.shape[0]
            camera_info.width = img.shape[1]
            camera_info.distortion_model = "plumb_bob"
            camera_info.D = [
                -0.15402600433198144, 0.08558850995478451,
                0.002075813671243975, 0.0006580423624898167,
                -0.016293022125192957
            ]
            camera_info.K = [
                1376.8981317210023, 0.0, 957.4934213691823, 0.0,
                1378.3903002987945, 606.5795886217022, 0.0, 0.0, 1.0
            ]
            camera_info.R = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
            camera_info.P = [
                1376.8981317210023, 0.0, 957.4934213691823, 0.0, 0.0,
                1378.3903002987945, 606.5795886217022, 0.0, 0.0, 0.0, 1.0, 0.0
            ]
            camera_info.binning_x = 1
            camera_info.binning_y = 1

            self.bag.write(topic + '/camera_info',
                           camera_info,
                           t=image_message.header.stamp)
Example #35
 def yaml_to_camera_info(self, yaml_file):
     with open(yaml_file, "r") as f:
         calib_data = yaml.load(f)
     camera_info_msg = CameraInfo()
     camera_info_msg.width = calib_data["image_width"]
     camera_info_msg.height = calib_data["image_height"]
     camera_info_msg.K = calib_data["camera_matrix"]["data"]
     camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
     camera_info_msg.R = calib_data["rectification_matrix"]["data"]
     camera_info_msg.P = calib_data["projection_matrix"]["data"]
     camera_info_msg.distortion_model = calib_data["distortion_model"]
     return camera_info_msg
Example #36
 def load_camera_info_2(self, filename):
     stream = open(filename, 'r')
     calib_data = yaml.load(stream)
     cam_info = CameraInfo()
     cam_info.width = calib_data['image_width']
     cam_info.height = calib_data['image_height']
     cam_info.K = calib_data['camera_matrix']['data']
     cam_info.D = calib_data['distortion_coefficients']['data']
     cam_info.R = calib_data['rectification_matrix']['data']
     cam_info.P = calib_data['projection_matrix']['data']
     cam_info.distortion_model = calib_data['distortion_model']
     return cam_info
Example #37
 def _load_camera_info(self, filename):
     calib_data = yaml_load_file(filename)
     #     logger.info(yaml_dump(calib_data))
     cam_info = CameraInfo()
     cam_info.width = calib_data['image_width']
     cam_info.height = calib_data['image_height']
     cam_info.K = np.array(calib_data['camera_matrix']['data']).reshape((3,3))
     cam_info.D = np.array(calib_data['distortion_coefficients']['data']).reshape((1,5))
     cam_info.R = np.array(calib_data['rectification_matrix']['data']).reshape((3,3))
     cam_info.P = np.array(calib_data['projection_matrix']['data']).reshape((3,4))
     cam_info.distortion_model = calib_data['distortion_model']
     return cam_info
def parse_yaml(filename):
    stream = open(filename, 'r')
    calib_data = yaml.safe_load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data['image_width']
    cam_info.height = calib_data['image_height']
    cam_info.K = calib_data['camera_matrix']['data']
    cam_info.D = calib_data['distortion_coefficients']['data']
    cam_info.R = calib_data['rectification_matrix']['data']
    cam_info.P = calib_data['projection_matrix']['data']
    cam_info.distortion_model = calib_data['distortion_model']
    return cam_info
Example #39
 def parse_yaml(self, filename):
     stream = open(filename, "r")
     calib_data = yaml.safe_load(stream)
     cam_info = CameraInfo()
     cam_info.width = calib_data["image_width"]
     cam_info.height = calib_data["image_height"]
     cam_info.K = calib_data["camera_matrix"]["data"]
     cam_info.D = calib_data["distortion_coefficients"]["data"]
     cam_info.R = calib_data["rectification_matrix"]["data"]
     cam_info.P = calib_data["projection_matrix"]["data"]
     cam_info.distortion_model = calib_data["distortion_model"]
     return cam_info
	def createMsg(self):
		msg = CameraInfo()
		msg.height = 480
		msg.width = 640
		msg.distortion_model = "plumb_bob"
		msg.D = [0.08199114285264993, -0.04549390835713936, -0.00040960290587863145, 0.0009833748346549968, 0.0]
		msg.K = [723.5609128875188, 0.0, 315.9845354772031, 0.0, 732.2615109685506, 242.26660681756633,\
				 0.0, 0.0, 1.0]
		msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
		msg.P = [737.1736450195312, 0.0, 316.0324931112791, 0.0, 0.0, 746.1573486328125, 241.59042622688867,\
				 0.0, 0.0, 0.0, 1.0, 0.0]
		self.msg = msg
Example #41
def load_cam_info(calib_file):
    cam_info = CameraInfo()
    with open(calib_file,'r') as cam_calib_file :
        cam_calib = yaml.load(cam_calib_file)
        cam_info.height = cam_calib['image_height']
        cam_info.width = cam_calib['image_width']
        cam_info.K = cam_calib['camera_matrix']['data']
        cam_info.D = cam_calib['distortion_coefficients']['data']
        cam_info.R = cam_calib['rectification_matrix']['data']
        cam_info.P = cam_calib['projection_matrix']['data']
        cam_info.distortion_model = cam_calib['distortion_model']
    return cam_info
Example #42
def yaml_to_CameraInfo(yaml_fname):
    with open(yaml_fname, "r") as file_handle:
        calib_data = yaml.load(file_handle)
 
    camera_info_msg = CameraInfo()
    camera_info_msg.width = calib_data["image_width"]
    camera_info_msg.height = calib_data["image_height"]
    camera_info_msg.K = calib_data["camera_matrix"]["data"]
    camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
    camera_info_msg.R = calib_data["rectification_matrix"]["data"]
    camera_info_msg.P = calib_data["projection_matrix"]["data"]
    camera_info_msg.distortion_model = calib_data["distortion_model"]
    return camera_info_msg
Example #43
    def get_cam_info(self, cam_name):
        cam_name = "calibrations/" + cam_name + ".yaml"
        stream = open(os.path.join(os.path.dirname(__file__), cam_name), 'r')

        calib_data = yaml.safe_load(stream)

        cam_info = CameraInfo()
        cam_info.width = calib_data['image_width']
        cam_info.height = calib_data['image_height']
        cam_info.K = calib_data['camera_matrix']['data']
        cam_info.D = calib_data['distortion_coefficients']['data']
        cam_info.R = calib_data['rectification_matrix']['data']
        cam_info.P = calib_data['projection_matrix']['data']
        cam_info.distortion_model = calib_data['distortion_model']
        return cam_info
    def toCameraInfo(self):
        msg = CameraInfo()

        (msg.width, msg.height) = self.size

        if self.D.size > 5:
            msg.distortion_model = "rational_polynomial"
        else:
            msg.distortion_model = "plumb_bob"

        msg.D = numpy.ravel(self.D).copy().tolist()
        msg.K = numpy.ravel(self.K).copy().tolist()
        msg.R = numpy.ravel(self.R).copy().tolist()
        msg.P = numpy.ravel(self.P).copy().tolist()

        return msg
Example #45
def GetCameraInfo(width, height):
    cam_info = CameraInfo()
    cam_info.width = width
    cam_info.height = height
    cam_info.distortion_model = model
    #cam_info.D = [0.0]*5
    #cam_info.K = [0.0]*9
    #cam_info.R = [0.0]*9
    #cam_info.P = [0.0]*12
    cam_info.D = D
    cam_info.K = K
    cam_info.R = R
    cam_info.P = P
    cam_info.binning_x = 0
    cam_info.binning_y = 0
    return cam_info
Example #46
def pickle_to_info(info_pickle):
    info = CameraInfo()
    info.header.stamp = rospy.Time()
    info.header.seq = info_pickle.header.seq
    info.header.frame_id = info_pickle.header.frame_id
    info.roi.x_offset = info_pickle.roi.x_offset
    info.roi.y_offset = info_pickle.roi.y_offset
    info.roi.height = info_pickle.roi.height
    info.roi.width = info_pickle.roi.width
    info.height = info_pickle.height
    info.width = info_pickle.width
    info.D = info_pickle.D
    info.K = info_pickle.K
    info.R = info_pickle.R
    info.P = info_pickle.P
    
    return info
Example #47
    def default(self, ci='unused'):
        if not self.component_instance.capturing:
            return # press [Space] key to enable capturing

        image_local = self.data['image']

        image = Image()
        image.header = self.get_ros_header()
        image.height = self.component_instance.image_height
        image.width = self.component_instance.image_width
        image.encoding = self.encoding
        image.step = image.width * 4

        # VideoTexture.ImageRender implements the buffer interface
        image.data = bytes(image_local)

        # fill these 3 parameters to get a correct image with a stereo camera
        Tx = 0
        Ty = 0
        R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

        intrinsic = self.data['intrinsic_matrix']

        camera_info = CameraInfo()
        camera_info.header = image.header
        camera_info.height = image.height
        camera_info.width = image.width
        camera_info.distortion_model = 'plumb_bob'
        camera_info.D = [0]
        camera_info.K = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2],
                         intrinsic[1][0], intrinsic[1][1], intrinsic[1][2],
                         intrinsic[2][0], intrinsic[2][1], intrinsic[2][2]]
        camera_info.R = R
        camera_info.P = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2], Tx,
                         intrinsic[1][0], intrinsic[1][1], intrinsic[1][2], Ty,
                         intrinsic[2][0], intrinsic[2][1], intrinsic[2][2], 0]

        if self.pub_tf:
            self.publish_with_robot_transform(image)
        else:
            self.publish(image)
        self.topic_camera_info.publish(camera_info)
def parse_yaml(filename):
    stream = open(filename, "r")
    calib_data = yaml.safe_load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data["image_width"]
    cam_info.height = calib_data["image_height"]
    cam_info.K = calib_data["camera_matrix"]["data"]
    cam_info.D = calib_data["distortion_coefficients"]["data"]
    cam_info.R = calib_data["rectification_matrix"]["data"]
    cam_info.P = calib_data["projection_matrix"]["data"]
    cam_info.distortion_model = calib_data["distortion_model"]
    cam_info.binning_x = calib_data["binning_x"]
    cam_info.binning_y = calib_data["binning_y"]
    cam_info.roi.x_offset = calib_data["roi"]["x_offset"]
    cam_info.roi.y_offset = calib_data["roi"]["y_offset"]
    cam_info.roi.height = calib_data["roi"]["height"]
    cam_info.roi.width = calib_data["roi"]["width"]
    cam_info.roi.do_rectify = calib_data["roi"]["do_rectify"]

    return cam_info
Example #49
    def fetch_image(self, cam):
        cam.simulate()

        if not cam.pixels:
            return None, None
        cv_img = cv.CreateImageHeader((cam.width , cam.height), cv.IPL_DEPTH_8U, 3)
        cv.SetData(cv_img, cam.pixels, cam.width*3)
        cv.ConvertImage(cv_img, cv_img, cv.CV_CVTIMG_FLIP)
        im = self.bridge.cv_to_imgmsg(cv_img, "rgb8")

        caminfo = CameraInfo()
        caminfo.header = im.header
        caminfo.height = cam.height
        caminfo.width = cam.width
        caminfo.D = 5*[0.]
        caminfo.K = sum([list(r) for r in cam.K],[])
        caminfo.P = sum([list(r) for r in cam.P],[])
        caminfo.R = sum([list(r) for r in cam.R],[])

        return im, caminfo
def parse_yaml(filename):
    stream = open(filename, 'r')
    calib_data = yaml.safe_load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data['image_width']
    cam_info.height = calib_data['image_height']
    cam_info.K = calib_data['camera_matrix']['data']
    cam_info.D = calib_data['distortion_coefficients']['data']
    cam_info.R = calib_data['rectification_matrix']['data']
    cam_info.P = calib_data['projection_matrix']['data']
    cam_info.distortion_model = calib_data['distortion_model']
    cam_info.binning_x = calib_data['binning_x']
    cam_info.binning_y = calib_data['binning_y']
    cam_info.roi.x_offset = calib_data['roi']['x_offset']
    cam_info.roi.y_offset = calib_data['roi']['y_offset']
    cam_info.roi.height = calib_data['roi']['height']
    cam_info.roi.width = calib_data['roi']['width']
    cam_info.roi.do_rectify = calib_data['roi']['do_rectify']
    
    return cam_info
Example #51
File: sensors.py Project: cyy1991/carla
    def build_camera_info(self):
        """
        computing camera info

        camera info doesn't change over time
        """
        camera_info = CameraInfo()
        camera_info.header.frame_id = self.name
        camera_info.width = self.carla_object.ImageSizeX
        camera_info.height = self.carla_object.ImageSizeY
        camera_info.distortion_model = 'plumb_bob'
        cx = self.carla_object.ImageSizeX / 2.0
        cy = self.carla_object.ImageSizeY / 2.0
        fx = self.carla_object.ImageSizeX / (
            2.0 * math.tan(self.carla_object.FOV * math.pi / 360.0))
        fy = fx
        camera_info.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1]
        camera_info.D = [0, 0, 0, 0, 0]
        camera_info.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]
        camera_info.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1.0, 0]
        self._camera_info = camera_info
def get_camera_info(hard_coded=True):

    if hard_coded:
        cx = 319.5
        cy = 239.5
        fx = 525.5
        fy = 525.5

        return (cx, cy, fx, fy)

    #if we are using a different camera, then
    #we can listen to the ros camera info topic for that device
    #and get our values here.
    else:

        import image_geometry
        from sensor_msgs.msg import CameraInfo

        cam_info = CameraInfo()

        cam_info.height = 480
        cam_info.width = 640
        cam_info.distortion_model = "plumb_bob"
        cam_info.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        cam_info.K = [525.0, 0.0, 319.5, 0.0, 525.0, 239.5, 0.0, 0.0, 1.0]
        cam_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        cam_info.P = [525.0, 0.0, 319.5, 0.0, 0.0, 525.0, 239.5, 0.0, 0.0, 0.0, 1.0, 0.0]
        cam_info.binning_x = 0
        cam_info.binning_y = 0
        cam_info.roi.x_offset = 0
        cam_info.roi.y_offset = 0
        cam_info.roi.height = 0
        cam_info.roi.width = 0
        cam_info.roi.do_rectify = False

        camera_model = image_geometry.PinholeCameraModel()
        camera_model.fromCameraInfo(cam_info)

        return camera_model.cx(), camera_model.cy(), camera_model.fx(), camera_model.fy()
def loadCalibrationFile(filename, cname):
    """ Load calibration data from a file.

    This function returns a `sensor_msgs/CameraInfo`_ message, based
    on the filename parameter.  An empty or non-existent file is *not*
    considered an error; a null CameraInfo being provided in that
    case.

    :param filename: location of CameraInfo to read
    :param cname: Camera name.
    :returns: `sensor_msgs/CameraInfo`_ message containing calibration,
              if file readable; null calibration message otherwise.
    :raises: :exc:`IOError` if an existing calibration file is unreadable.

    """
    ci = CameraInfo()
    try:
        f = open(filename)
        calib = yaml.load(f)
        if calib is not None:
            if calib['camera_name'] != cname:
                rospy.logwarn("[" + cname + "] does not match name " +
                              calib['camera_name'] + " in file " + filename)

            # fill in CameraInfo fields
            ci.width = calib['image_width']
            ci.height = calib['image_height']
            ci.distortion_model = calib['distortion_model']
            ci.D = calib['distortion_coefficients']['data']
            ci.K = calib['camera_matrix']['data']
            ci.R = calib['rectification_matrix']['data']
            ci.P = calib['projection_matrix']['data']

    except IOError:                     # OK if file did not exist
        pass

    return ci
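A hedged usage sketch (the file path and camera name are hypothetical), showing how the null-calibration case from the docstring can be detected:

# Hypothetical usage: load a calibration written by the ROS camera calibrator.
ci = loadCalibrationFile('/home/user/.ros/camera_info/head_camera.yaml', 'head_camera')
if ci.K[0] == 0.0:
    # A missing or empty file leaves the default (all-zero) calibration.
    rospy.logwarn("head_camera is uncalibrated")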
Example #54
0
    def get_camera_info(self, camera_name, img_name='depth'):
        camera_info = CameraInfo()
        file_url = ''
        try:
            file_url = rospy.get_param(camera_name+'/driver/'+img_name+'_camera_info_url').replace('file://', '')
        except Exception, e:
            print e

        if not os.path.exists(file_url):
            print 'ERROR: Could not read '+camera_name+' '+img_name+'_camera_info'
            print '       Calibrate the sensor and try again!'
            exit(1)

        print 'Loading camera '+img_name+'_camera_info for '+camera_name+' at:', file_url
        with open(file_url, 'r') as f:
            calib = yaml.safe_load(f.read())
            # CameraInfo fields expect flat float lists, not numpy matrices.
            camera_info.K = calib["camera_matrix"]["data"]
            camera_info.D = calib["distortion_coefficients"]["data"]
            camera_info.R = calib["rectification_matrix"]["data"]
            camera_info.P = calib["projection_matrix"]["data"]
            camera_info.height = calib["image_height"]
            camera_info.width = calib["image_width"]
            print camera_info
        return camera_info
Example #55
0
pub_info = rospy.Publisher("~camera_info", CameraInfo)
# Image publisher used in the loop below (topic name assumed; the original
# snippet referenced an otherwise undefined `pub`).
pub = rospy.Publisher("~image", Image)
frame_id = rospy.get_param('~frame_id', 'dummy_camera')
width = rospy.get_param('~width', 640)
height = rospy.get_param('~height', 480)

r = rospy.Rate(10)
while not rospy.is_shutdown():
    now = rospy.Time(0)
    dummy_camera_info = CameraInfo()
    dummy_camera_info.width = width
    dummy_camera_info.height = height
    dummy_camera_info.header.frame_id = frame_id
    dummy_camera_info.header.stamp = now
    dummy_camera_info.D = [0.0, 0.0, 0.0, 0.0, 0.0]
    dummy_camera_info.K = [525.0, 0.0, 319.5, 0.0, 525.0, 239.5, 0.0, 0.0, 1.0]
    dummy_camera_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    dummy_camera_info.P = [525.0, 0.0, 319.5, 0.0, 0.0, 525.0, 239.5, 0.0, 0.0, 0.0, 1.0, 0.0]
    dummy_camera_info.distortion_model = "plumb_bob"
    pub_info.publish(dummy_camera_info)

    dummy_img = Image()
    dummy_img.width = width
    dummy_img.height = height
    dummy_img.header.frame_id = frame_id
    dummy_img.header.stamp = now
    dummy_img.step = width
    dummy_img.encoding = "mono8"
    dummy_img.data = [0] * width * height
    pub.publish(dummy_img)
    r.sleep()
Example #56
0
with open(file_cfg) as f:
    lines = f.readlines()

# Pull the comma-separated floats from a config line of the form 'LABEL: (v1,v2,...)'.
parser = lambda label: map(float, [line for line in lines \
    if line.startswith(label)][0].strip().split('(')[1].split(')')[0].split(','))

i = parser('CAMERA%s_INTRINSIC:'%camera)
d = parser('CAMERA%s_DISTORTION:'%camera)
# Assumption: the INTRINSIC line lists the 3x3 camera matrix row-major
# (the original left this as a TODO with an all-zero placeholder).
intrinsic = [i[0:3], i[3:6], i[6:9]]
# http://docs.ros.org/api/sensor_msgs/html/msg/CameraInfo.html
# http://wiki.ros.org/image_pipeline/CameraInfo
camera_info = CameraInfo()
camera_info.distortion_model = 'plumb_bob'
# Fill these 3 parameters to get a correctly rectified image from a stereo camera.
Tx = 0
Ty = 0
R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
camera_info.D = d  # use the parsed distortion coefficients rather than a single zero
camera_info.K = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2],
                 intrinsic[1][0], intrinsic[1][1], intrinsic[1][2],
                 intrinsic[2][0], intrinsic[2][1], intrinsic[2][2]]
camera_info.R = R
camera_info.P = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2], Tx,
                 intrinsic[1][0], intrinsic[1][1], intrinsic[1][2], Ty,
                 intrinsic[2][0], intrinsic[2][1], intrinsic[2][2], 0]

with open(file_yml, 'w') as f:
    f.write(str(camera_info))
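For a real stereo pair, the ROS CameraInfo convention encodes the baseline in the right camera's projection matrix as Tx = -fx * B (B in metres), with Ty = 0 and R set to the rectification rotation from stereo calibration. A minimal sketch with assumed intrinsics and an assumed 0.07 m baseline:

from sensor_msgs.msg import CameraInfo

# Hypothetical right-camera info for a rectified stereo pair.
fx, fy, cx, cy = 525.0, 525.0, 319.5, 239.5   # assumed intrinsics
baseline = 0.07                               # assumed baseline, metres
right_info = CameraInfo()
right_info.width, right_info.height = 640, 480
right_info.distortion_model = 'plumb_bob'
right_info.D = [0.0, 0.0, 0.0, 0.0, 0.0]
right_info.K = [fx, 0.0, cx, 0.0, fy, cy, 0.0, 0.0, 1.0]
right_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
right_info.P = [fx, 0.0, cx, -fx * baseline,   # Tx = -fx * B for the right camera
                0.0, fy, cy, 0.0,
                0.0, 0.0, 1.0, 0.0]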
import rospy
from sensor_msgs.msg import CameraInfo, PointCloud2

if __name__ == "__main__":
    rospy.init_node("laser_camera_fov_sample")
    pub_info = rospy.Publisher("~info", CameraInfo)
    pub_cloud = rospy.Publisher("~cloud", PointCloud2)
    rate = rospy.Rate(1)
    while not rospy.is_shutdown():
        info = CameraInfo()
        info.header.stamp = rospy.Time.now()
        info.header.frame_id = "origin"
        info.height = 544
        info.width = 1024
        info.D = [-0.20831339061260223, 0.11341656744480133,
                  -0.00035378438769839704, -1.746419547998812e-05,
                  0.013720948249101639, 0.0, 0.0, 0.0]
        info.K = [598.6097412109375, 0.0, 515.5960693359375,
                  0.0, 600.0813598632812, 255.42999267578125,
                  0.0, 0.0, 1.0]
        info.R = [0.999993085861206, 0.0022128731943666935, -0.0029819998890161514,
                  -0.0022144035901874304, 0.9999974370002747, -0.0005100672133266926,
                  0.002980863442644477, 0.0005166670307517052, 0.9999954104423523]
        info.P = [575.3445434570312, 0.0, 519.5, 0.0,
                  0.0, 575.3445434570312, 259.5, 0.0,
                  0.0, 0.0, 1.0, 0.0]
        pub_info.publish(info)
        cloud = PointCloud2()
        cloud.header.frame_id = "origin"
        pub_cloud.publish(cloud)
        rate.sleep()
def main():
    parser = argparse.ArgumentParser(
        description="Convert an LCM log to a ROS bag (mono/stereo images only).")
    parser.add_argument('lcm_file', help='Input LCM log.', action='store')
    parser.add_argument('left_img_channel', help='LCM channel for left image.')
    parser.add_argument('left_camera_yml',
                        help='Image calibration YAML file from ROS calibrator')
    parser.add_argument('--right_img_channel', help='LCM channel for right image.',
                        action='append', dest='lcm_channels')
    parser.add_argument('--right_camera_yml',
                        help='Image calibration YAML file from ROS calibrator',
                        action='append', dest='yml_files')

    roi_parser = parser.add_argument_group("Format7/ROI", "Format7/ROI options needed when dealing with non-standard video modes.")
    roi_parser.add_argument('--binning_x', default=1, type=int, dest='binning_x', help='Image binning factor.')
    roi_parser.add_argument('--binning_y', default=1, type=int, dest='binning_y', help='Image binning factor.')
    roi_parser.add_argument('--x_offset', default=0, type=int, dest='x_offset', help="ROI x offset (in UNBINNED pixels)")
    roi_parser.add_argument('--y_offset', default=0, type=int, dest='y_offset', help="ROI y offset (in UNBINNED pixels)")
    roi_parser.add_argument('--width', default=640, type=int, dest='width', help="ROI width (in UNBINNED pixels)")
    roi_parser.add_argument('--height', default=480, type=int, dest='height', help="ROI height (in UNBINNED pixels)")
    roi_parser.add_argument('--do_rectify', default=False, action='store_true', dest='do_rectify', help="Do rectification when querying ROI.")
    args = parser.parse_args()

    if args.lcm_channels is None:
        args.lcm_channels = []
    if args.yml_files is None:
        args.yml_files = []

    args.lcm_channels.append(args.left_img_channel)
    args.yml_files.append(args.left_camera_yml)

    if len(args.lcm_channels) != len(args.yml_files):
        print "LCM channel-YAML file mismatch!"

    print "Converting images in %s to ROS bag file..." % (args.lcm_file)

    log = lcm.EventLog(args.lcm_file, 'r')
    bag = rosbag.Bag(args.lcm_file + '.images.bag', 'w')

    # Read in YAML files.
    yml = []
    for y in args.yml_files:
        yml.append(yaml.safe_load(open(y)))

    try:
        count = 0
        for event in log:
            for ii in range(len(args.lcm_channels)):
                l = args.lcm_channels[ii]
                y = yml[ii]

                if event.channel == l:
                    lcm_msg = image_t.decode(event.data)

                    # Fill in image.
                    if lcm_msg.pixelformat != image_t.PIXEL_FORMAT_MJPEG:
                        print "Encountered non-MJPEG compressed image. Skipping..."
                        continue

                    ros_msg = CompressedImage()
                    ros_msg.header.seq = event.eventnum

                    secs_float = float(lcm_msg.utime)/1e6
                    nsecs_float = (secs_float - np.floor(secs_float)) * 1e9
                    ros_msg.header.stamp.secs = np.uint32(np.floor(secs_float))
                    ros_msg.header.stamp.nsecs = np.uint32(np.floor(nsecs_float))
                    ros_msg.header.frame_id = "camera"

                    ros_msg.format = 'jpeg'

                    ros_msg.data = lcm_msg.data

                    # Fill in camera info
                    camera_info = CameraInfo()
                    camera_info.header = ros_msg.header
                    camera_info.height = y['image_height']
                    camera_info.width = y['image_width']

                    if y["distortion_model"] != "plumb_bob":
                        print "Encountered non-supported distorion model %s. Skipping..." % y["distortion_model"]
                        continue

                    camera_info.distortion_model = y["distortion_model"]
                    camera_info.D = y["distortion_coefficients"]['data']
                    camera_info.K = y["camera_matrix"]['data']
                    camera_info.R = y["rectification_matrix"]['data']
                    camera_info.P = y["projection_matrix"]['data']
                    camera_info.binning_x = args.binning_x
                    camera_info.binning_y = args.binning_y
                    camera_info.roi.x_offset = args.x_offset
                    camera_info.roi.y_offset = args.y_offset
                    camera_info.roi.height = args.height
                    camera_info.roi.width = args.width
                    camera_info.roi.do_rectify = args.do_rectify

                    bag.write("/camera/" + l + "/image_raw/compressed", ros_msg, ros_msg.header.stamp)
                    bag.write("/camera/" + l + "/camera_info", camera_info, camera_info.header.stamp)

                    count += 1

                    if count % 100 == 0:
                        print "Wrote %i events" % count
    finally:
        log.close()
        bag.close()

    print("Done.")
Example #59
0
File: camera.py Project: DefaultUser/morse
def post_image(self, component_instance):
    """ Publish the data of the Camera as a ROS-Image message.

    """
    image_local = component_instance.local_data['image']
    if not image_local or image_local == '' or not image_local.image or not component_instance.capturing:
        return # press [Space] key to enable capturing

    parent_name = component_instance.robot_parent.blender_obj.name
    component_name = component_instance.blender_obj.name

    image = Image()
    image.header.stamp = rospy.Time.now()
    image.header.seq = self._seq
    # http://www.ros.org/wiki/geometry/CoordinateFrameConventions#Multi_Robot_Support
    image.header.frame_id = ('/' + parent_name + '/base_image')
    image.height = component_instance.image_height
    image.width = component_instance.image_width
    image.encoding = 'rgba8'
    image.step = image.width * 4
    # NOTE: Blender returns the image as a binary string encoded as RGBA
    # sensor_msgs.msg.Image.image need to be len() friendly
    # TODO patch ros-py3/common_msgs/sensor_msgs/src/sensor_msgs/msg/_Image.py
    # to be C-PyBuffer "aware" ? http://docs.python.org/c-api/buffer.html
    image.data = bytes(image_local.image)
    # http://wiki.blender.org/index.php/Dev:Source/GameEngine/2.49/VideoTexture
    # http://www.blender.org/documentation/blender_python_api_2_57_release/bge.types.html#bge.types.KX_Camera.useViewport

    for topic in self._topics:
        # publish the message on the correct topic
        if str(topic.name) == str("/" + parent_name + "/" + component_name + "/image"):
            topic.publish(image)

    # sensor_msgs/CameraInfo [ http://ros.org/wiki/rviz/DisplayTypes/Camera ]
    # TODO: fill these 3 parameters to get a correctly rectified image from a stereo camera
    Tx = 0
    Ty = 0
    R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

    intrinsic = component_instance.local_data['intrinsic_matrix']

    camera_info = CameraInfo()
    camera_info.header.stamp = image.header.stamp
    camera_info.header.seq = image.header.seq
    camera_info.header.frame_id = image.header.frame_id
    camera_info.height = image.height
    camera_info.width = image.width
    camera_info.distortion_model = 'plumb_bob'
    camera_info.K = [intrinsic[0][0], intrinsic[1][0], intrinsic[2][0], 
                     intrinsic[0][1], intrinsic[1][1], intrinsic[2][1], 
                     intrinsic[0][2], intrinsic[1][2], intrinsic[2][2]]
    camera_info.R = R
    camera_info.P = [intrinsic[0][0], intrinsic[1][0], intrinsic[2][0], Tx, 
                     intrinsic[0][1], intrinsic[1][1], intrinsic[2][1], Ty, 
                     intrinsic[0][2], intrinsic[1][2], intrinsic[2][2], 0]

    for topic in self._topics:
        # publish the message on the correct topic
        if str(topic.name) == str("/" + parent_name + "/" + component_name + "/camera_info"):
            topic.publish(camera_info)

    self._seq = self._seq + 1
Example #60
-1
    def default(self, ci='unused'):
        """ Publish the data of the Camera as a ROS Image message. """
        if not self.component_instance.capturing:
            return # press [Space] key to enable capturing

        image_local = self.data['image']

        image = Image()
        image.header = self.get_ros_header()
        image.header.frame_id += '/base_image'
        image.height = self.component_instance.image_height
        image.width = self.component_instance.image_width
        image.encoding = 'rgba8'
        image.step = image.width * 4

        # VideoTexture.ImageRender implements the buffer interface
        image.data = bytes(image_local)

        # sensor_msgs/CameraInfo [ http://ros.org/wiki/rviz/DisplayTypes/Camera ]
        # Fill these 3 parameters to get a correctly rectified image from a stereo camera.
        Tx = 0
        Ty = 0
        R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

        intrinsic = self.data['intrinsic_matrix']

        camera_info = CameraInfo()
        camera_info.header = image.header
        camera_info.height = image.height
        camera_info.width = image.width
        camera_info.distortion_model = 'plumb_bob'
        camera_info.K = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2],
                         intrinsic[1][0], intrinsic[1][1], intrinsic[1][2],
                         intrinsic[2][0], intrinsic[2][1], intrinsic[2][2]]
        camera_info.R = R
        camera_info.P = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2], Tx,
                         intrinsic[1][0], intrinsic[1][1], intrinsic[1][2], Ty,
                         intrinsic[2][0], intrinsic[2][1], intrinsic[2][2], 0]

        self.publish(image)
        self.topic_camera_info.publish(camera_info)