def info_480p():

    data = CameraInfo()

    data.height = 480
    data.width = 640

    data.distortion_model = "plumb_bob"

    data.D = [-0.297117, 0.070173, -0.000390, 0.005232, 0.000000]

    data.K = [
        342.399061, 0.000000, 284.222700, 0.000000, 343.988302, 227.555237,
        0.000000, 0.000000, 1.000000
    ]

    data.R = [
        1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000,
        0.000000, 1.000000
    ]

    data.P = [
        247.555710, 0.000000, 290.282918, 0.000000, 0.000000, 281.358612,
        220.776009, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000
    ]

    data.binning_x = 0
    data.binning_y = 0
    data.roi.x_offset = 0
    data.roi.y_offset = 0
    data.roi.height = 0
    data.roi.width = 0
    data.roi.do_rectify = False

    return data
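A minimal usage sketch (not part of the original example; node, topic, and rate are placeholder assumptions): publish the hard-coded 480p calibration at a fixed rate with rospy.

# Hedged usage sketch; node, topic, and rate are illustrative assumptions.
import rospy
from sensor_msgs.msg import CameraInfo

if __name__ == "__main__":
    rospy.init_node("camera_info_480p_publisher")
    pub = rospy.Publisher("camera/camera_info", CameraInfo, queue_size=1)
    info = info_480p()
    rate = rospy.Rate(30)
    while not rospy.is_shutdown():
        info.header.stamp = rospy.Time.now()
        pub.publish(info)
        rate.sleep()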
Example #2
    def camera_info(self):
        msg_header = Header()
        msg_header.frame_id = "f450/robot_camera_down"
        msg_roi = RegionOfInterest()
        msg_roi.x_offset = 0
        msg_roi.y_offset = 0
        msg_roi.height = 0
        msg_roi.width = 0
        msg_roi.do_rectify = False

        msg = CameraInfo()
        msg.header = msg_header
        msg.height = 480
        msg.width = 640
        msg.distortion_model = 'plumb_bob'
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.K = [1.0, 0.0, 320.5, 0.0, 1.0, 240.5, 0.0, 0.0, 1.0]
        msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg.P = [
            1.0, 0.0, 320.5, -0.0, 0.0, 1.0, 240.5, 0.0, 0.0, 0.0, 1.0, 0.0
        ]
        msg.binning_x = 0
        msg.binning_y = 0
        msg.roi = msg_roi
        return msg
def publishing(pub_image, pub_camera, image, type_of_camera):
    if type_of_camera == 1:
        image.convert(carla.ColorConverter.Depth)
    elif type_of_camera == 2:
        image.convert(carla.ColorConverter.CityScapesPalette)
    array = np.frombuffer(image.raw_data, dtype=np.uint8)
    array = np.reshape(array, (image.height, image.width, 4))
    array = array[:, :, :3]
    array = array[:, :, ::-1]
    img = Image()
    infomsg = CameraInfo()

    img.header.stamp = rospy.Time.now()
    img.header.frame_id = 'base'
    img.height = infomsg.height = image.height
    img.width = infomsg.width = image.width
    img.encoding = "rgb8"
    img.step = img.width * 3 * 1
    st1 = array.tobytes()
    img.data = st1

    cx = infomsg.width / 2.0
    cy = infomsg.height / 2.0
    fx = fy = infomsg.width / (2.0 * math.tan(image.fov * math.pi / 360.0))
    infomsg.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1]
    infomsg.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1, 0]
    infomsg.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]
    infomsg.D = [0, 0, 0, 0, 0]
    infomsg.binning_x = 0
    infomsg.binning_y = 0
    infomsg.distortion_model = "plumb_bob"
    infomsg.header = img.header

    pub_image.publish(img)
    pub_camera.publish(infomsg)
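The intrinsics above follow the pinhole relation f = width / (2 * tan(hfov / 2)), with cx and cy at the image center. A quick standalone check of that formula (the image width and FOV below are illustrative, not taken from the example):

# Standalone check of the focal-length-from-FOV relation used above.
import math

width, hfov_deg = 640, 90.0
f = width / (2.0 * math.tan(math.radians(hfov_deg) / 2.0))
print(f)  # 320.0 for a 640-pixel-wide image with a 90-degree horizontal FOV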
Example #4
    def rosmsg(self):
        """:obj:`sensor_msgs.CamerInfo` : Returns ROS CamerInfo msg 
        """
        msg_header = Header()
        msg_header.frame_id = self._frame

        msg_roi = RegionOfInterest()
        msg_roi.x_offset = 0
        msg_roi.y_offset = 0
        msg_roi.height = 0
        msg_roi.width = 0
        msg_roi.do_rectify = False

        msg = CameraInfo()
        msg.header = msg_header
        msg.height = self._height
        msg.width = self._width
        msg.distortion_model = 'plumb_bob'
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.K = [
            self._fx, 0.0, self._cx, 0.0, self._fy, self._cy, 0.0, 0.0, 1.0
        ]
        msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg.P = [
            self._fx, 0.0, self._cx, 0.0, 0.0, self._fy, self._cy, 0.0, 0.0,
            0.0, 1.0, 0.0
        ]
        msg.binning_x = 0
        msg.binning_y = 0
        msg.roi = msg_roi

        return msg
def info_720p():

    data = CameraInfo()

    data.height = 720
    data.width = 1280

    data.distortion_model = "plumb_bob"

    data.D = [-0.266169, 0.056566, 0.003569, 0.002922, 0.000000]

    data.K = [
        550.515474, 0.000000, 560.486530, 0.000000, 550.947091, 334.377088,
        0.000000, 0.000000, 1.000000
    ]

    data.R = [
        1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000,
        0.000000, 1.000000
    ]

    data.P = [
        395.931458, 0.000000, 570.158643, 0.000000, 0.000000, 474.049377,
        329.846866, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000
    ]

    data.binning_x = 0
    data.binning_y = 0
    data.roi.x_offset = 0
    data.roi.y_offset = 0
    data.roi.height = 0
    data.roi.width = 0
    data.roi.do_rectify = False

    return data
Example #6
    def write_camera(self, folder, topic, frame, ext):

        print(' Writing camera: ' + topic)

        records = self.read_csv(self.path + self.name + '/' + folder + '/' +
                                'timestamps.txt')
        bridge = cv_bridge.CvBridge()

        seq = 0
        for row in tqdm(records):

            filename = row[0] + ext
            timestamp = row[1]

            image_path = self.path + self.name + '/' + folder + '/' + filename
            img = cv2.imread(image_path)

            encoding = "bgr8"
            image_message = bridge.cv2_to_imgmsg(img, encoding=encoding)

            image_message.header.frame_id = frame
            image_message.header.stamp = self.timestamp_to_stamp(timestamp)
            image_message.header.seq = seq
            seq += 1

            self.bag.write(topic + '/camera',
                           image_message,
                           t=image_message.header.stamp)

            camera_info = CameraInfo()
            camera_info.header = image_message.header
            camera_info.height = img.shape[0]
            camera_info.width = img.shape[1]
            camera_info.distortion_model = "plumb_bob"
            camera_info.D = [
                -0.15402600433198144, 0.08558850995478451,
                0.002075813671243975, 0.0006580423624898167,
                -0.016293022125192957
            ]
            camera_info.K = [
                1376.8981317210023, 0.0, 957.4934213691823, 0.0,
                1378.3903002987945, 606.5795886217022, 0.0, 0.0, 1.0
            ]
            camera_info.R = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
            camera_info.P = [
                1376.8981317210023, 0.0, 957.4934213691823, 0.0, 0.0,
                1378.3903002987945, 606.5795886217022, 0.0, 0.0, 0.0, 1.0, 0.0
            ]
            camera_info.binning_x = 1
            camera_info.binning_y = 1

            self.bag.write(topic + '/camera_info',
                           camera_info,
                           t=image_message.header.stamp)
Example #7
    def cameraInfoCallback(self, input_msg):
        if self.tf_broadcaster is None:
            self.setUpTfBroadcaster(input_msg.header.frame_id,
                                    input_msg.header.stamp)

        output_msg = CameraInfo()
        output_msg.header = input_msg.header
        output_msg.header.frame_id = rotated_frame_id(
            input_msg.header.frame_id)
        output_msg.width = input_msg.height
        output_msg.height = input_msg.width
        output_msg.distortion_model = input_msg.distortion_model
        # Unclear what to do with D. Luckily, we work with virtual cameras
        # without distortion.
        output_msg.D = input_msg.D
        output_msg.K[0] = input_msg.K[4]
        output_msg.K[1] = 0
        output_msg.K[2] = input_msg.K[5]
        output_msg.K[3] = 0
        output_msg.K[4] = input_msg.K[0]
        output_msg.K[5] = input_msg.K[2]
        output_msg.K[6] = 0
        output_msg.K[7] = 0
        output_msg.K[8] = 1
        output_msg.R = input_msg.R
        output_msg.P[0] = input_msg.P[5]
        output_msg.P[1] = 0
        output_msg.P[2] = input_msg.P[6]
        output_msg.P[3] = 0  #input_msg.P[7]
        output_msg.P[4] = 0
        output_msg.P[5] = input_msg.P[0]
        output_msg.P[6] = input_msg.P[2]
        output_msg.P[7] = 0  #input_msg.P[3]
        output_msg.P[8] = 0
        output_msg.P[9] = 0
        output_msg.P[10] = 1
        output_msg.P[11] = 0

        # Probably like this? In Virtual, both values are zero.
        output_msg.binning_x = input_msg.binning_y
        output_msg.binning_y = input_msg.binning_x

        output_msg.roi.x_offset = input_msg.roi.y_offset
        output_msg.roi.y_offset = input_msg.roi.x_offset
        output_msg.roi.height = input_msg.roi.width
        output_msg.roi.width = input_msg.roi.height
        output_msg.roi.do_rectify = input_msg.roi.do_rectify

        self.camera_info_publisher.publish(output_msg)
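For reference, the element shuffle above swaps fx with fy and cx with cy, which is the author's convention for a 90-degree-rotated virtual camera. A standalone sketch of the same K mapping with illustrative numbers:

# Same K element mapping as cameraInfoCallback above; values are illustrative.
fx, fy, cx, cy = 500.0, 480.0, 320.0, 240.0
K_in = [fx, 0.0, cx, 0.0, fy, cy, 0.0, 0.0, 1.0]
K_out = [K_in[4], 0.0, K_in[5], 0.0, K_in[0], K_in[2], 0.0, 0.0, 1.0]
print(K_out)  # [480.0, 0.0, 240.0, 0.0, 500.0, 320.0, 0.0, 0.0, 1.0]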
Example #8
    def camera_info_left(self):
        msg_header = Header()
        msg_header.frame_id = "uav_camera_down"
        msg_roi = RegionOfInterest()
        msg_roi.x_offset = 0
        msg_roi.y_offset = 0
        msg_roi.height = 0
        msg_roi.width = 0
        msg_roi.do_rectify = 0

        msg = CameraInfo()
        msg.header = msg_header
        msg.height = 480
        msg.width = 640
        msg.distortion_model = 'plumb_bob'
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.K = [1.0, 0.0, 320.5, 0.0, 1.0, 240.5, 0.0, 0.0, 1.0]
        msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg.P = [1.0, 0.0, 320.5, -0.0, 0.0, 1.0, 240.5, 0.0, 0.0, 0.0, 1.0, 0.0]
        msg.binning_x = 0
        msg.binning_y = 0
        msg.roi = msg_roi
        return msg
        
    def camera_info_right(self):
        msg_header = Header()
        msg_header.frame_id = "uav_camera_down"
        msg_roi = RegionOfInterest()
        msg_roi.x_offset = 0
        msg_roi.y_offset = 0
        msg_roi.height = 0
        msg_roi.width = 0
        msg_roi.do_rectify = 0

        msg = CameraInfo()
        msg.header = msg_header
        msg.height = 480
        msg.width = 640
        msg.distortion_model = 'plumb_bob'
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.K = [1.0, 0.0, 320.5, 0.0, 1.0, 240.5, 0.0, 0.0, 1.0]
        msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg.P = [1.0, 0.0, 320.5, -0.0, 0.0, 1.0, 240.5, 0.0, 0.0, 0.0, 1.0, 0.0]
        msg.binning_x = 0
        msg.binning_y = 0
        msg.roi = msg_roi
        return msg
    def depth_estm(self, li, ri):
        # Estimate disparity from rectified left/right images with OpenCV block matching
        stereo = cv2.StereoBM_create(numDisparities=16, blockSize=5)
Example #9
def GetCameraInfo(width, height):
    cam_info = CameraInfo()
    cam_info.width = width
    cam_info.height = height
    cam_info.distortion_model = model
    #cam_info.D = [0.0]*5
    #cam_info.K = [0.0]*9
    #cam_info.R = [0.0]*9
    #cam_info.P = [0.0]*12
    cam_info.D = D
    cam_info.K = K
    cam_info.R = R
    cam_info.P = P
    cam_info.binning_x = 0
    cam_info.binning_y = 0
    return cam_info
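GetCameraInfo above relies on module-level model, D, K, R, and P. A minimal sketch of how they might be defined (placeholder values, not a real calibration):

# Placeholder module-level calibration assumed by GetCameraInfo; not real data.
model = "plumb_bob"
D = [0.0, 0.0, 0.0, 0.0, 0.0]
K = [525.0, 0.0, 319.5, 0.0, 525.0, 239.5, 0.0, 0.0, 1.0]
R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
P = [525.0, 0.0, 319.5, 0.0, 0.0, 525.0, 239.5, 0.0, 0.0, 0.0, 1.0, 0.0]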
Example #10
def get_default_camera_info_msg(imageSize = [1280, 720], frame_id = "FI"):
    tempCameraInfoMsg = CameraInfo()
    tempCameraInfoMsg.height = imageSize[1]
    tempCameraInfoMsg.width  = imageSize[0]
    tempCameraInfoMsg.distortion_model = ""
    tempCameraInfoMsg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
    tempCameraInfoMsg.K = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    tempCameraInfoMsg.R = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    tempCameraInfoMsg.P = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    tempCameraInfoMsg.binning_x = 0
    tempCameraInfoMsg.binning_y = 0
    tempCameraInfoMsg.roi.x_offset   = 0
    tempCameraInfoMsg.roi.y_offset   = 0
    tempCameraInfoMsg.roi.height     = 0
    tempCameraInfoMsg.roi.width      = 0
    tempCameraInfoMsg.roi.do_rectify = False
    tempCameraInfoMsg.header.frame_id = frame_id

    return tempCameraInfoMsg
def parse_yaml(filename):
    stream = open(filename, 'r')
    calib_data = yaml.safe_load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data['image_width']
    cam_info.height = calib_data['image_height']
    cam_info.K = calib_data['camera_matrix']['data']
    cam_info.D = calib_data['distortion_coefficients']['data']
    cam_info.R = calib_data['rectification_matrix']['data']
    cam_info.P = calib_data['projection_matrix']['data']
    cam_info.distortion_model = calib_data['distortion_model']
    cam_info.binning_x = calib_data['binning_x']
    cam_info.binning_y = calib_data['binning_y']
    cam_info.roi.x_offset = calib_data['roi']['x_offset']
    cam_info.roi.y_offset = calib_data['roi']['y_offset']
    cam_info.roi.height = calib_data['roi']['height']
    cam_info.roi.width = calib_data['roi']['width']
    cam_info.roi.do_rectify = calib_data['roi']['do_rectify']
    
    return cam_info
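A hedged usage sketch for parse_yaml: load a calibration file written by the ROS camera calibrator and republish it (the file path, node name, and topic are assumptions):

# Hedged usage sketch; the YAML path and topic name are assumptions.
import rospy
from sensor_msgs.msg import CameraInfo

if __name__ == "__main__":
    rospy.init_node("camera_info_from_yaml")
    info = parse_yaml("calibration.yaml")
    pub = rospy.Publisher("camera/camera_info", CameraInfo, queue_size=1)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        info.header.stamp = rospy.Time.now()
        pub.publish(info)
        rate.sleep()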
Example #12
def parse_yaml(filename):
    stream = open(filename, 'r')
    calib_data = yaml.safe_load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data['image_width']
    cam_info.height = calib_data['image_height']
    cam_info.K = calib_data['camera_matrix']['data']
    cam_info.D = calib_data['distortion_coefficients']['data']
    cam_info.R = calib_data['rectification_matrix']['data']
    cam_info.P = calib_data['projection_matrix']['data']
    cam_info.distortion_model = calib_data['distortion_model']
    cam_info.binning_x = calib_data['binning_x']
    cam_info.binning_y = calib_data['binning_y']
    cam_info.roi.x_offset = calib_data['roi']['x_offset']
    cam_info.roi.y_offset = calib_data['roi']['y_offset']
    cam_info.roi.height = calib_data['roi']['height']
    cam_info.roi.width = calib_data['roi']['width']
    cam_info.roi.do_rectify = calib_data['roi']['do_rectify']

    return cam_info
Example #14
    def run(self):

        self.seq = 0

        cameraInfoTopic = rospy.get_param('~camera_info_topic')
        frameID = rospy.get_param('~frameid')

        cameraInfoPublisher = rospy.Publisher(cameraInfoTopic,
                                              CameraInfo,
                                              queue_size=1)

        cameraInfo = CameraInfo()
        cameraInfo.header.seq = self.seq
        self.seq += 1
        cameraInfo.header.stamp = rospy.get_rostime()
        cameraInfo.header.frame_id = frameID
        cameraInfo.height = rospy.get_param('~height')
        cameraInfo.width = rospy.get_param('~width')
        cameraInfo.distortion_model = "plumb_bob"
        cameraInfo.D = rospy.get_param('~D')
        cameraInfo.K = rospy.get_param('~K')
        cameraInfo.R = rospy.get_param('~R')
        cameraInfo.P = rospy.get_param('~P')
        cameraInfo.binning_x = 0
        cameraInfo.binning_y = 0
        cameraInfo.roi.x_offset = 0
        cameraInfo.roi.y_offset = 0
        cameraInfo.roi.height = 0
        cameraInfo.roi.width = 0
        cameraInfo.roi.do_rectify = False

        rate = rospy.Rate(1)

        while not rospy.is_shutdown():

            cameraInfo.header.seq = self.seq
            self.seq += 1

            cameraInfoPublisher.publish(cameraInfo)

            rate.sleep()
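The loop above reads everything from private ROS parameters; a hedged sketch of setting those parameters from code (normally they come from a launch file, and every value below is a placeholder):

# Placeholder private parameters expected by run(); assumes init_node was already called.
import rospy

rospy.set_param('~camera_info_topic', 'camera/camera_info')
rospy.set_param('~frameid', 'camera_link')
rospy.set_param('~height', 480)
rospy.set_param('~width', 640)
rospy.set_param('~D', [0.0, 0.0, 0.0, 0.0, 0.0])
rospy.set_param('~K', [525.0, 0.0, 319.5, 0.0, 525.0, 239.5, 0.0, 0.0, 1.0])
rospy.set_param('~R', [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0])
rospy.set_param('~P', [525.0, 0.0, 319.5, 0.0, 0.0, 525.0, 239.5, 0.0, 0.0, 0.0, 1.0, 0.0])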
def construct_info(header: Header, info: SimCameraInfo, height: int, width: int) -> CameraInfo:
    msg = CameraInfo()

    Tx = 0.0  # Assumed for now since we are not using stereo
    hfov = np.deg2rad(info.fov)

    # https://github.com/microsoft/AirSim-NeurIPS2019-Drone-Racing/issues/86
    f = width / (2 * np.tan(0.5 * hfov))
    Fx = Fy = f
    cx = width / 2
    cy = height / 2

    K = np.array([
        [Fx,  0.0, cx],
        [0.0, Fy,  cy],
        [0.0, 0.0, 1 ]
    ]).flatten()

    R = np.array([
        [0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0]
    ]).flatten()

    P = np.array([
        [Fx,  0.0, cx,  Tx ],
        [0.0, Fy,  cy,  0.0],
        [0.0, 0.0, 1.0, 0.0]
    ]).flatten()

    msg.header = header
    msg.height = height
    msg.width = width
    msg.k = K
    msg.r = R
    msg.p = P
    msg.binning_x = 0
    msg.binning_y = 0

    return msg
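A hedged usage sketch for construct_info (the lowercase k/r/p fields mark this as a ROS 2 message). Only info.fov is read, so a SimpleNamespace stands in for the simulator's camera-info type; the numbers are illustrative:

# Hedged sketch; SimpleNamespace substitutes for SimCameraInfo (only .fov is used).
from types import SimpleNamespace
from std_msgs.msg import Header

fake_info = SimpleNamespace(fov=90.0)  # horizontal FOV in degrees (assumption)
msg = construct_info(Header(), fake_info, height=480, width=640)
print(msg.k)  # fx = fy = 320.0, cx = 320.0, cy = 240.0 for these values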
def info_param():

    data = CameraInfo()

    data.height = cut_idx[2] - cut_idx[1]
    data.width = cut_idx[4] - cut_idx[3]
    data.distortion_model = "plumb_bob"
    data.D = [0.0, 0.0, 0.0, 0.0, 0.0]
    data.K = [1324.1, 0.0, 808.6, 0.0, 1324.1, 171.7, 0.0, 0.0, 1.0]
    data.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    data.P = [
        1324.1, 0.0, 808.6, 0.0, 0.0, 1324.1, 171.7, 0.0, 0.0, 0.0, 1.0, 0.0
    ]
    data.binning_x = 0
    data.binning_y = 0
    data.roi.x_offset = 0
    data.roi.y_offset = 0
    data.roi.height = 0
    data.roi.width = 0
    data.roi.do_rectify = False

    return data
Example #17
def get_camera_info(hard_coded=True):

    if hard_coded:
        cx = 319.5
        cy = 239.5
        fx = 525.5
        fy = 525.5

        return (cx, cy, fx, fy)

    #if we are using a different camera, then
    #we can listen to the ros camera info topic for that device
    #and get our values here.
    else:

        import image_geometry
        from sensor_msgs.msg import CameraInfo

        cam_info = CameraInfo()

        cam_info.height = 480
        cam_info.width = 640
        cam_info.distortion_model = "plumb_bob"
        cam_info.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        cam_info.K = [525.0, 0.0, 319.5, 0.0, 525.0, 239.5, 0.0, 0.0, 1.0]
        cam_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        cam_info.P = [525.0, 0.0, 319.5, 0.0, 0.0, 525.0, 239.5, 0.0, 0.0, 0.0, 1.0, 0.0]
        cam_info.binning_x = 0
        cam_info.binning_y = 0
        cam_info.roi.x_offset = 0
        cam_info.roi.y_offset = 0
        cam_info.roi.height = 0
        cam_info.roi.width = 0
        cam_info.roi.do_rectify = False

        camera_model = image_geometry.PinholeCameraModel()
        camera_model.fromCameraInfo(cam_info)

        return camera_model.cx(), camera_model.cy(), camera_model.fx(), camera_model.fy()
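A brief hedged follow-up: the same image_geometry model that supplies cx/cy/fx/fy can also project points once fromCameraInfo has been called. A standalone sketch using the intrinsics from the else branch above:

# Standalone sketch: project a 3D point (camera frame) with image_geometry.
import image_geometry
from sensor_msgs.msg import CameraInfo

info = CameraInfo()
info.width, info.height = 640, 480
info.distortion_model = "plumb_bob"
info.D = [0.0, 0.0, 0.0, 0.0, 0.0]
info.K = [525.0, 0.0, 319.5, 0.0, 525.0, 239.5, 0.0, 0.0, 1.0]
info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
info.P = [525.0, 0.0, 319.5, 0.0, 0.0, 525.0, 239.5, 0.0, 0.0, 0.0, 1.0, 0.0]

model = image_geometry.PinholeCameraModel()
model.fromCameraInfo(info)
u, v = model.project3dToPixel((0.1, 0.0, 1.0))  # point one meter ahead, 10 cm to the right
print(u, v)  # approximately (372.0, 239.5) with these intrinsics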
Example #18
    def camera_info_B62241(self):
        camera_info = CameraInfo()
        camera_info.height = 720
        camera_info.width = 960
        camera_info.distortion_model = 'plumb_bob'
        camera_info.D = [-0.026147, 0.071036, -0.001191, 0.000034, 0.000000]
        #     [fx  0 cx]
        # K = [ 0 fy cy]
        #     [ 0  0  1]
        camera_info.K = [
            936.038281, 0.000000, 489.072492, 0.000000, 934.410240, 355.865672,
            0.000000, 0.000000, 1.000000
        ]
        # camera_info.K = [0.0, 0.000000, 0.0, 0.000000, 0.0, 0.0, 0.000000, 0.000000, 0.000000]

        # Rectification matrix (stereo cameras only)
        # A rotation matrix aligning the camera coordinate system to the ideal
        # stereo image plane so that epipolar lines in both stereo images are
        # parallel.
        camera_info.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]  # 3x3 row-major matrix
        # Projection/camera matrix
        #     [fx'  0  cx' Tx]
        # P = [ 0  fy' cy' Ty]
        #     [ 0   0   1   0]
        camera_info.P = [
            937.920654, 0.000000, 488.955720, 0.000000, 0.000000, 935.798462,
            355.122410, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000
        ]
        # Binning refers here to any camera setting which combines rectangular
        #  neighborhoods of pixels into larger "super-pixels." It reduces the
        #  resolution of the output image to
        #  (width / binning_x) x (height / binning_y).
        # The default values binning_x = binning_y = 0 is considered the same
        #  as binning_x = binning_y = 1 (no subsampling).
        camera_info.binning_x = 1
        camera_info.binning_y = 1
        # camera_info.roi.do_rectify = True
        return camera_info
Example #19
    def camera_info_58B4D9(self):
        camera_info = CameraInfo()
        camera_info.height = 720
        camera_info.width = 960
        camera_info.distortion_model = 'plumb_bob'
        camera_info.D = [-0.009413, 0.051197, 0.000404, 0.001424, 0.000000]
        #     [fx  0 cx]
        # K = [ 0 fy cy]
        #     [ 0  0  1]
        camera_info.K = [
            920.424491, 0.000000, 483.628078, 0.000000, 918.646459, 359.107873,
            0.000000, 0.000000, 1.000000
        ]
        # camera_info.K = [0.0, 0.000000, 0.0, 0.000000, 0.0, 0.0, 0.000000, 0.000000, 0.000000]

        # Rectification matrix (stereo cameras only)
        # A rotation matrix aligning the camera coordinate system to the ideal
        # stereo image plane so that epipolar lines in both stereo images are
        # parallel.
        camera_info.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]  # 3x3 row-major matrix
        # Projection/camera matrix
        #     [fx'  0  cx' Tx]
        # P = [ 0  fy' cy' Ty]
        #     [ 0   0   1   0]
        camera_info.P = [
            925.399780, 0.000000, 484.792705, 0.000000, 0.000000, 924.518188,
            359.378385, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000
        ]
        # Binning refers here to any camera setting which combines rectangular
        #  neighborhoods of pixels into larger "super-pixels." It reduces the
        #  resolution of the output image to
        #  (width / binning_x) x (height / binning_y).
        # The default values binning_x = binning_y = 0 is considered the same
        #  as binning_x = binning_y = 1 (no subsampling).
        camera_info.binning_x = 1
        camera_info.binning_y = 1
        # camera_info.roi.do_rectify = True
        return camera_info
Example #20
    def camera_info_B5D81A(self):
        camera_info = CameraInfo()
        camera_info.height = 720
        camera_info.width = 960
        camera_info.distortion_model = 'plumb_bob'
        camera_info.D = [-0.013707, 0.057347, 0.001189, -0.000620, 0.000000]
        #     [fx  0 cx]
        # K = [ 0 fy cy]
        #     [ 0  0  1]
        camera_info.K = [
            932.514995, 0.000000, 509.230011, 0.000000, 930.694048, 363.217342,
            0.000000, 0.000000, 1.000000
        ]

        # Rectification matrix (stereo cameras only)
        # A rotation matrix aligning the camera coordinate system to the ideal
        # stereo image plane so that epipolar lines in both stereo images are
        # parallel.
        camera_info.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]  # 3x3 row-major matrix
        # Projection/camera matrix
        #     [fx'  0  cx' Tx]
        # P = [ 0  fy' cy' Ty]
        #     [ 0   0   1   0]
        camera_info.P = [
            937.264587, 0.000000, 508.215333, 0.000000, 0.000000, 936.094482,
            363.984413, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000
        ]
        # Binning refers here to any camera setting which combines rectangular
        #  neighborhoods of pixels into larger "super-pixels." It reduces the
        #  resolution of the output image to
        #  (width / binning_x) x (height / binning_y).
        # The default values binning_x = binning_y = 0 is considered the same
        #  as binning_x = binning_y = 1 (no subsampling).
        camera_info.binning_x = 1
        camera_info.binning_y = 1
        # camera_info.roi.do_rectify = True
        return camera_info
Example #21
def callback2(msg):
    camMsg = CameraInfo()
    camMsg.header.frame_id = msg.frameID
    camMsg.header.stamp.secs = int(msg.secs)
    camMsg.header.stamp.nsecs = int(msg.nsecs)
    #print("camMsg.stamp: " + str(camMsg.header.stamp))
    camMsg.distortion_model = msg.distortion_model
    camMsg.height = msg.height
    camMsg.width = msg.width

    camMsg.D = [
        -0.026931360455819606, 0.2555749750874423, -0.004149575421355287,
        0.0023300651348101995, 0.0
    ]

    camMsg.K = [
        3030.4197934830086, 0.0, 1920.215344558882, 0.0, 3042.865729970712,
        1003.0127838205526, 0.0, 0.0, 1.0
    ]
    camMsg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    camMsg.P = [
        3030.419793483006, 0.0, 1920.215344558882, 0.0, 0.0, 3042.865729970712,
        1003.0127838205526, 0.0, 0.0, 0.0, 1.0, 0.0
    ]
    '''camMsg.D = [0.6679244637489319, -2.6938982009887695, 0.0009035157854668796, -0.0001931091828737408, 1.4591891765594482, 0.5460429191589355, -2.528550148010254, 1.3961073160171509]
    camMsg.K = [980.1734619140625, 0.0, 1024.3494873046875, 0.0, 979.7545166015625, 782.9638671875, 0.0, 0.0, 1.0]
    camMsg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    camMsg.P = [980.1734619140625, 0.0, 1024.3494873046875, 0.0, 0.0, 979.7545166015625, 782.9638671875, 0.0, 0.0, 0.0, 1.0, 0.0]
'''

    camMsg.binning_x = msg.binning_x
    camMsg.binning_y = msg.binning_y
    global cam_info
    cam_info = camMsg
    #caminfo_pub.publish(camMsg)
    print("camera info published")
Example #22
    def camera_info_57A6A7(self):
        camera_info = CameraInfo()
        camera_info.height = 720
        camera_info.width = 960
        camera_info.distortion_model = 'plumb_bob'
        camera_info.D = [-0.034749, 0.071514, 0.000363, 0.003131, 0]
        #     [fx  0 cx]
        # K = [ 0 fy cy]
        #     [ 0  0  1]
        camera_info.K = [
            924.873180, 0, 486.997346, 0, 923.504522, 364.308527, 0, 0, 1
        ]

        # Rectification matrix (stereo cameras only)
        # A rotation matrix aligning the camera coordinate system to the ideal
        # stereo image plane so that epipolar lines in both stereo images are
        # parallel.
        camera_info.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]  # 3x3 row-major matrix
        # Projection/camera matrix
        #     [fx'  0  cx' Tx]
        # P = [ 0  fy' cy' Ty]
        #     [ 0   0   1   0]
        camera_info.P = [
            921.967102, 0, 489.492281, 0, 0, 921.018890, 364.508536, 0, 0, 0,
            1.000000, 0
        ]
        # Binning refers here to any camera setting which combines rectangular
        #  neighborhoods of pixels into larger "super-pixels." It reduces the
        #  resolution of the output image to
        #  (width / binning_x) x (height / binning_y).
        # The default values binning_x = binning_y = 0 is considered the same
        #  as binning_x = binning_y = 1 (no subsampling).
        camera_info.binning_x = 1
        camera_info.binning_y = 1
        # camera_info.roi.do_rectify = True
        return camera_info
Example #23
rightCamInfo = CameraInfo()
rightCamInfo.K = [
    334.3432506979186, 0.0, 258.96387838003477, 0.0, 329.156146359108,
    257.21497195869205, 0.0, 0.0, 1.0
]
rightCamInfo.R = [
    0.999662592692041, 0.01954381929425834, -0.017109643468520532,
    -0.019640898676970494, 0.9997918363216206, -0.0055244116250247315,
    0.016998113759693796, 0.005858596421934612, 0.9998383574241275
]
rightCamInfo.P = [
    372.1279396837233, 0.0, 276.3964099884033, 135.43805496802958, 0.0,
    372.1279396837233, 262.72875595092773, 0.0, 0.0, 0.0, 1.0, 0.0
]
rightCamInfo.distortion_model = "plumb_bob"
rightCamInfo.width = 300
rightCamInfo.height = 300
rightCamInfo.binning_x = 0
rightCamInfo.binning_y = 0

sensor_id = 0,
sensor_mode = 3,
capture_width = 1280,
capture_height = 720,
display_width = 1280,
display_height = 720,
framerate = 30,
flip_method = 0,
"""3264 x 2464 FR = 21.000000 fps Duration = 47619048 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;"""

gst_pipeline = (
    "nvarguscamerasrc sensor-id=0 sensor-mode=0 ! "
    "video/x-raw(memory:NVMM), "
Example #24
    def run(self):
        img = Image()
        r = rospy.Rate(self.config['frame_rate'])
        while self.is_looping():
            if self.pub_img_.get_num_connections() == 0:
                if self.nameId:
                    rospy.loginfo('Unsubscribing from camera as nobody listens to the topics.')
                    self.camProxy.unsubscribe(self.nameId)
                    self.nameId = None
                r.sleep()
                continue
            if self.nameId is None:
                self.reconfigure(self.config, 0)
                r.sleep()
                continue
            image = self.camProxy.getImageRemote(self.nameId)
            if image is None:
                continue
            # Deal with the image
            if self.config['use_ros_time']:
                img.header.stamp = rospy.Time.now()
            else:
                img.header.stamp = rospy.Time(image[4] + image[5]*1e-6)
            img.header.frame_id = self.frame_id
            img.height = image[1]
            img.width = image[0]
            nbLayers = image[2]
            if image[3] == kYUVColorSpace:
                encoding = "mono8"
            elif image[3] == kRGBColorSpace:
                encoding = "rgb8"
            elif image[3] == kBGRColorSpace:
                encoding = "bgr8"
            elif image[3] == kYUV422ColorSpace:
                encoding = "yuv422" # this works only in ROS groovy and later
            elif image[3] == kDepthColorSpace:
                encoding = "mono16"
            else:
                rospy.logerr("Received unknown encoding: {0}".format(image[3]))
            img.encoding = encoding
            img.step = img.width * nbLayers
            img.data = image[6]

            self.pub_img_.publish(img)

            # deal with the camera info
            if self.config['source'] == kDepthCamera and image[3] == kDepthColorSpace:
                infomsg = CameraInfo()
                # yes, this is only for an XTion / Kinect but that's the only thing supported by NAO
                ratio_x = float(640)/float(img.width)
                ratio_y = float(480)/float(img.height)
                infomsg.width = img.width
                infomsg.height = img.height
                # [ 525., 0., 3.1950000000000000e+02, 0., 525., 2.3950000000000000e+02, 0., 0., 1. ]
                infomsg.K = [ 525, 0, 3.1950000000000000e+02,
                              0, 525, 2.3950000000000000e+02,
                              0, 0, 1 ]
                infomsg.P = [ 525, 0, 3.1950000000000000e+02, 0,
                              0, 525, 2.3950000000000000e+02, 0,
                              0, 0, 1, 0 ]
                for i in range(3):
                    infomsg.K[i] = infomsg.K[i] / ratio_x
                    infomsg.K[3+i] = infomsg.K[3+i] / ratio_y
                    infomsg.P[i] = infomsg.P[i] / ratio_x
                    infomsg.P[4+i] = infomsg.P[4+i] / ratio_y

                infomsg.D = []
                infomsg.binning_x = 0
                infomsg.binning_y = 0
                infomsg.distortion_model = ""

                infomsg.header = img.header
                self.pub_info_.publish(infomsg)
            elif self.config['camera_info_url'] in self.camera_infos:
                infomsg = self.camera_infos[self.config['camera_info_url']]

                infomsg.header = img.header
                self.pub_info_.publish(infomsg)

            r.sleep()

        if (self.nameId):
            rospy.loginfo("unsubscribing from camera ")
            self.camProxy.unsubscribe(self.nameId)
Example #25
    def run(self):
        img = Image()
        r = rospy.Rate(self.config['frame_rate'])
        while self.is_looping():
            if self.pub_img_.get_num_connections() == 0:
                if self.nameId:
                    rospy.loginfo(
                        'Unsubscribing from camera as nobody listens to the topics.'
                    )
                    self.camProxy.unsubscribe(self.nameId)
                    self.nameId = None
                r.sleep()
                continue
            if self.nameId is None:
                self.reconfigure(self.config, 0)
                r.sleep()
                continue
            image = self.camProxy.getImageRemote(self.nameId)
            if image is None:
                continue
            # Deal with the image
            if self.config['use_ros_time']:
                img.header.stamp = rospy.Time.now()
            else:
                img.header.stamp = rospy.Time(image[4] + image[5] * 1e-6)
            img.header.frame_id = self.frame_id
            img.height = image[1]
            img.width = image[0]
            nbLayers = image[2]
            if image[3] == kYUVColorSpace:
                encoding = "mono8"
            elif image[3] == kRGBColorSpace:
                encoding = "rgb8"
            elif image[3] == kBGRColorSpace:
                encoding = "bgr8"
            elif image[3] == kYUV422ColorSpace:
                encoding = "yuv422"  # this works only in ROS groovy and later
            elif image[3] == kDepthColorSpace:
                encoding = "mono16"
            else:
                rospy.logerr("Received unknown encoding: {0}".format(image[3]))
            img.encoding = encoding
            img.step = img.width * nbLayers
            img.data = image[6]

            self.pub_img_.publish(img)

            # deal with the camera info
            if self.config['source'] == kDepthCamera and image[
                    3] == kDepthColorSpace:
                infomsg = CameraInfo()
                # yes, this is only for an XTion / Kinect but that's the only thing supported by NAO
                ratio_x = float(640) / float(img.width)
                ratio_y = float(480) / float(img.height)
                infomsg.width = img.width
                infomsg.height = img.height
                # [ 525., 0., 3.1950000000000000e+02, 0., 525., 2.3950000000000000e+02, 0., 0., 1. ]
                infomsg.K = [
                    525, 0, 3.1950000000000000e+02, 0, 525,
                    2.3950000000000000e+02, 0, 0, 1
                ]
                infomsg.P = [
                    525, 0, 3.1950000000000000e+02, 0, 0, 525,
                    2.3950000000000000e+02, 0, 0, 0, 1, 0
                ]
                for i in range(3):
                    infomsg.K[i] = infomsg.K[i] / ratio_x
                    infomsg.K[3 + i] = infomsg.K[3 + i] / ratio_y
                    infomsg.P[i] = infomsg.P[i] / ratio_x
                    infomsg.P[4 + i] = infomsg.P[4 + i] / ratio_y

                infomsg.D = []
                infomsg.binning_x = 0
                infomsg.binning_y = 0
                infomsg.distortion_model = ""
                self.pub_info_.publish(infomsg)

            elif self.config['camera_info_url'] in self.camera_infos:
                infomsg = self.camera_infos[self.config['camera_info_url']]
                infomsg.header = img.header
                self.pub_info_.publish(infomsg)

            r.sleep()

        if (self.nameId):
            rospy.loginfo("unsubscribing from camera ")
            self.camProxy.unsubscribe(self.nameId)
Example #26
                      0.0,
                      1.0]
  f_cam_info_msg.P = [
      382.595764,
      0.0,
      320.434540,
      0.0,
      0.0,
      382.595764,
      242.255585,
      0.0,
      0.0,
      0.0,
      1.0,
      0.0
  ]
  f_cam_info_msg.binning_x = 0
  f_cam_info_msg.binning_y = 0
  f_cam_info_msg.roi.x_offset = 0
  f_cam_info_msg.roi.y_offset = 0
  f_cam_info_msg.roi.height = 0
  f_cam_info_msg.roi.width = 0
  f_cam_info_msg.roi.do_rectify = False

  rate = rospy.Rate(10)
  while not rospy.is_shutdown():
    f_cam_info_pub.publish(f_cam_info_msg)
    # b_cam_info_pub.publish(f_cam_info_msg)
    rate.sleep()
    # rospy.spin()
Example #27
leftCamInfo = CameraInfo()
leftCamInfo.K = [
    536.5758268477813, 0.0, 247.44670033531492, 0.0, 531.3262582465451,
    262.0510590778803, 0.0, 0.0, 1.0
]
leftCamInfo.R = [
    0.9987845570311966, -0.013670814378410176, -0.04735522642990731,
    0.01394021992417426, 0.9998884456533916, 0.0053634426733278435,
    0.04727662111934815, -0.006017065985633907, 0.998863712431512
]
leftCamInfo.P = [
    572.1279396837233, 0.0, 276.3964099884033, 0.0, 0.0, 572.1279396837233,
    262.72875595092773, 0.0, 0.0, 0.0, 1.0, 0.0
]
leftCamInfo.distortion_model = "plumb_bob"
leftCamInfo.width = 300
leftCamInfo.height = 300
leftCamInfo.binning_x = 0
leftCamInfo.binning_y = 0

sensor_id = 0,
sensor_mode = 3,
capture_width = 1280,
capture_height = 720,
display_width = 1280,
display_height = 720,
framerate = 30,
flip_method = 0,

gst_pipeline = (
    "nvarguscamerasrc sensor-id=1 sensor-mode=0 ! "
    "video/x-raw(memory:NVMM), "
    "width=(int)3264, height=(int)2464, "
def main():
    parser = argparse.ArgumentParser(
        description="Convert an LCM log to a ROS bag (mono/stereo images only).")
    parser.add_argument('lcm_file', help='Input LCM log.', action='store')
    parser.add_argument('left_img_channel', help='LCM channel for left image.')
    parser.add_argument('left_camera_yml',
                        help='Image calibration YAML file from ROS calibrator')
    parser.add_argument('--right_img_channel', help='LCM channel for right image.',
                        action='append', dest='lcm_channels')
    parser.add_argument('--right_camera_yml',
                        help='Image calibration YAML file from ROS calibrator',
                        action='append', dest='yml_files')

    roi_parser = parser.add_argument_group("Format7/ROI", "Format7/ROI options needed when dealing with non-standard video modes.")
    roi_parser.add_argument('--binning_x', default=1, type=int, dest='binning_x', help='Image binning factor.')
    roi_parser.add_argument('--binning_y', default=1, type=int, dest='binning_y', help='Image binning factor.')
    roi_parser.add_argument('--x_offset', default=0, type=int, dest='x_offset', help="ROI x offset (in UNBINNED pixels)")
    roi_parser.add_argument('--y_offset', default=0, type=int, dest='y_offset', help="ROI y offset (in UNBINNED pixels)")
    roi_parser.add_argument('--width', default=640, type=int, dest='width', help="ROI width (in UNBINNED pixels)")
    roi_parser.add_argument('--height', default=480, type=int, dest='height', help="ROI height (in UNBINNED pixels)")
    roi_parser.add_argument('--do_rectify', default=False, type=bool, dest='do_rectify', help="Do rectification when querying ROI.")
    args = parser.parse_args()

    if args.lcm_channels is None:
        args.lcm_channels = []
    if args.yml_files is None:
        args.yml_files = []

    args.lcm_channels.append(args.left_img_channel)
    args.yml_files.append(args.left_camera_yml)

    if len(args.lcm_channels) != len(args.yml_files):
        print "LCM channel-YAML file mismatch!"

    print "Converting images in %s to ROS bag file..." % (args.lcm_file)

    log = lcm.EventLog(args.lcm_file, 'r')
    bag = rosbag.Bag(args.lcm_file + '.images.bag', 'w')

    # Read in YAML files.
    yml = []
    for y in args.yml_files:
        yml.append(yaml.safe_load(open(y)))

    try:
        count = 0
        for event in log:
            for ii in range(len(args.lcm_channels)):
                l = args.lcm_channels[ii]
                y = yml[ii]

                if event.channel == l:
                    lcm_msg = image_t.decode(event.data)

                    # Fill in image.
                    if lcm_msg.pixelformat != image_t.PIXEL_FORMAT_MJPEG:
                        print "Encountered non-MJPEG compressed image. Skipping..."
                        continue

                    ros_msg = CompressedImage()
                    ros_msg.header.seq = event.eventnum

                    secs_float = float(lcm_msg.utime)/1e6
                    nsecs_float = (secs_float - np.floor(secs_float)) * 1e9
                    ros_msg.header.stamp.secs = np.uint32(np.floor(secs_float))
                    ros_msg.header.stamp.nsecs = np.uint32(np.floor(nsecs_float))
                    ros_msg.header.frame_id = "camera"

                    ros_msg.format = 'jpeg'

                    ros_msg.data = lcm_msg.data

                    # Fill in camera info
                    camera_info = CameraInfo()
                    camera_info.header = ros_msg.header
                    camera_info.height = y['image_height']
                    camera_info.width = y['image_width']

                    if y["distortion_model"] != "plumb_bob":
                        print "Encountered non-supported distorion model %s. Skipping..." % y["distortion_model"]
                        continue

                    camera_info.distortion_model = y["distortion_model"]
                    camera_info.D = y["distortion_coefficients"]['data']
                    camera_info.K = y["camera_matrix"]['data']
                    camera_info.R = y["rectification_matrix"]['data']
                    camera_info.P = y["projection_matrix"]['data']
                    camera_info.binning_x = args.binning_x
                    camera_info.binning_y = args.binning_y
                    camera_info.roi.x_offset = args.x_offset
                    camera_info.roi.y_offset = args.y_offset
                    camera_info.roi.height = args.height
                    camera_info.roi.width = args.width
                    camera_info.roi.do_rectify = args.do_rectify

                    bag.write("/camera/" + l + "/image_raw/compressed", ros_msg, ros_msg.header.stamp)
                    bag.write("/camera/" + l + "/camera_info", camera_info, camera_info.header.stamp)

                    count += 1

                    if count % 100 == 0:
                        print "Wrote %i events" % count
    finally:
        log.close()
        bag.close()

    print("Done.")
Example #29
    def main_loop(self):
        img = Image()
        cimg = Image()
        r = rospy.Rate(15)
        while not rospy.is_shutdown():
            if self.pub_img_.get_num_connections() == 0:
                r.sleep()
                continue

            image = self.camProxy.getImageRemote(self.nameId)
            if image is None:
                continue
            # Deal with the image
            '''
            #Images received from NAO have
            if self.config['use_ros_time']:
                img.header.stamp = rospy.Time.now()
            else:
                img.header.stamp = rospy.Time(image[4] + image[5]*1e-6)
            '''
            img.header.stamp = rospy.Time.now()
            img.header.frame_id = self.frame_id
            img.height = image[1]
            img.width = image[0]
            nbLayers = image[2]
            if image[3] == kDepthColorSpace:
                encoding = "mono16"
            else:
                rospy.logerr("Received unknown encoding: {0}".format(image[3]))
            img.encoding = encoding
            img.step = img.width * nbLayers
            img.data = image[6]

            self.pub_img_.publish(img)

            # deal with the camera info
            infomsg = CameraInfo()
            infomsg.header = img.header
            # yes, this is only for an XTion / Kinect but that's the only thing supported by NAO
            ratio_x = float(640) / float(img.width)
            ratio_y = float(480) / float(img.height)
            infomsg.width = img.width
            infomsg.height = img.height
            # [ 525., 0., 3.1950000000000000e+02, 0., 525., 2.3950000000000000e+02, 0., 0., 1. ]
            infomsg.K = [
                525, 0, 3.1950000000000000e+02, 0, 525, 2.3950000000000000e+02,
                0, 0, 1
            ]
            infomsg.P = [
                525, 0, 3.1950000000000000e+02, 0, 0, 525,
                2.3950000000000000e+02, 0, 0, 0, 1, 0
            ]

            for i in range(3):
                infomsg.K[i] = infomsg.K[i] / ratio_x
                infomsg.K[3 + i] = infomsg.K[3 + i] / ratio_y
                infomsg.P[i] = infomsg.P[i] / ratio_x
                infomsg.P[4 + i] = infomsg.P[4 + i] / ratio_y

            infomsg.D = []
            infomsg.binning_x = 0
            infomsg.binning_y = 0
            infomsg.distortion_model = ""
            self.pub_info_.publish(infomsg)

            #Currently we only get depth image from the 3D camera of NAO, so we make up a fake color image (a black image)
            #and publish it under image_color topic.
            #This should be update when the color image from 3D camera becomes available.
            colorimg = np.zeros((img.height, img.width, 3), np.uint8)
            try:
                cimg = self.bridge.cv2_to_imgmsg(colorimg, "bgr8")
                cimg.header.stamp = img.header.stamp
                cimg.header.frame_id = img.header.frame_id
                self.pub_cimg_.publish(cimg)
            except CvBridgeError as e:
                print(e)

            r.sleep()
Example #30
      P: [500.0, 0.0, 320, 0.0, 0.0, 500, 240, 0.0, 0.0, 0.0, 1.0, 0.0],
      binning_x: 0, binning_y: 0,
      roi: {x_offset: 0, y_offset: 0, height: 480, width: 640, do_rectify: false}}
"""
camera1.header.seq = 0
camera1.header.stamp.secs = 0
camera1.header.stamp.nsecs = 0
camera1.header.frame_id = 'camera1'
camera1.height = 480
camera1.width = 640
camera1.distortion_model = 'plumb_bob'
camera1.D = [0]
camera1.K = [500.0, 0.0, 320, 0.0, 500.0, 240.0, 0.0, 0.0, 1.0]
camera1.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
camera1.P = [500.0, 0.0, 320, 0.0, 0.0, 500, 240, 0.0, 0.0, 0.0, 1.0, 0.0]
camera1.binning_x = 0
camera1.binning_y = 0
camera1.roi.x_offset = 0
camera1.roi.y_offset = 0
camera1.roi.height = 480
camera1.roi.width = 640
camera1.roi.do_rectify = False
"""
camera2 = {header: {seq: 0, stamp: {secs: 0, nsecs: 0}, frame_id: 'camera2'},

      height: 480, width: 640, distortion_model: 'plumb_bob',

      D: [0],
      K: [300.0, 0.0, 640, 0.0, 300.0, 360.0, 0.0, 0.0, 1.0],
      R: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
      P: [300.0, 0.0, 640, 0.0, 0.0, 300, 360, 0.0, 0.0, 0.0, 1.0, 0.0],
Example #31
    def camera_info_back(self):
        msg_header = Header()
        msg_header.frame_id = "f450/robot_camera_down"
        msg_roi = RegionOfInterest()
        msg_roi.x_offset = 0
        msg_roi.y_offset = 0
        msg_roi.height = 0
        msg_roi.width = 0
        msg_roi.do_rectify = False
        msg = CameraInfo()
        msg.header = msg_header
        msg.height = 480
        msg.width = 640
        msg.distortion_model = 'plumb_bob'
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.K = [1.0, 0.0, 320.5, 0.0, 1.0, 240.5, 0.0, 0.0, 1.0]
        msg.R = [1.0, 0.0, 0.0, 0.0, 0.866, 0.5, 0.0, -0.5, 0.866]  # 30-degree rotation about the x axis
        msg.P = [1.0, 0.0, 320.5, 0.0, 0.0, 1.0, 240.5, 0.0, 0.0, 0.0, 1.0, 0.0]  # same as the front camera
        msg.binning_x = 0
        msg.binning_y = 0
        msg.roi = msg_roi
        return msg
        
    def camera_info_front(self):
        msg_header = Header()
        msg_header.frame_id = "f450/robot_camera"
        msg_roi = RegionOfInterest()
        msg_roi.x_offset = 0
        msg_roi.y_offset = 0
        msg_roi.height = 0
        msg_roi.width = 0
        msg_roi.do_rectify = False
        msg = CameraInfo()
        msg.header = msg_header
        msg.height = 480
        msg.width = 640
        msg.distortion_model = 'plumb_bob'
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.K = [1.0, 0.0, 320.5, 0.0, 1.0, 240.5, 0.0, 0.0, 1.0]
        msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg.P = [1.0, 0.0, 320.5, 0.0, 0.0, 1.0, 240.5, 0.0, 0.0, 0.0, 1.0, 0.0]
        msg.binning_x = 0
        msg.binning_y = 0
        msg.roi = msg_roi
        return msg
        
    def convert_pixel_camera(self):
        pass
    
    def mocap_cb(self,msg):
        self.curr_pose = msg
    
    def state_cb(self,msg):
        if msg.mode == 'OFFBOARD':
            self.isReadyToFly = True
        else:
            print(msg.mode)
            
    def update_state_cb(self, data):
        self.mode = data.data
        print(self.mode)
    
    def tag_detections(self,msgs):
        rate = rospy.Rate(30)
        if len(msgs.detections)>0:
            msg = msgs.detections[0].pose   # The first element in the array is the pose
            self.tag_pt= msg.pose.pose.position
            self.pub.publish("FOUND UAV")
        else:
            if self.mode == "DESCENT":
                self.pub.publish("MISSING UAV")
                
    def get_descent(self, x, y,z):
        des_vel = PositionTarget()
        des_vel.header.frame_id = "world"
        des_vel.header.stamp = rospy.Time.from_sec(time.time())
        des_vel.coordinate_frame = 8
        des_vel.type_mask = 3527
        des_vel.velocity.x = x
        des_vel.velocity.y = y
        des_vel.velocity.z = z
        return des_vel
        
    def lawnmover(self, rect_x, rect_y, height, offset, offset_x):
        if len(self.loc) == 0:
            takeoff = [self.curr_pose.pose.position.x, self.curr_pose.pose.position.y, height, 0, 0, 0, 0]
            move_to_offset = [self.curr_pose.pose.position.x + offset, self.curr_pose.pose.position.y - rect_y/2, height, 0, 0, 0, 0]
            self.loc.append(takeoff)
            self.loc.append(move_to_offset)
        left = True
        
        while True:
            if left:
                x = self.loc[len(self.loc)-1][0]
                y = self.loc[len(self.loc)-1][1] + rect_y/3
                z = self.loc[len(self.loc)-1][2]
                self.loc.append([x,y,z,0,0,0,0])
                x = self.loc[len(self.loc)-1][0]
                y = self.loc[len(self.loc)-1][1] + rect_y/3
                z = self.loc[len(self.loc)-1][2]
                self.loc.append([x,y,z,0,0,0,0])
                x = self.loc[len(self.loc)-1][0]
                y = self.loc[len(self.loc)-1][1] + rect_y/3
                z = self.loc[len(self.loc)-1][2]
                left = False
                x = self.loc[len(self.loc)-1][0] + offset_x
                y = self.loc[len(self.loc)-1][1]
                z = self.loc[len(self.loc)-1][2]
                self.loc.append([x,y,z,0,0,0,0])
                if x > rect_x :
                    break
            else:
                x = self.loc[len(self.loc)-1][0]
                y = self.loc[len(self.loc)-1][1]-rect_y/3
                z = self.loc[len(self.loc)-1][2]
                self.loc.append([x,y,z,0,0,0,0])
                x = self.loc[len(self.loc)-1][0]
                y = self.loc[len(self.loc)-1][1]-rect_y/3
                z = self.loc[len(self.loc)-1][2]
                self.loc.append([x,y,z,0,0,0,0])
                x = self.loc[len(self.loc)-1][0]
                y = self.loc[len(self.loc)-1][1]-rect_y/3
                z = self.loc[len(self.loc)-1][2]
                self.loc.append([x,y,z,0,0,0,0])
                left = True
                x = self.loc[len(self.loc)-1][0]+ offset_x
                y = self.loc[len(self.loc)-1][1]
                z = self.loc[len(self.loc)-1][2]
                self.loc.append([x,y,z,0,0,0,0])
                if x > rect_x:
                    break
                    
        rate = rospy.Rate(10)
        self.des_pose = self.copy_pose(self.curr_pose)                        #Why do we need to copy curr_pose into des_pose
        shape = len(self.loc)
        pose_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size = 10)
        print(self.mode)
        
        while self.mode == "SURVEY" and not rospy.is_shutdown():
            if self.waypointIndex == shape :
                self.waypointIndex = 1                  # resetting the waypoint index
                
            if self.isReadyToFly:
            
                des_x = self.loc[self.waypointIndex][0]
                des_y = self.loc[self.waypointIndex][1]
                des_z = self.loc[self.waypointIndex][2]
                self.des_pose.pose.position.x = des_x
                self.des_pose.pose.position.y = des_y
                self.des_pose.pose.position.z = des_z
                self.des_pose.pose.orientation.x = self.loc[self.waypointIndex][3]
                self.des_pose.pose.orientation.y = self.loc[self.waypointIndex][4]
                self.des_pose.pose.orientation.z = self.loc[self.waypointIndex][5]
                self.des_pose.pose.orientation.w = self.loc[self.waypointIndex][6]
                curr_x = self.curr_pose.pose.position.x
                curr_y = self.curr_pose.pose.position.y
                curr_z = self.curr_pose.pose.position.z
                dist = math.sqrt((curr_x - des_x)*(curr_x - des_x) + (curr_y - des_y)*(curr_y - des_y) + (curr_z - des_z)*(curr_z - des_z))
                if dist < self.distThreshold:
                    self.waypointIndex += 1
            pose_pub.publish(self.des_pose)
            rate.sleep()
    
    def hover(self):
        location = self.hover_loc
        loc = [location,
               location,
               location,
               location,
               location,
               location,
               location,
               location,
               location]
               
        rate = rospy.Rate(10)
        rate.sleep()
        shape = len(loc)
        pose_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size= 10)
        des_pose = self.copy_pose(self.curr_pose)
        waypoint_index = 0
        sim_ctr = 1
        print(self.mode)
        while self.mode == "HOVER" and not rospy.is_shutdown():
            if waypoint_index == shape:
                waypoint_index = 0            # reset the waypoint index
                sim_ctr = sim_ctr + 1
                print("HOVER STOP COUNTER: " + str(sim_ctr))
            if self.isReadyToFly:
                des_x = loc[waypoint_index][0]
                des_y = loc[waypoint_index][1]
                des_z = loc[waypoint_index][2]
                des_pose.pose.position.x = des_x
                des_pose.pose.position.y = des_y
                des_pose.pose.position.z = des_z
                des_pose.pose.orientation.x = loc[waypoint_index][3]
                des_pose.pose.orientation.y = loc[waypoint_index][4]
                des_pose.pose.orientation.z = loc[waypoint_index][5]
                des_pose.pose.orientation.w = loc[waypoint_index][6]
                curr_x = self.curr_pose.pose.position.x
                curr_y = self.curr_pose.pose.position.y
                curr_z = self.curr_pose.pose.position.z
                
                dist = math.sqrt((curr_x - des_x)*(curr_x - des_x) + (curr_y - des_y)*(curr_y - des_y) + (curr_z - des_z)*(curr_z - des_z))
                if dist < self.distThreshold:
                    waypoint_index += 1
                    
            if dist < self.distThreshold:
                waypoint_index += 1
                
        if sim_ctr == 50:
            pass
            
        pose_pub.publish(des_pose)
        rate.sleep()
        
    def scan(self, rect_y, offset_x):
        move=   ""
        rate=rospy.Rate(10)
        if self.waypointIndex %4 ==1:
            move="back"
            
        elif((self.waypointIndex + (self.waypointIndex % 4))/4)%2 == 0:
            move = "right"
        else:
            move = "left"
        print self.mode
        loc = self.curr_pose.pose.position
        print(loc)
        print(rect_y)
        print(offset_x)
        while self.mode == "SCAN" and not rospy.is_shutdown():
            if move == "left":
                self.vel_pub.publish(self.get_decent(0,0.5,0.1))
                if abs(self.curr_pose.pose.position.y - loc.y) > rect_y/3
                    self.pub.publish("SCAN COMPLETE")
                    
            elif move == "right":
                self.vel_pub.publish(self.get_decent(0,-0.5,0.1))
                if abs(self.curr_pose.pose.postion.y - loc.y) > rect_y/3
                    self.pub.publish("SCAN COMPLETE")
                    
            elif move == "back":
                self.vel_pub.publish(self.get_decent(-0.3,0,1))
                if abs(self.curr_pose.pose.position.x - loc.x) > offset_x:  
                    self.pub.publish("SCAN COMPLETE")
                    
            else:
                print "move error"
                
            print abs(self.curr_pose.pose.position.y - loc.y)
            print abs(self.curr_pose.pose.position.x - loc.x)
            rate.sleep()
    
    
    def descent(self):
        rate = rospy.Rate(10) # 10 Hz
        time_step = 0.1
        print(self.mode)
        self.x_change = 1
        self.y_change = 1
        self.x_prev_error = 0
        self.y_prev_error = 0
        self.x_sum_error = 0
        self.y_sum_error = 0
        self.x_exp_movement = 0
        self.y_exp_movement = 0
        self.x_track = 0
        self.y_track = 0
        self.curr_xerror = 0
        self.curr_yerror = 0
        
        while self.mode == "DESCENT" and self.curr_pose.pose.position.z > 0.2 and not rospy.is_shutdown():
            err_x = self.curr_pose.pose.position.x - self.tag_pt.x
            err_y = self.curr_pose.pose.position.y - self.tag_pt.y
            self.x_change += err_x*self.KP + (((self.x_prev_error-self.curr_xerror)/time_step)* self.KD) + (self.x_sum_error * self.KI*time_step)
            self.y_change += err_y*self.KP + (((self.y_prev_error-self.curr_yerror)/time_step)* self.KD) + (self.y_sum_error * self.KI*time_step)
            self.x_change =  max(min(0.4,self.x_change), -0.4)
            self.y_change =  max(min(0.4,self.y_change), -0.4)
            if err_x > 0 and err_y < 0:
                des = self.get_descent(-1*abs(self.x_change), 1*abs(self.y_change), -0.08)
            elif err_x > 0 and err_y > 0:
                des = self.get_descent(-1*abs(self.x_change), -1*abs(self.y_change), -0.08)
            elif err_x < 0 and err_y > 0:
                des = self.get_descent(1*abs(self.x_change), -1*abs(self.y_change), -0.08)
            elif err_x < 0 and err_y < 0:
                des = self.get_descent(1*abs(self.x_change), 1*abs(self.y_change), -0.08)
            else:
                des = self.get_descent(self.x_change, self.y_change, -0.08)
            self.vel_pub.publish(des)
            rate.sleep()
            self.x_exp_movement = self.x_change
            self.y_exp_movement = self.y_change
            self.x_track = self.x_exp_movement + self.curr_pose.pose.position.x
            self.y_track = self.y_exp_movement + self.curr_pose.pose.position.y
            self.curr_xerror= self.x_track - self.tag_pt.x
            self.curr_yerror= self.y_track - self.tag_pt.y
            self.x_prev_error= err_x
            self.y_prev_error= err_y
            self.x_sum_error += err_x
            self.y_sum_error += err_y
                
        if self.curr_pose.pose.position.z < 0.2:
            #Perform the necessary action to complete pickup instruct actuators
            self.pub.publish("PICKUP COMPLETE")
                
    def yolo_descent(self):
        rate = rospy.Rate(10)
        print self.mode
        self.x_change = 1
        self.y_change = 1
        self.x_prev_error = 0
        self.y_prev_error = 0
        self.x_sum_error = 0
        self.y_sum_error = 0
        timeout = 120
        yolo_KP = 0.08
        yolo_KD = 0.004
        yolo_KI = 0.0005
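        # With the loop running at 10 Hz, timeout = 120 iterations is roughly
        # 12 seconds; if the vehicle is still above 0.7 when it expires, the
        # PID state below is zeroed and the counter restarts.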
        while self.mode == "DESCENT" and not rospy.is_shutdown():
            err_x = 0 - self.target[0]
            err_y = 0 - self.target[1]

            self.x_change += err_x * yolo_KP + (self.x_prev_error * yolo_KD) + (self.x_sum_error * yolo_KI)
            self.y_change += err_y * yolo_KP + (self.y_prev_error * yolo_KD) + (self.y_sum_error * yolo_KI)

            self.x_change = max(min(0.4, self.x_change), -0.4)
            self.y_change = max(min(0.4, self.y_change), -0.4)

            if err_x > 0 and err_y < 0:
                des = self.get_descent(-1 * self.x_change, -1 * self.y_change, -0.08)
            elif err_x < 0 and err_y > 0:
                des = self.get_descent(-1 * self.x_change, -1 * self.y_change, -0.08)
            else:
                des = self.get_descent(self.x_change, self.y_change, -0.08)

            self.vel_pub.publish(des)
            timeout -= 1
            rate.sleep()
            self.x_prev_error = err_x
            self.y_prev_error = err_y
            self.x_sum_error += err_x
            self.y_sum_error += err_y
            if timeout == 0 and self.curr_pose.pose.position.z > 0.7:
                timeout = 120
                print timeout
                self.x_change = 0
                self.y_change = 0
                self.x_sum_error = 0
                self.y_sum_error = 0
                self.x_prev_error = 0
                self.y_prev_error = 0

            if self.curr_pose.pose.position.z < 0.2:  # TODO
                # self.mode = "HOVER" # PICK UP
                # self.hover_loc = [self.curr_pose.pose.position.x]  # TODO
                self.pub.publish("PICKUP COMPLETE")

    def rt_survey(self):
        location = [self.saved_location.pose.position.x,
                    self.saved_location.pose.position.y,
                    self.saved_location.pose.position.z,
                    0, 0, 0, 0]
        loc = [location,
               location,
               location,
               location,
               location]

        rate = rospy.Rate(10)
        rate.sleep()
        shape = len(loc)
        pose_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10)
        des_pose = self.copy_pose(self.curr_pose)
        waypoint_index = 0
        sim_ctr = 1
        print self.mode

        while self.mode == "RT_SURVEY" and not rospy.is_shutdown():
            if waypoint_index == shape:
                waypoint_index = 0
                sim_ctr += 1
            if self.isReadyToFly:

                des_x = loc[waypoint_index][0]
                des_y = loc[waypoint_index][1]
                des_z = loc[waypoint_index][2]
                des_pose.pose.position.x = des_x
                des_pose.pose.position.y = des_y
                des_pose.pose.position.z = des_z
                des_pose.pose.orientation.x = loc[waypoint_index][3]
                des_pose.pose.orientation.y = loc[waypoint_index][4]
                des_pose.pose.orientation.z = loc[waypoint_index][5]
                des_pose.pose.orientation.w = loc[waypoint_index][6]

                curr_x = self.curr_pose.pose.position.x
                curr_y = self.curr_pose.pose.position.y
                curr_z = self.curr_pose.pose.position.z

                dist = math.sqrt((curr_x - des_x)*(curr_x - des_x) + (curr_y - des_y)*(curr_y - des_y) +
                                 (curr_z - des_z)*(curr_z - des_z))
                if dist < self.distThreshold:
                    waypoint_index += 1

            if sim_ctr == 5:
                self.pub.publish("RTS COMPLETE")
                self.saved_location = None

            pose_pub.publish(des_pose)
            rate.sleep()

    def controller(self):
        while not rospy.is_shutdown():

            if self.mode == "SURVEY":
                self.lawnmover(200, 20, 7, 10, 3)

            if self.mode == "HOVER":
                self.hover()

            if self.mode == "SCAN":
                self.scan(20, 3)  # rect_y = 20 (length of the scan rectangle), offset_x = 3: bounds for the small search

            if self.mode == "TEST":
                print self.mode
                self.vel_pub.publish(self.get_descent(0, 0.1, 0))

            if self.mode == "DESCENT":
                self.descent()

            if self.mode == "RT_SURVEY":
                self.rt_survey()

    def planner(self, msg):
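        # planner() acts as a string-driven state machine: detector messages
        # ("FOUND UAV" / "MISSING UAV") and completion messages from the other
        # modes move self.mode through SURVEY -> SCAN -> HOVER -> DESCENT,
        # with RT_SURVEY used to return to the saved location when a scan or
        # hover ends without a confirmed target.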
        if msg.data == "FOUND UAV" and self.mode == "SURVEY":
            self.saved_location = self.curr_pose
            self.mode = "SCAN"

        if msg.data == "FOUND UAV" and self.mode == "SCAN":
            self.detection_count += 1
            print self.detection_count
            if self.detection_count > 25:
                self.hover_loc = [self.curr_pose.pose.position.x,
                                  self.curr_pose.pose.position.y,
                                  self.curr_pose.pose.position.z,
                                  0, 0, 0, 0]
                self.mode = "HOVER"
                self.detection_count = 0

        if msg.data == "FOUND UAV" and self.mode == "HOVER":
            print self.detection_count
            self.detection_count += 1
            if self.detection_count > 40:
                self.mode = "DESCENT"
                self.detection_count = 0

        if msg.data == "MISSING UAV" and self.mode == "DESCENT":
            self.missing_count += 1
            if self.missing_count > 80:
                self.mode = "HOVER"
                self.missing_count = 0

        if msg.data == "FOUND UAV" and self.mode == "DESCENT":
            self.missing_count = 0

        if msg.data == "SCAN COMPLETE":
            self.mode = "RT_SURVEY"
            self.detection_count = 0

        if msg.data == "HOVER COMPLETE":
            if self.waypointIndex == 0:  # TODO remove this, this keeps the drone in a loop of search
                self.mode = "SURVEY"
            else:
                self.mode = "RT_SURVEY"
            self.detection_count = 0

        if msg.data == "RTS COMPLETE":
            self.mode = "SURVEY"

        if msg.data == "PICKUP COMPLETE":
            # self.mode = "CONFIRM_PICKUP"

            # go back and hover at takeoff location
            self.hover_loc = [self.loc[0][0], self.loc[0][1], self.loc[0][2], 0, 0, 0, 0]
            self.mode = "HOVER"
            self.loc = []
            self.waypointIndex = 0
camera_info.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]  # 3x3 row-major matrix
# Projection/camera matrix
#     [fx'  0  cx' Tx]
# P = [ 0  fy' cy' Ty]
#     [ 0   0   1   0]
camera_info.P = [
    361.042694, 0.000000, 360.074173, 0.000000, 0.000000, 423.122498,
    248.546022, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000
]
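# Illustrative sketch (not part of the original node): projecting a 3D point in
# the camera frame through the P matrix above. (X, Y, Z) is a made-up example
# point; with Tx = Ty = 0 this reduces to the usual pinhole equations.
example_X, example_Y, example_Z = 0.5, -0.2, 2.0
example_u = (camera_info.P[0] * example_X + camera_info.P[2] * example_Z) / example_Z  # fx'*X/Z + cx'
example_v = (camera_info.P[5] * example_Y + camera_info.P[6] * example_Z) / example_Z  # fy'*Y/Z + cy'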
# Binning refers here to any camera setting which combines rectangular
#  neighborhoods of pixels into larger "super-pixels." It reduces the
#  resolution of the output image to
#  (width / binning_x) x (height / binning_y).
# The default values binning_x = binning_y = 0 is considered the same
#  as binning_x = binning_y = 1 (no subsampling).
camera_info.binning_x = 1
camera_info.binning_y = 1
camera_info.roi.do_rectify = True
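# Quick check (illustrative, not part of the original node): the effective
# output resolution implied by the binning settings above, treating 0 as 1
# per the comment.
effective_width = camera_info.width / (camera_info.binning_x or 1)
effective_height = camera_info.height / (camera_info.binning_y or 1)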


def publish_info(image):
    global camera_info
    global camera_info_pub
    camera_info.header = image.header
    # camera_info.header.stamp = rospy.Time.now()
    camera_info_pub.publish(camera_info)
    camera_picture_pub.publish(image)


if __name__ == '__main__':
    # driver = telloCameraInfo()
    pass  # (snippet truncated here)
Пример #33
0
    def main_loop(self):
        img = Image()
        cimg = Image()
        r = rospy.Rate(15)
        while not rospy.is_shutdown():
            if self.pub_img_.get_num_connections() == 0:
                r.sleep()
                continue

            image = self.camProxy.getImageRemote(self.nameId)
            if image is None:
                continue
            # Deal with the image
            '''
            #Images received from NAO have
            if self.config['use_ros_time']:
                img.header.stamp = rospy.Time.now()
            else:
                img.header.stamp = rospy.Time(image[4] + image[5]*1e-6)
            '''
            img.header.stamp = rospy.Time.now()
            img.header.frame_id = self.frame_id
            img.height = image[1]
            img.width = image[0]
            nbLayers = image[2]
            if image[3] == kDepthColorSpace:
                encoding = "mono16"
            else:
                rospy.logerr("Received unknown encoding: {0}".format(image[3]))
            img.encoding = encoding
            img.step = img.width * nbLayers
            img.data = image[6]

            self.pub_img_.publish(img)
            
            # deal with the camera info
            infomsg = CameraInfo()
            infomsg.header = img.header
            # yes, this is only for an XTion / Kinect but that's the only thing supported by NAO
            ratio_x = float(640)/float(img.width)
            ratio_y = float(480)/float(img.height)
            infomsg.width = img.width
            infomsg.height = img.height
            # [ 525., 0., 3.1950000000000000e+02, 0., 525., 2.3950000000000000e+02, 0., 0., 1. ]
            infomsg.K = [ 525, 0, 3.1950000000000000e+02,
                         0, 525, 2.3950000000000000e+02,
                         0, 0, 1 ]
            infomsg.P = [ 525, 0, 3.1950000000000000e+02, 0,
                         0, 525, 2.3950000000000000e+02, 0,
                         0, 0, 1, 0 ]
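            # The defaults above are nominal 640x480 Kinect/XTion intrinsics;
            # dividing by ratio_x / ratio_y below rescales the first row of
            # K and P with the actual image width and the second row with the
            # actual image height.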

            for i in range(3):
                infomsg.K[i] = infomsg.K[i] / ratio_x
                infomsg.K[3+i] = infomsg.K[3+i] / ratio_y
                infomsg.P[i] = infomsg.P[i] / ratio_x
                infomsg.P[4+i] = infomsg.P[4+i] / ratio_y

            infomsg.D = []
            infomsg.binning_x = 0
            infomsg.binning_y = 0
            infomsg.distortion_model = ""
            self.pub_info_.publish(infomsg)

            # Currently we only get a depth image from NAO's 3D camera, so we make up a fake
            # color image (all black) and publish it under the image_color topic.
            # This should be updated when a color image from the 3D camera becomes available.
            colorimg = np.zeros((img.height,img.width,3), np.uint8)
            try:
                cimg = self.bridge.cv2_to_imgmsg(colorimg, "bgr8")
                cimg.header.stamp = img.header.stamp
                cimg.header.frame_id = img.header.frame_id
                self.pub_cimg_.publish(cimg)
            except CvBridgeError, e:
                print e

            r.sleep()
    cam_info.R = [0] * 9
    cam_info.K = [0] * 9
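    # cam0 intrinsics are assumed here to be in [fx, fy, cx, cy] order
    # (kalibr-style calibration YAML); that ordering is what the index
    # mapping below encodes.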

    cam_info.K[0] = params['cam0']['intrinsics'][0]
    cam_info.K[2] = params['cam0']['intrinsics'][2]
    cam_info.K[4] = params['cam0']['intrinsics'][1]
    cam_info.K[5] = params['cam0']['intrinsics'][3]
    cam_info.K[8] = 1

    cam_info.R[0] = 1
    cam_info.R[4] = 1
    cam_info.R[8] = 1

    cam_info.P[0] = params['cam0']['intrinsics'][0]
    cam_info.P[2] = params['cam0']['intrinsics'][2]
    cam_info.P[5] = params['cam0']['intrinsics'][1]
    cam_info.P[6] = params['cam0']['intrinsics'][3]
    cam_info.P[10] = 1



    cam_info.binning_x = 0
    cam_info.binning_y = 0
    cam_info.roi.x_offset = 0
    cam_info.roi.y_offset = 0
    cam_info.roi.height = params['cam0']['resolution'][1]
    cam_info.roi.width = params['cam0']['resolution'][0]
    cam_info.roi.do_rectify = False


    listener()
    rospy.Subscriber("image_raw", Image, callback)

    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()

if __name__ == '__main__':

    rospy.init_node('pub_camera_info_node', anonymous=True)

    calib_file = rospy.get_param('~calib')

    with open(calib_file, 'r') as f:
        params = yaml.load(f)

    cam_info.height = params['height']
    cam_info.width = params['width']
    cam_info.distortion_model = params['distortion_model']
    cam_info.K = params['K']
    cam_info.D = params['D']
    cam_info.R = params['R']
    cam_info.P = params['P']
    cam_info.binning_x = params['binning_x']
    cam_info.binning_y = params['binning_y']
    cam_info.roi.x_offset = params['roi']['x_offset']
    cam_info.roi.y_offset = params['roi']['y_offset']
    cam_info.roi.height = params['roi']['height']
    cam_info.roi.width = params['roi']['width']
    cam_info.roi.do_rectify = params['roi']['do_rectify']


    listener()
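
# For reference, the calibration YAML loaded above is expected to carry the
# fields read in __main__ (an illustrative sketch; values are placeholders,
# not a real calibration):
#   height: 480
#   width: 640
#   distortion_model: plumb_bob
#   D: [k1, k2, t1, t2, k3]
#   K: [fx, 0, cx, 0, fy, cy, 0, 0, 1]
#   R: [1, 0, 0, 0, 1, 0, 0, 0, 1]
#   P: [fx, 0, cx, Tx, 0, fy, cy, Ty, 0, 0, 1, 0]
#   binning_x: 0
#   binning_y: 0
#   roi: {x_offset: 0, y_offset: 0, height: 0, width: 0, do_rectify: false}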