Example #1
 def publish(self):
     # Get the image.
     image = self.__videoDeviceProxy.getImageRemote(self.__videoDeviceProxyName)

     # Create Image message.
     ImageMessage = Image()
     ImageMessage.header.stamp.secs = image[5]
     ImageMessage.width = image[0]
     ImageMessage.height = image[1]
     ImageMessage.step = image[2] * image[0]
     ImageMessage.is_bigendian = False
     ImageMessage.encoding = 'bgr8'
     ImageMessage.data = image[6]

     self.__imagePublisher.publish(ImageMessage)

     # Create CameraInfo message.
     # Data from the calibration phase is hard-coded for now.
     CameraInfoMessage = CameraInfo()
     CameraInfoMessage.header.stamp.secs = image[5]
     CameraInfoMessage.width = image[0]
     CameraInfoMessage.height = image[1]
     CameraInfoMessage.D = [-0.0769218451517258, 0.16183180613612602, 0.0011626049774280595, 0.0018733894100460534, 0.0]
     CameraInfoMessage.K = [581.090096189648, 0.0, 341.0926325830606, 0.0, 583.0323248080421, 241.02441593704128, 0.0, 0.0, 1.0]
     CameraInfoMessage.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
     CameraInfoMessage.P = [580.5918359588198, 0.0, 340.76398441334106, 0.0, 0.0, 582.4042541534321, 241.04182225157172, 0.0, 0.0, 0.0, 1.0, 0.0]
     CameraInfoMessage.distortion_model = 'plumb_bob'

     #CameraInfoMessage.roi.x_offset = self.__ROIXOffset
     #CameraInfoMessage.roi.y_offset = self.__ROIYOffset
     #CameraInfoMessage.roi.width = self.__ROIWidth
     #CameraInfoMessage.roi.height = self.__ROIHeight
     #CameraInfoMessage.roi.do_rectify = self.__ROIDoRectify

     self.__cameraInfoPublisher.publish(CameraInfoMessage)
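The two publishers used above are created elsewhere in the class; a minimal sketch of how they might be set up (node name, topic names and queue sizes are assumptions, not taken from the original):

import rospy
from sensor_msgs.msg import Image, CameraInfo

# Hypothetical setup; the original class presumably does something equivalent.
rospy.init_node('nao_camera_publisher')
imagePublisher = rospy.Publisher('camera/image_raw', Image, queue_size=1)
cameraInfoPublisher = rospy.Publisher('camera/camera_info', CameraInfo, queue_size=1)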
Example #2
    def send_test_messages(self, filename):
        self.msg_received = False
        # Publish the camera info TODO make this a field in the annotations file to dictate the source calibration file
        msg_info = CameraInfo()
        msg_info.height = 480
        msg_info.width = 640
        msg_info.distortion_model = "plumb_bob"
        msg_info.D = [-0.28048157543793056, 0.05674481026365553, -0.000988764087143394, -0.00026869128565781613, 0.0]
        msg_info.K = [315.128501, 0.0, 323.069638, 0.0, 320.096636, 218.012581, 0.0, 0.0, 1.0]
        msg_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg_info.P = [217.48876953125, 0.0, 321.3154072384932, 0.0, 0.0, 250.71084594726562, 202.30416165274983, 0.0,
                      0.0, 0.0, 1.0, 0.0]
        msg_info.roi.do_rectify = False

        # Publish the test image
        img = cv2.imread(filename)
        cvb = CvBridge()
        msg_raw = cvb.cv2_to_imgmsg(img, encoding="bgr8")
        self.pub_info.publish(msg_info)
        self.pub_raw.publish(msg_raw)

        # Wait for the message to be received
        timeout = rospy.Time.now() + rospy.Duration(10)  # Wait at most 10 seconds for the node to reply
        while not self.msg_received and not rospy.is_shutdown() and rospy.Time.now() < timeout:
            rospy.sleep(0.1)
        self.assertLess(rospy.Time.now(), timeout, "Waiting for apriltag detection timed out.")
Example #3
def yaml_to_CameraInfo(calib_yaml):
    """
    Parse a yaml file containing camera calibration data (as produced by
    rosrun camera_calibration cameracalibrator.py) into a
    sensor_msgs/CameraInfo msg.

    Parameters
    ----------
    calib_yaml : str
        YAML string containing the camera calibration data

    Returns
    -------
    camera_info_msg : sensor_msgs.msg.CameraInfo
        A sensor_msgs.msg.CameraInfo message containing the camera calibration
        data
    """
    # Load data from file
    calib_data = yaml.load(calib_yaml)
    # Parse
    camera_info_msg = CameraInfo()
    camera_info_msg.width = calib_data["image_width"]
    camera_info_msg.height = calib_data["image_height"]
    camera_info_msg.K = calib_data["camera_matrix"]["data"]
    camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
    camera_info_msg.R = calib_data["rectification_matrix"]["data"]
    camera_info_msg.P = calib_data["projection_matrix"]["data"]
    camera_info_msg.distortion_model = calib_data["distortion_model"]
    return camera_info_msg
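A minimal usage sketch for the function above; note that it takes the YAML text itself, not a path (the file name and topic are assumptions):

import rospy
from sensor_msgs.msg import CameraInfo

rospy.init_node('camera_info_publisher')
with open('calibration.yaml', 'r') as f:    # hypothetical calibration file
    msg = yaml_to_CameraInfo(f.read())      # pass the YAML string, not the filename
pub = rospy.Publisher('camera_info', CameraInfo, queue_size=1, latch=True)
pub.publish(msg)
rospy.spin()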
Example #4
  def __init__(self):
    rospy.init_node('image_converter', anonymous=True)
    self.filtered_face_locations = rospy.get_param('camera_topic')
    self.image_pub = rospy.Publisher(self.filtered_face_locations,Image)
    self.image_info = rospy.Publisher('camera_info',CameraInfo)
    self.bridge = CvBridge()
    cap = cv2.VideoCapture('/home/icog-labs/Videos/video.mp4')
    print cap.get(5)  # property 5 = CAP_PROP_FPS (frames per second)
    path = rospkg.RosPack().get_path("robots_config")
    path = path + "/robot/camera.yaml"
    stream = file(path, 'r')
    calib_data = yaml.load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data['image_width']
    cam_info.height = calib_data['image_height']
    cam_info.K = calib_data['camera_matrix']['data']
    cam_info.D = calib_data['distortion_coefficients']['data']
    cam_info.R = calib_data['rectification_matrix']['data']
    cam_info.P = calib_data['projection_matrix']['data']
    cam_info.distortion_model = calib_data['distortion_model']
    while (not rospy.is_shutdown()) and (cap.isOpened()):
        self.keystroke = cv2.waitKey(1000 / 30)
        ret, frame = cap.read()

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # cv2.imshow('frame', gray)
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(gray, "mono8"))
        self.image_info.publish(cam_info)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #5
File: image.py  Project: lucasw/vimjay
    def __init__(self):
        rospy.init_node('image_publish')
        name = sys.argv[1]
        image = cv2.imread(name)
        #cv2.imshow("im", image)
        #cv2.waitKey(5)

        hz = rospy.get_param("~rate", 1)
        frame_id = rospy.get_param("~frame_id", "map")
        use_image = rospy.get_param("~use_image", True)
        rate = rospy.Rate(hz)

        self.ci_in = None
        ci_sub = rospy.Subscriber('camera_info_in', CameraInfo,
                                  self.camera_info_callback, queue_size=1)

        if use_image:
            pub = rospy.Publisher('image', Image, queue_size=1)
        ci_pub = rospy.Publisher('camera_info', CameraInfo, queue_size=1)

        msg = Image()
        msg.header.stamp = rospy.Time.now()
        msg.header.frame_id = frame_id
        msg.encoding = 'bgr8'
        msg.height = image.shape[0]
        msg.width = image.shape[1]
        msg.step = image.shape[1] * 3
        msg.data = image.tostring()
        if use_image:
            pub.publish(msg)

        ci = CameraInfo()
        ci.header = msg.header
        ci.height = msg.height
        ci.width = msg.width
        ci.distortion_model = "plumb_bob"
        # TODO(lucasw) need a way to set these values- have this node
        # subscribe to an input CameraInfo?
        ci.D = [0.0, 0.0, 0.0, 0, 0]
        ci.K = [500.0, 0.0, msg.width/2, 0.0, 500.0, msg.height/2, 0.0, 0.0, 1.0]
        ci.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        ci.P = [500.0, 0.0, msg.width/2, 0.0, 0.0, 500.0, msg.height/2, 0.0,  0.0, 0.0, 1.0, 0.0]
        # ci_pub.publish(ci)

        # TODO(lwalter) only run this if hz is positive,
        # otherwise wait for input trigger message to publish an image
        while not rospy.is_shutdown():
            if self.ci_in is not None:
                ci = self.ci_in

            msg.header.stamp = rospy.Time.now()
            ci.header = msg.header
            if use_image:
                pub.publish(msg)
            ci_pub.publish(ci)

            if hz <= 0:
                rospy.sleep(1.0)  # rospy.sleep() needs a duration argument
            else:
                rate.sleep()
Example #6
def sim():
    global t, pose, camInfoMsg
    global centerPub, targetVelPub, camVelPub, camInfoPub, br, tfl
    global switchTimer, imageTimer, estimatorOn
    
    rospy.init_node("sim")
    estimatorOn = True
    
    centerPub = rospy.Publisher("markerCenters",Center,queue_size=10)
    targetVelPub = rospy.Publisher("ugv0/body_vel",TwistStamped,queue_size=10)
    camVelPub = rospy.Publisher("image/body_vel",TwistStamped,queue_size=10)
    cameraName = rospy.get_param(rospy.get_name()+"/camera","camera")
    camInfoPub = rospy.Publisher(cameraName+"/camera_info",CameraInfo,queue_size=1)
    br = tf.TransformBroadcaster()
    tfl = tf.TransformListener()
    
    # Camera parameters
    camInfoMsg = CameraInfo()
    camInfoMsg.D = distCoeffs.tolist()
    camInfoMsg.K = camMat.reshape((-1)).tolist()
    
    # Wait for node to get cam info
    while (camVelPub.get_num_connections() == 0) and (not rospy.is_shutdown()):
        # publish camera parameters
        camInfoPub.publish(camInfoMsg)
        rospy.sleep(0.5)
    
    # Publishers
    rospy.Timer(rospy.Duration(1.0/velRate),velPubCB)
    imageTimer = rospy.Timer(rospy.Duration(1.0/frameRate),imagePubCB)
    rospy.Timer(rospy.Duration(0.5),camInfoPubCB)
    switchTimer = rospy.Timer(rospy.Duration(11.0),switchCB,oneshot=True)
    
    # Initial conditions
    startTime = rospy.get_time()
    camPos = np.array([0,-1,1.5])
    camOrient = np.array([-1*np.sqrt(2)/2,0,0,np.sqrt(2)/2])
    targetPos = np.array([0,1,0])
    targetOrient = np.array([0,0,0,1])
    pose = np.concatenate((camPos,camOrient,targetPos,targetOrient))
    
    r = rospy.Rate(intRate)
    h = 1.0/intRate
    while not rospy.is_shutdown():
        t = np.array(rospy.get_time() - startTime)
        poseDot = poseDyn(t,pose)
        pose = pose + poseDot*h
        
        # Send Transform
        camPos = pose[0:3]
        camOrient = pose[3:7]
        targetPos = pose[7:10]
        targetOrient = pose[10:]
        br.sendTransform(targetPos,targetOrient,rospy.Time.now(),"ugv0","world")
        br.sendTransform(camPos,camOrient,rospy.Time.now(),"image","world")
        
        r.sleep()
Example #7
	def createMsg(self):
		msg = CameraInfo()
		msg.height = 480
		msg.width = 640
		msg.distortion_model = "plumb_bob"
		msg.D = [0.08199114285264993, -0.04549390835713936, -0.00040960290587863145, 0.0009833748346549968, 0.0]
		msg.K = [723.5609128875188, 0.0, 315.9845354772031, 0.0, 732.2615109685506, 242.26660681756633,\
				 0.0, 0.0, 1.0]
		msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
		msg.P = [737.1736450195312, 0.0, 316.0324931112791, 0.0, 0.0, 746.1573486328125, 241.59042622688867,\
				 0.0, 0.0, 0.0, 1.0, 0.0]
		self.msg = msg
Example #8
def load_cam_info(calib_file):
    cam_info = CameraInfo()
    with open(calib_file,'r') as cam_calib_file :
        cam_calib = yaml.load(cam_calib_file)
        cam_info.height = cam_calib['image_height']
        cam_info.width = cam_calib['image_width']
        cam_info.K = cam_calib['camera_matrix']['data']
        cam_info.D = cam_calib['distortion_coefficients']['data']
        cam_info.R = cam_calib['rectification_matrix']['data']
        cam_info.P = cam_calib['projection_matrix']['data']
        cam_info.distortion_model = cam_calib['distortion_model']
    return cam_info
Example #9
 def parse_yaml(self, filename):
     stream = file(filename, "r")
     calib_data = yaml.load(stream)
     cam_info = CameraInfo()
     cam_info.width = calib_data["image_width"]
     cam_info.height = calib_data["image_height"]
     cam_info.K = calib_data["camera_matrix"]["data"]
     cam_info.D = calib_data["distortion_coefficients"]["data"]
     cam_info.R = calib_data["rectification_matrix"]["data"]
     cam_info.P = calib_data["projection_matrix"]["data"]
     cam_info.distortion_model = calib_data["distortion_model"]
     return cam_info
Example #10
def parse_yaml(filename):
    stream = file(filename, 'r')
    calib_data = yaml.load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data['image_width']
    cam_info.height = calib_data['image_height']
    cam_info.K = calib_data['camera_matrix']['data']
    cam_info.D = calib_data['distortion_coefficients']['data']
    cam_info.R = calib_data['rectification_matrix']['data']
    cam_info.P = calib_data['projection_matrix']['data']
    cam_info.distortion_model = calib_data['distortion_model']
    return cam_info
Example #11
def yaml_to_CameraInfo(yaml_fname):
    with open(yaml_fname, "r") as file_handle:
        calib_data = yaml.load(file_handle)
 
    camera_info_msg = CameraInfo()
    camera_info_msg.width = calib_data["image_width"]
    camera_info_msg.height = calib_data["image_height"]
    camera_info_msg.K = calib_data["camera_matrix"]["data"]
    camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
    camera_info_msg.R = calib_data["rectification_matrix"]["data"]
    camera_info_msg.P = calib_data["projection_matrix"]["data"]
    camera_info_msg.distortion_model = calib_data["distortion_model"]
    return camera_info_msg
Example #12
    def get_cam_info(self, cam_name):
        cam_name = "calibrations/" + cam_name + ".yaml"
        stream = file(os.path.join(os.path.dirname(__file__), cam_name), 'r')

        calib_data = yaml.load(stream)

        cam_info = CameraInfo()
        cam_info.width = calib_data['image_width']
        cam_info.height = calib_data['image_height']
        cam_info.K = calib_data['camera_matrix']['data']
        cam_info.D = calib_data['distortion_coefficients']['data']
        cam_info.R = calib_data['rectification_matrix']['data']
        cam_info.P = calib_data['projection_matrix']['data']
        cam_info.distortion_model = calib_data['distortion_model']
        return cam_info
Example #13
    def toCameraInfo(self):
        msg = CameraInfo()

        (msg.width, msg.height) = self.size

        if self.D.size > 5:
            msg.distortion_model = "rational_polynomial"
        else:
            msg.distortion_model = "plumb_bob"

        msg.D = numpy.ravel(self.D).copy().tolist()
        msg.K = numpy.ravel(self.K).copy().tolist()
        msg.R = numpy.ravel(self.R).copy().tolist()
        msg.P = numpy.ravel(self.P).copy().tolist()

        return msg
Example #14
def GetCameraInfo(width, height):
	cam_info = CameraInfo()
	cam_info.width = width
	cam_info.height = height
	cam_info.distortion_model = model
	#cam_info.D = [0.0]*5
	#cam_info.K = [0.0]*9
	#cam_info.R = [0.0]*9
	#cam_info.P = [0.0]*12
	cam_info.D = D
	cam_info.K = K
	cam_info.R = R
	cam_info.P = P
	cam_info.binning_x = 0
	cam_info.binning_y = 0
	return cam_info
Example #15
def pickle_to_info(info_pickle):
    info = CameraInfo()
    info.header.stamp = rospy.Time()
    info.header.seq = info_pickle.header.seq
    info.header.frame_id = info_pickle.header.frame_id
    info.roi.x_offset = info_pickle.roi.x_offset
    info.roi.y_offset = info_pickle.roi.y_offset
    info.roi.height = info_pickle.roi.height
    info.roi.width = info_pickle.roi.width
    info.height = info_pickle.height
    info.width = info_pickle.width
    info.D = info_pickle.D
    info.K = info_pickle.K
    info.R = info_pickle.R
    info.P = info_pickle.P
    
    return info
Example #16
def sim():
    global t, pose, camInfoMsg
    global centerPub, velPub, camInfoPub, br, tfl
    
    rospy.init_node("sim")
    
    centerPub = rospy.Publisher("markerCenters",Center,queue_size=10)
    velPub = rospy.Publisher("image/local_vel",TwistStamped,queue_size=10)
    cameraName = rospy.get_param(rospy.get_name()+"/camera","camera")
    camInfoPub = rospy.Publisher(cameraName+"/camera_info",CameraInfo,queue_size=1)
    br = tf.TransformBroadcaster()
    tfl = tf.TransformListener()
    
    # Camera parameters
    camInfoMsg = CameraInfo()
    camInfoMsg.D = distCoeffs.tolist()
    camInfoMsg.K = camMat.reshape((-1)).tolist()
    
    # Wait for node to get cam info
    while (velPub.get_num_connections() == 0) and (not rospy.is_shutdown()):
        # publish camera parameters
        camInfoPub.publish(camInfoMsg)
        rospy.sleep(0.5)
    
    # Publishers
    rospy.Timer(rospy.Duration(1.0/velRate),velPubCB)
    rospy.Timer(rospy.Duration(1.0/frameRate),imagePubCB)
    rospy.Timer(rospy.Duration(0.5),camInfoPubCB)
    
    # Initial conditions
    startTime = rospy.get_time()
    camPos = np.array([0,-1,1.5])
    camOrient = np.array([-1*np.sqrt(2)/2,0,0,np.sqrt(2)/2])
    targetPos = np.array([-1.5,1.5,0])
    targetOrient = np.array([0,0,0,1])
    pose = np.concatenate((camPos,camOrient,targetPos,targetOrient))
    
    r = rospy.Rate(intRate)
    h = 1.0/intRate
    while not rospy.is_shutdown():
        t = np.array(rospy.get_time() - startTime)
        poseDot = poseDyn(t,pose)
        pose = pose + poseDot*h
        
        r.sleep()
Example #17
    def default(self, ci='unused'):
        if not self.component_instance.capturing:
            return # press [Space] key to enable capturing

        image_local = self.data['image']

        image = Image()
        image.header = self.get_ros_header()
        image.height = self.component_instance.image_height
        image.width = self.component_instance.image_width
        image.encoding = self.encoding
        image.step = image.width * 4

        # VideoTexture.ImageRender implements the buffer interface
        image.data = bytes(image_local)

        # fill these 3 parameters to get a correct image with a stereo camera
        Tx = 0
        Ty = 0
        R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

        intrinsic = self.data['intrinsic_matrix']

        camera_info = CameraInfo()
        camera_info.header = image.header
        camera_info.height = image.height
        camera_info.width = image.width
        camera_info.distortion_model = 'plumb_bob'
        camera_info.D = [0]
        camera_info.K = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2],
                         intrinsic[1][0], intrinsic[1][1], intrinsic[1][2],
                         intrinsic[2][0], intrinsic[2][1], intrinsic[2][2]]
        camera_info.R = R
        camera_info.P = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2], Tx,
                         intrinsic[1][0], intrinsic[1][1], intrinsic[1][2], Ty,
                         intrinsic[2][0], intrinsic[2][1], intrinsic[2][2], 0]

        if self.pub_tf:
            self.publish_with_robot_transform(image)
        else:
            self.publish(image)
        self.topic_camera_info.publish(camera_info)
Example #18
def parse_yaml(filename):
    stream = file(filename, "r")
    calib_data = yaml.load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data["image_width"]
    cam_info.height = calib_data["image_height"]
    cam_info.K = calib_data["camera_matrix"]["data"]
    cam_info.D = calib_data["distortion_coefficients"]["data"]
    cam_info.R = calib_data["rectification_matrix"]["data"]
    cam_info.P = calib_data["projection_matrix"]["data"]
    cam_info.distortion_model = calib_data["distortion_model"]
    cam_info.binning_x = calib_data["binning_x"]
    cam_info.binning_y = calib_data["binning_y"]
    cam_info.roi.x_offset = calib_data["roi"]["x_offset"]
    cam_info.roi.y_offset = calib_data["roi"]["y_offset"]
    cam_info.roi.height = calib_data["roi"]["height"]
    cam_info.roi.width = calib_data["roi"]["width"]
    cam_info.roi.do_rectify = calib_data["roi"]["do_rectify"]

    return cam_info
Example #19
    def fetch_image(self, cam):
        cam.simulate()

        if not cam.pixels:
            return None, None
        cv_img = cv.CreateImageHeader((cam.width , cam.height), cv.IPL_DEPTH_8U, 3)
        cv.SetData(cv_img, cam.pixels, cam.width*3)
        cv.ConvertImage(cv_img, cv_img, cv.CV_CVTIMG_FLIP)
        im = self.bridge.cv_to_imgmsg(cv_img, "rgb8")

        caminfo = CameraInfo()
        caminfo.header = im.header
        caminfo.height = cam.height
        caminfo.width = cam.width
        caminfo.D = 5*[0.]
        caminfo.K = sum([list(r) for r in cam.K],[])
        caminfo.P = sum([list(r) for r in cam.P],[])
        caminfo.R = sum([list(r) for r in cam.R],[])

        return im, caminfo
Example #20
def parse_yaml(filename):
    stream = file(filename, 'r')
    calib_data = yaml.load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data['image_width']
    cam_info.height = calib_data['image_height']
    cam_info.K = calib_data['camera_matrix']['data']
    cam_info.D = calib_data['distortion_coefficients']['data']
    cam_info.R = calib_data['rectification_matrix']['data']
    cam_info.P = calib_data['projection_matrix']['data']
    cam_info.distortion_model = calib_data['distortion_model']
    cam_info.binning_x = calib_data['binning_x']
    cam_info.binning_y = calib_data['binning_y']
    cam_info.roi.x_offset = calib_data['roi']['x_offset']
    cam_info.roi.y_offset = calib_data['roi']['y_offset']
    cam_info.roi.height = calib_data['roi']['height']
    cam_info.roi.width = calib_data['roi']['width']
    cam_info.roi.do_rectify = calib_data['roi']['do_rectify']
    
    return cam_info
Example #21
File: sensors.py  Project: cyy1991/carla
    def build_camera_info(self):
        """
        computing camera info

        camera info doesn't change over time
        """
        camera_info = CameraInfo()
        camera_info.header.frame_id = self.name
        camera_info.width = self.carla_object.ImageSizeX
        camera_info.height = self.carla_object.ImageSizeY
        camera_info.distortion_model = 'plumb_bob'
        cx = self.carla_object.ImageSizeX / 2.0
        cy = self.carla_object.ImageSizeY / 2.0
        fx = self.carla_object.ImageSizeX / (
            2.0 * math.tan(self.carla_object.FOV * math.pi / 360.0))
        fy = fx
        camera_info.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1]
        camera_info.D = [0, 0, 0, 0, 0]
        camera_info.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]
        camera_info.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1.0, 0]
        self._camera_info = camera_info
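The focal length above follows the pinhole relation fx = width / (2 * tan(FOV / 2)); a quick sanity check of that formula (the width and FOV values here are placeholders, not CARLA defaults):

import math

width = 800        # placeholder image width in pixels
fov_deg = 90.0     # placeholder horizontal field of view in degrees
fx = width / (2.0 * math.tan(fov_deg * math.pi / 360.0))
print(fx)          # 400.0 -- a 90 degree FOV gives a focal length of width / 2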
Example #22
def test():
    rospy.wait_for_service(service_name)

    # build calibration for the camera in 1360x1024 mode
    # (the commented-out values below include a theoretical Unibrain Fire-i calibration)
    cinfo = CameraInfo()
    cinfo.width = 1360
    cinfo.height = 1024
    #cx = (cinfo.width - 1.0)/2.0
    #cy = (cinfo.height - 1.0)/2.0
    #fx = fy = 0.0043                    # Unibrain Fire-i focal length
    #fx = 1046.16197
    #fy = 1043.87094
    #cx = 695.62128
    #cy = 544.32183
    #k1 = -0.23268
    #k2 = 0.08580
    #p1 = 0.00098
    #p2 = -0.00022
    #cinfo.K = [fx, 0., cx, 0., fy, cy, 0., 0., 1.]
    #cinfo.R = [1., 0., 0., 0., 1., 0., 0., 0., 1.]
    #cinfo.P = [fx, 0., cx, 0., 0., fy, cy, 0., 0., 0., 1., 0.]

    cinfo.K = [430.21554970319971, 0.0, 306.6913434743704, 0.0, 430.53169252696676, 227.22480030078816, 0.0, 0.0, 1.0]
    cinfo.D = [-0.33758562758914146, 0.11161239414304096, -0.00021819272592442094, -3.029195446330518e-05, 0]

    #cinfo.K = [4827.94, 0, 1223.5, 0, 4835.62, 1024.5, 0, 0, 1]
    #cinfo.D = [-0.41527, 0.31874, -0.00197, 0.00071, 0]
    #cinfo.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
    #cinfo.P = [4827.94, 0, 1223.5, 0, 0, 4835.62, 1024.5, 0, 0, 0, 1, 0]


    try:
        set_camera_info = rospy.ServiceProxy(service_name, SetCameraInfo)
        response = set_camera_info(cinfo)
        rospy.loginfo("set_camera_info: success=" + str(response.success)
                      + ", status=" + str(response.status_message))
        return response.success
    except rospy.ServiceException, e:
        print "Service call failed: %s"%e
Example #23
def get_camera_info(hard_coded=True):

    if hard_coded:
        cx = 319.5
        cy = 239.5
        fx = 525.5
        fy = 525.5

        return (cx, cy, fx, fy)

    #if we are using a different camera, then
    #we can listen to the ros camera info topic for that device
    #and get our values here.
    else:

        import image_geometry
        from sensor_msgs.msg import CameraInfo

        cam_info = CameraInfo()

        cam_info.height = 480
        cam_info.width = 640
        cam_info.distortion_model = "plumb_bob"
        cam_info.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        cam_info.K = [525.0, 0.0, 319.5, 0.0, 525.0, 239.5, 0.0, 0.0, 1.0]
        cam_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        cam_info.P = [525.0, 0.0, 319.5, 0.0, 0.0, 525.0, 239.5, 0.0, 0.0, 0.0, 1.0, 0.0]
        cam_info.binning_x = 0
        cam_info.binning_y = 0
        cam_info.roi.x_offset = 0
        cam_info.roi.y_offset = 0
        cam_info.roi.height = 0
        cam_info.roi.width = 0
        cam_info.roi.do_rectify = False

        camera_model = image_geometry.PinholeCameraModel()
        camera_model.fromCameraInfo(cam_info)

        return camera_model.cx(), camera_model.cy(), camera_model.fx(), camera_model.fy()
Example #24
 def dr_callback(self, config, level):
     ci = CameraInfo()
     ci.header.stamp = rospy.Time.now()
     ci.width = config['width']
     ci.height = config['height']
     ci.distortion_model = config['distortion_model']
     ci.D = [config['d0'], config['d1'], config['d2'], config['d3'], config['d4']]
     ci.K[0 * 3 + 0] = config['fx']
     ci.K[0 * 3 + 2] = config['cx']
     ci.K[1 * 3 + 1] = config['fy']
     ci.K[1 * 3 + 2] = config['cy']
     ci.K[2 * 3 + 2] = 1
     ci.P[0 * 4 + 0] = config['fx']
     ci.P[0 * 4 + 2] = config['cx']
     ci.P[1 * 4 + 1] = config['fy']
     ci.P[1 * 4 + 2] = config['cy']
     ci.P[2 * 4 + 2] = 1
     ci.R[0] = 1
     ci.R[4] = 1
     ci.R[8] = 1
     self.camera_info = ci
     self.pub.publish(ci)
     return config
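The index arithmetic above works because K, R and P are fixed-size, row-major arrays (3x3, 3x3 and 3x4); a freshly constructed CameraInfo in rospy initializes them to zeros. A minimal sketch of the same fill-in pattern (the intrinsics are placeholders, and the arrays are re-initialized explicitly so the sketch does not rely on the defaults):

from sensor_msgs.msg import CameraInfo

ci = CameraInfo()
ci.K = [0.0] * 9    # 3x3 intrinsic matrix, row-major
ci.P = [0.0] * 12   # 3x4 projection matrix, row-major
fx, fy, cx, cy = 525.0, 525.0, 319.5, 239.5   # placeholder intrinsics
ci.K[0], ci.K[2], ci.K[4], ci.K[5], ci.K[8] = fx, cx, fy, cy, 1.0
ci.P[0], ci.P[2], ci.P[5], ci.P[6], ci.P[10] = fx, cx, fy, cy, 1.0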
Example #25
def loadCalibrationFile(filename, cname):
    """ Load calibration data from a file.

    This function returns a `sensor_msgs/CameraInfo`_ message, based
    on the filename parameter.  An empty or non-existent file is *not*
    considered an error; a null CameraInfo is provided in that case.

    :param filename: location of CameraInfo to read
    :param cname: Camera name.
    :returns: `sensor_msgs/CameraInfo`_ message containing calibration,
              if file readable; null calibration message otherwise.
    :raises: :exc:`IOError` if an existing calibration file is unreadable.

    """
    ci = CameraInfo()
    try:
        f = open(filename)
        calib = yaml.load(f)
        if calib is not None:
            if calib['camera_name'] != cname:
                rospy.logwarn("[" + cname + "] does not match name " +
                              calib['camera_name'] + " in file " + filename)

            # fill in CameraInfo fields
            ci.width = calib['image_width']
            ci.height = calib['image_height']
            ci.distortion_model = calib['distortion_model']
            ci.D = calib['distortion_coefficients']['data']
            ci.K = calib['camera_matrix']['data']
            ci.R = calib['rectification_matrix']['data']
            ci.P = calib['projection_matrix']['data']

    except IOError:                     # OK if file did not exist
        pass

    return ci
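A minimal usage sketch for the loader above (the calibration path and camera name are assumptions):

info = loadCalibrationFile('/tmp/head_camera.yaml', 'head_camera')  # hypothetical path and name
if any(info.K):
    print('loaded %dx%d calibration' % (info.width, info.height))
else:
    print('no calibration on file; got a null CameraInfo')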
Example #26
 def get_camera_info(self, camera_name, img_name='depth'):
     camera_info = CameraInfo()
     file_url = ''
     try : 
         file_url = rospy.get_param(camera_name+'/driver/'+img_name+'_camera_info_url').replace('file://','')
     except Exception,e: print e
             
     if not os.path.exists(file_url):
         print 'ERROR: Could not read '+ camera_name+ ' '+img_name +'_camera_info'
         print '     Calibrate the sensor and try again !'
         exit(0)
         return
 
     print 'Loading camera '+img_name +'_camera_info for '+camera_name+' at:',file_url
     with open(file_url, 'r') as f:
         calib = yaml.safe_load(f.read())
         camera_info.K = np.matrix(calib["camera_matrix"]["data"])
         camera_info.D = np.array(calib["distortion_coefficients"]["data"])
         camera_info.R = np.matrix(calib["rectification_matrix"]["data"])
         camera_info.P = np.matrix(calib["projection_matrix"]["data"])
         camera_info.height = calib["image_height"]
         camera_info.width = calib["image_width"]
         print camera_info
     return camera_info
Example #27
# get color camera data
profile = pipeline.get_active_profile()
color_profile = rs.video_stream_profile(profile.get_stream(rs.stream.color))
color_intrinsics = color_profile.get_intrinsics()

camera_info = CameraInfo()
camera_info.width = color_intrinsics.width
camera_info.height = color_intrinsics.height
camera_info.distortion_model = 'plumb_bob'
cx = color_intrinsics.ppx
cy = color_intrinsics.ppy
fx = color_intrinsics.fx
fy = color_intrinsics.fy
camera_info.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1]
camera_info.D = [0, 0, 0, 0, 0]
camera_info.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]
camera_info.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1.0, 0]

bridge = CvBridge()

print("Start node")

while not rospy.is_shutdown():

    # Get data from cameras
    frames = pipeline.wait_for_frames()
    color_frame = frames.get_color_frame()
    depth_frame = frames.get_depth_frame()

    timestamp = frames.get_timestamp()
Example #28
import rospy
from sensor_msgs.msg import CameraInfo, PointCloud2

if __name__ == "__main__":
    rospy.init_node("laser_camera_fov_sample")
    pub_info = rospy.Publisher("~info", CameraInfo)
    pub_cloud = rospy.Publisher("~cloud", PointCloud2)
    rate = rospy.Rate(1)
    while not rospy.is_shutdown():
        info = CameraInfo()
        info.header.stamp = rospy.Time.now()
        info.header.frame_id = "origin"
        info.height = 544
        info.width = 1024
        info.D = [-0.20831339061260223, 0.11341656744480133,
                  -0.00035378438769839704, -1.746419547998812e-05,
                  0.013720948249101639, 0.0, 0.0, 0.0]
        info.K = [598.6097412109375, 0.0, 515.5960693359375,
                  0.0, 600.0813598632812, 255.42999267578125,
                  0.0, 0.0, 1.0]
        info.R = [0.999993085861206, 0.0022128731943666935, -0.0029819998890161514,
                  -0.0022144035901874304, 0.9999974370002747, -0.0005100672133266926,
                  0.002980863442644477, 0.0005166670307517052, 0.9999954104423523]
        info.P = [575.3445434570312, 0.0, 519.5, 0.0,
                  0.0, 575.3445434570312, 259.5, 0.0,
                  0.0, 0.0, 1.0, 0.0]
        pub_info.publish(info)
        cloud = PointCloud2()
        cloud.header.frame_id = "origin"
        pub_cloud.publish(cloud)
        rate.sleep()
Example #29
    def grabAndPublish(self, publisher):
        # Grab image from simulation and apply the action
        obs, reward, done, info = self.env.step(self.action)
        #rospy.loginfo('[%s] step_count = %s, reward=%.3f' % (self.node_name, self.env.unwrapped.step_count, reward))

        image = cv2.cvtColor(obs, cv2.COLOR_BGR2RGB)  # Correct color for cv2
        """
        ##show image
        cv2.namedWindow("Image")
        if (not image is None):
            cv2.imshow("Image",image)
        if cv2.waitKey(1)!=-1:     #Burak, needs to modify this line to work on your computer, THANKS!
            cv2.destroyAllWindows()
        """
        # Publish raw image
        image_msg = self.bridge.cv2_to_imgmsg(image, "bgr8")

        image_msg.header.stamp = rospy.Time.now()
        # Publish
        publisher.publish(image_msg)

        cam_msg = CameraInfo()
        cam_msg.height = 480
        cam_msg.width = 640
        #cam_msg.height = 240
        #cam_msg.width= 320
        cam_msg.header.stamp = image_msg.header.stamp
        cam_msg.header.frame_id = 'cam'

        cam_msg.distortion_model = 'plumb_bob'
        cam_msg.D = [
            -0.2, 0.0305, 0.0005859930422629722, -0.0006697840226199427, 0
        ]
        cam_msg.K = [
            305.5718893575089,
            0,
            303.0797142544728,
            0,
            308.8338858195428,
            231.8845403702499,
            0,
            0,
            1,
        ]
        cam_msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        cam_msg.P = [
            220.2460277141687,
            0,
            301.8668918355899,
            0,
            0,
            238.6758484095299,
            227.0880056118307,
            0,
            0,
            0,
            1,
            0,
        ]

        self.pub_cam.publish(cam_msg)

        if not self.has_published:
            rospy.loginfo("[%s] Published the first image." % (self.node_name))
            self.has_published = True

        #if done and (self.env.unwrapped.step_count != 1500):
        #rospy.logwarn("[%s] DONE! RESETTING." %(self.node_name))
        #self.env.reset()

        self.env.render()
Example #30
    def run(self):
        img = Image()
        r = rospy.Rate(self.config['frame_rate'])
        while self.is_looping():
            if self.pub_img_.get_num_connections() == 0:
                if self.nameId:
                    rospy.loginfo('Unsubscribing from camera as nobody listens to the topics.')
                    self.camProxy.unsubscribe(self.nameId)
                    self.nameId = None
                r.sleep()
                continue
            if self.nameId is None:
                self.reconfigure(self.config, 0)
                r.sleep()
                continue
            image = self.camProxy.getImageRemote(self.nameId)
            if image is None:
                continue
            # Deal with the image
            if self.config['use_ros_time']:
                img.header.stamp = rospy.Time.now()
            else:
                img.header.stamp = rospy.Time(image[4] + image[5]*1e-6)
            img.header.frame_id = self.frame_id
            img.height = image[1]
            img.width = image[0]
            nbLayers = image[2]
            if image[3] == kYUVColorSpace:
                encoding = "mono8"
            elif image[3] == kRGBColorSpace:
                encoding = "rgb8"
            elif image[3] == kBGRColorSpace:
                encoding = "bgr8"
            elif image[3] == kYUV422ColorSpace:
                encoding = "yuv422" # this works only in ROS groovy and later
            elif image[3] == kDepthColorSpace:
                encoding = "mono16"
            else:
                rospy.logerr("Received unknown encoding: {0}".format(image[3]))
            img.encoding = encoding
            img.step = img.width * nbLayers
            img.data = image[6]

            self.pub_img_.publish(img)

            # deal with the camera info
            if self.config['source'] == kDepthCamera and image[3] == kDepthColorSpace:
                infomsg = CameraInfo()
                # yes, this is only for an XTion / Kinect but that's the only thing supported by NAO
                ratio_x = float(640)/float(img.width)
                ratio_y = float(480)/float(img.height)
                infomsg.width = img.width
                infomsg.height = img.height
                # [ 525., 0., 3.1950000000000000e+02, 0., 525., 2.3950000000000000e+02, 0., 0., 1. ]
                infomsg.K = [ 525, 0, 3.1950000000000000e+02,
                              0, 525, 2.3950000000000000e+02,
                              0, 0, 1 ]
                infomsg.P = [ 525, 0, 3.1950000000000000e+02, 0,
                              0, 525, 2.3950000000000000e+02, 0,
                              0, 0, 1, 0 ]
                for i in range(3):
                    infomsg.K[i] = infomsg.K[i] / ratio_x
                    infomsg.K[3+i] = infomsg.K[3+i] / ratio_y
                    infomsg.P[i] = infomsg.P[i] / ratio_x
                    infomsg.P[4+i] = infomsg.P[4+i] / ratio_y

                infomsg.D = []
                infomsg.binning_x = 0
                infomsg.binning_y = 0
                infomsg.distortion_model = ""

                infomsg.header = img.header
                self.pub_info_.publish(infomsg)
            elif self.config['camera_info_url'] in self.camera_infos:
                infomsg = self.camera_infos[self.config['camera_info_url']]

                infomsg.header = img.header
                self.pub_info_.publish(infomsg)

            r.sleep()

        if (self.nameId):
            rospy.loginfo("unsubscribing from camera ")
            self.camProxy.unsubscribe(self.nameId)
Example #31
    # info_msg.height = 640
    info_msg.height = 240
    info_msg.width = 240
    # fovx = 98
    fovx = 11.421
    # fovy = 114
    # fovy = 98
    fovy = 11.421

    fx = info_msg.width / 2.0 / \
         np.tan(np.deg2rad(fovx / 2.0))
    fy = info_msg.height / 2.0 / \
         np.tan(np.deg2rad(fovy / 2.0))
    cx = info_msg.width / 2.0
    cy = info_msg.height / 2.0
    info_msg.D = [-0.071069, 0.024841, -6.7e-05, 0.024236, 0.0]
    info_msg.K = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1.0])
    info_msg.P = np.array([fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1, 0])
    info_msg.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

    # info_msg.height = 480
    # info_msg.width = 640
    # fovx = 60
    # fovy = 49.5

    # info_msg.D = [-0.071069, 0.024841, -6.7e-05, 0.024236, 0.0]
    # info_msg.K = [496.734185, 0.0, 368.095466, 0.0, 491.721745, 248.578629, 0.0, 0.0, 1.0]
    # info_msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    # info_msg.P = [471.903168, 0.0, 386.903326, 0.0, 0.0, 489.443268, 248.733955, 0.0, 0.0, 0.0, 1.0, 0.0]

    rospy.Timer(rospy.Duration(1.0 / 1), timer_cb)
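For the values above, the pinhole formula gives fx = 240 / (2 * tan(11.421 deg / 2)) ≈ 1200 px with cx = cy = 120 px; a quick check:

import numpy as np

fx = 240 / 2.0 / np.tan(np.deg2rad(11.421 / 2.0))
print(round(fx, 1))   # ~1200.0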
Example #32
    def publish(self, incomingData):
        if "apriltags" in incomingData:
            for tag in incomingData["apriltags"]:
                # Publish the relative pose
                newApriltagDetectionMsg = AprilTagExtended()
                newApriltagDetectionMsg.header.stamp.secs = int(
                    tag["timestamp_secs"])
                newApriltagDetectionMsg.header.stamp.nsecs = int(
                    tag["timestamp_nsecs"])
                newApriltagDetectionMsg.header.frame_id = str(tag["source"])
                newApriltagDetectionMsg.transform.translation.x = float(
                    tag["tvec"][0])
                newApriltagDetectionMsg.transform.translation.y = float(
                    tag["tvec"][1])
                newApriltagDetectionMsg.transform.translation.z = float(
                    tag["tvec"][2])
                newApriltagDetectionMsg.transform.rotation.x = float(
                    tag["qvec"][0])
                newApriltagDetectionMsg.transform.rotation.y = float(
                    tag["qvec"][1])
                newApriltagDetectionMsg.transform.rotation.z = float(
                    tag["qvec"][2])
                newApriltagDetectionMsg.transform.rotation.w = float(
                    tag["qvec"][3])
                newApriltagDetectionMsg.tag_id = int(tag["tag_id"])
                newApriltagDetectionMsg.tag_family = tag["tag_family"]
                newApriltagDetectionMsg.hamming = int(tag["hamming"])
                newApriltagDetectionMsg.decision_margin = float(
                    tag["decision_margin"])
                newApriltagDetectionMsg.homography = tag["homography"].flatten(
                )
                newApriltagDetectionMsg.center = tag["center"]
                newApriltagDetectionMsg.corners = tag["corners"].flatten()
                newApriltagDetectionMsg.pose_error = tag["pose_error"]

                self.publish_queue.put({
                    "topic": "apriltags",
                    "message": newApriltagDetectionMsg
                })
                # self.publishers["apriltags"].publish(newApriltagDetectionMsg)
                # self.logger.info("Published pose for tag %d in sequence %d" % (
                #     tag["tag_id"], self.seq_stamper))

        # Publish the test and raw data if submitted and requested:
        if self.ACQ_TEST_STREAM:
            if "test_stream_image" in incomingData:
                imgMsg = incomingData["test_stream_image"]
                imgMsg.header.seq = self.seq_stamper
                self.publish_queue.put({
                    "topic": "test_stream_image",
                    "message": imgMsg
                })
                # self.publishers["test_stream_image"].publish(imgMsg)

            if "raw_image" in incomingData:
                imgMsg = incomingData["raw_image"]
                imgMsg.header.seq = self.seq_stamper
                self.publish_queue.put({
                    "topic": "raw_image",
                    "message": imgMsg
                })
                # self.publishers["raw_image"].publish(imgMsg)

            if "rectified_image" in incomingData:
                imgMsg = incomingData["rectified_image"]
                imgMsg.header.seq = self.seq_stamper
                self.publish_queue.put({
                    "topic": "rectified_image",
                    "message": imgMsg
                })
                # self.publishers["rectified_image"].publish(imgMsg)

            if "raw_camera_info" in incomingData:
                self.publish_queue.put({
                    "topic":
                    "raw_camera_info",
                    "message":
                    incomingData["raw_camera_info"]
                })

                # self.publishers["raw_camera_info"].publish(
                #     incomingData["raw_camera_info"])

            if "new_camera_matrix" in incomingData:
                new_camera_info = CameraInfo()
                try:
                    new_camera_info.header = incomingData[
                        "raw_camera_info"].header
                    new_camera_info.height = incomingData["raw_image"].shape[0]
                    new_camera_info.width = incomingData["raw_image"].shape[1]
                    new_camera_info.distortion_model = incomingData[
                        "raw_camera_info"].distortion_model
                    new_camera_info.D = [0.0, 0.0, 0.0, 0.0, 0.0]
                except:
                    pass
                new_camera_info.K = incomingData["new_camera_matrix"].flatten()
                self.publish_queue.put({
                    "topic": "new_camera_matrix",
                    "message": new_camera_info
                })
Example #33
    def main_loop(self):
        img = Image()
        cimg = Image()
        r = rospy.Rate(15)
        while not rospy.is_shutdown():
            if self.pub_img_.get_num_connections() == 0:
                r.sleep()
                continue

            image = self.camProxy.getImageRemote(self.nameId)
            if image is None:
                continue
            # Deal with the image
            '''
            #Images received from NAO have
            if self.config['use_ros_time']:
                img.header.stamp = rospy.Time.now()
            else:
                img.header.stamp = rospy.Time(image[4] + image[5]*1e-6)
            '''
            img.header.stamp = rospy.Time.now()
            img.header.frame_id = self.frame_id
            img.height = image[1]
            img.width = image[0]
            nbLayers = image[2]
            if image[3] == kDepthColorSpace:
                encoding = "mono16"
            else:
                rospy.logerr("Received unknown encoding: {0}".format(image[3]))
            img.encoding = encoding
            img.step = img.width * nbLayers
            img.data = image[6]

            self.pub_img_.publish(img)
            
            # deal with the camera info
            infomsg = CameraInfo()
            infomsg.header = img.header
            # yes, this is only for an XTion / Kinect but that's the only thing supported by NAO
            ratio_x = float(640)/float(img.width)
            ratio_y = float(480)/float(img.height)
            infomsg.width = img.width
            infomsg.height = img.height
            # [ 525., 0., 3.1950000000000000e+02, 0., 525., 2.3950000000000000e+02, 0., 0., 1. ]
            infomsg.K = [ 525, 0, 3.1950000000000000e+02,
                         0, 525, 2.3950000000000000e+02,
                         0, 0, 1 ]
            infomsg.P = [ 525, 0, 3.1950000000000000e+02, 0,
                         0, 525, 2.3950000000000000e+02, 0,
                         0, 0, 1, 0 ]

            for i in range(3):
                infomsg.K[i] = infomsg.K[i] / ratio_x
                infomsg.K[3+i] = infomsg.K[3+i] / ratio_y
                infomsg.P[i] = infomsg.P[i] / ratio_x
                infomsg.P[4+i] = infomsg.P[4+i] / ratio_y

            infomsg.D = []
            infomsg.binning_x = 0
            infomsg.binning_y = 0
            infomsg.distortion_model = ""
            self.pub_info_.publish(infomsg)

            # Currently we only get a depth image from the 3D camera of NAO, so we make up a fake color image (a black image)
            # and publish it under the image_color topic.
            # This should be updated when the color image from the 3D camera becomes available.
            colorimg = np.zeros((img.height,img.width,3), np.uint8)
            try:
              cimg = self.bridge.cv2_to_imgmsg(colorimg, "bgr8")
              cimg.header.stamp = img.header.stamp
              cimg.header.frame_id = img.header.frame_id
              self.pub_cimg_.publish(cimg)
            except CvBridgeError, e:
              print e

            r.sleep()
Example #34
    def callback(self, raw_rgb_img):
        '''Runs depth estimation on live webcam video stream'''
        #Resize image
        rgb_img = self.bridge.imgmsg_to_cv2(raw_rgb_img,
                                            "bgr8")  #544 High x 1024 wide
        # print type(rgb_img) #np.ndarray
        # print rgb_img.shape #544x1024x3
        img = rgb_img[288:480, :, :]
        rgb_img = img

        img = img.reshape(1, 192, 640, 3)
        img = np.divide(img, 255).astype(np.float16)
        #Predict depth
        y_est = self.model.predict(img)
        y_est = y_est.reshape((192, 640))

        #Thresholding
        for ii, row in enumerate(y_est):
            for jj, value in enumerate(row):
                if (y_est[ii][jj]) > 0.02:  #max=0.115, min=0.0009
                    y_est[ii][jj] = 0

        y_est = y_est.reshape((192, 640, 1))

        #Define ROS messages
        h = Header()
        h.stamp = raw_rgb_img.header.stamp
        #h.stamp=rospy.Time.now()
        h.frame_id = 'camera_link'
        #Define depth image message
        depth_img = self.bridge.cv2_to_imgmsg((y_est * 255).astype(np.float32),
                                              "32FC1")
        depth_img.header = h

        #Define rgb message
        rgb_img = self.bridge.cv2_to_imgmsg(rgb_img, "bgr8")
        rgb_img.header = h

        #Define camera calibration info. message
        cal_msg = CameraInfo()
        cal_msg.header = h
        cal_msg.distortion_model = "plumb_bob"
        cal_msg.height = 192
        cal_msg.width = 640

        #FROM FIELDSAFE MULTISENSE CAMERA
        #Distortion coefficients
        cal_msg.D = [
            0.0030753163155168295, 0.002497022273018956, 0.0003005412872880697,
            0.001575434347614646, -0.003454494522884488, 0.0, 0.0, 0.0
        ]
        #Intrinsic Camera Matrix
        cal_msg.K = [
            555.9204711914062, 0.0, 498.1905517578125, 0.0, 556.6275634765625,
            252.35089111328125, 0.0, 0.0, 1.0
        ]
        cal_msg.R = [
            0.9999634027481079, -0.000500216381624341, 0.00853759702295065,
            0.0005011018947698176, 0.9999998807907104, -0.00010158627264900133,
            -0.00853754486888647, 0.00010586075950413942, 0.9999635219573975
        ]
        #Projection Matrix
        cal_msg.P = [
            580.6427001953125, 0.0, 512.0, 0.0, 0.0, 580.6427001953125, 254.5,
            0.0, 0.0, 0.0, 1.0, 0.0
        ]

        #Publish messages
        self.pub.publish(depth_img)
        self.rgb_pub.publish(rgb_img)
        self.rgb_pub_cal.publish(cal_msg)
Example #35
                                      CameraInfo,
                                      queue_size=10)
    names_pub = rospy.Publisher(names_topic_to_stream,
                                std_msgs.msg.String,
                                queue_size=10)
    yaml_data = numpy.asarray(cv2.cv.Load(directory + "/calib_ir.yaml"))

    cam_info = CameraInfo()
    cam_info.height = 424
    cam_info.width = 512
    cam_info.distortion_model = 'plumb_bob'
    cam_info.K[0] = yaml_data[0, 0]
    cam_info.K[2] = yaml_data[0, 2]
    cam_info.K[4] = yaml_data[1, 1]
    cam_info.K[5] = yaml_data[1, 2]
    cam_info.D = [0.0, 0.0, 0.0, 0.0, 0.0]
    cam_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    cam_info.P[0 * 4 + 0] = yaml_data[0, 0]
    cam_info.P[0 * 4 + 2] = yaml_data[0, 2]
    cam_info.P[1 * 4 + 1] = yaml_data[1, 1]
    cam_info.P[1 * 4 + 2] = yaml_data[1, 2]
    cam_info.P[2 * 4 + 2] = 1
    for i in xrange(len(image_list_rgb)):
        print(str(i).zfill(4) + " / " + str(len(image_list_rgb)))
        now = rospy.get_rostime()
        rgb_frame = cv2.imread(image_list_rgb[i], cv2.IMREAD_COLOR)
        depth_frame = cv2.imread(image_list_depth[i], cv2.IMREAD_ANYDEPTH)
        # depth_frame = depth_frame[:, 154:]
        # depth_frame = depth_frame[:, :-154]
        # depth_frame = cv2.resize(depth_frame,(424, 512), interpolation = cv2.INTER_CUBIC)
        msg_frame_rgb = CvBridge().cv2_to_imgmsg(rgb_frame, encoding="bgr8")
Example #36
    def run(self):
        # left_cam_info = self.yaml_to_camera_info(self.left_yaml_file)
        # right_cam_info = self.yaml_to_camera_info(self.right_yaml_file)
        left_cam_info = CameraInfo()
        left_cam_info.width = 640
        left_cam_info.height = 480
        left_cam_info.K = [
            883.998642, 0.000000, 349.540253, 0.000000, 887.969815, 247.902874,
            0.000000, 0.000000, 1.000000
        ]
        left_cam_info.D = [0.125962, -0.905045, 0.006512, 0.007531, 0.000000]
        left_cam_info.R = [
            0.985389, 0.006639, 0.170189, -0.004920, 0.999933, -0.010521,
            -0.170248, 0.009530, 0.985355
        ]
        left_cam_info.P = [
            1022.167889, 0.000000, 150.220785, 0.000000, 0.000000, 1022.167889,
            249.024044, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000
        ]
        left_cam_info.distortion_model = 'plumb_bob'

        right_cam_info = CameraInfo()
        right_cam_info.width = 640
        right_cam_info.height = 480
        right_cam_info.K = [
            874.019843, 0.000000, 331.121922, 0.000000, 875.918758, 244.867443,
            0.000000, 0.000000, 1.000000
        ]
        right_cam_info.D = [0.041385, -0.059698, 0.005392, 0.009075, 0.000000]
        right_cam_info.R = [
            0.976610, 0.003803, 0.214985, -0.005979, 0.999937, 0.009472,
            -0.214936, -0.010535, 0.976571
        ]
        right_cam_info.P = [
            1022.167889, 0.000000, 150.220785, -41.006903, 0.000000,
            1022.167889, 249.024044, 0.000000, 0.000000, 0.000000, 1.000000,
            0.000000
        ]
        right_cam_info.distortion_model = 'plumb_bob'

        rate = rospy.Rate(20)
        while not rospy.is_shutdown():
            ret, frame = self.cam.read()
            if not ret:
                print('[ERROR]: frame error')
                break
            expand_frame = cv2.resize(frame, None, fx=2, fy=1)

            left_image = expand_frame[0:480, 0:640]
            right_image = expand_frame[0:480, 640:1280]

            self.msg_header.frame_id = 'stereo_image'
            self.msg_header.stamp = rospy.Time.now()
            left_cam_info.header = self.msg_header
            right_cam_info.header = self.msg_header
            self.left_image_info_pub.publish(left_cam_info)
            self.right_image_info_pub.publish(right_cam_info)
            # self.pub_image(self.left_image_pub, left_image, self.msg_header )
            # self.pub_image(self.right_image_pub, right_image, self.msg_header )

            try:
                thread.start_new_thread(self.pub_image, (
                    self.left_image_pub,
                    left_image,
                    self.msg_header,
                ))
                thread.start_new_thread(self.pub_image, (
                    self.right_image_pub,
                    right_image,
                    self.msg_header,
                ))
            except:
                print("[ERROR]: unable to start thread")
            rate.sleep()
Example #37
import time
from cv_bridge import CvBridge, CvBridgeError

#cv2.namedWindow("test")

with Vimba() as vimba:
    #camera info
    camera_info_msg = CameraInfo()
    camera_info_msg.header.frame_id = "avt_manta"
    camera_info_msg.width = 1624
    camera_info_msg.height = 1234
    camera_info_msg.K = [
        1792.707269, 0.0, 821.895887, 0.0, 1790.871098, 624.859714, 0.0, 0.0,
        1.0
    ]
    camera_info_msg.D = [-0.225015, 0.358593, -0.005422, 0.009070, 0.0]
    camera_info_msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    camera_info_msg.P = [
        1736.461670, 0.0, 834.094334, 0.0, 0.0, 1751.635254, 621.266132, 0.0,
        0.0, 0.0, 1.0, 0.0
    ]
    camera_info_msg.distortion_model = "narrow_stereo"
    ##########

    system = vimba.getSystem()

    system.runFeatureCommand("GeVDiscoveryAllOnce")
    time.sleep(0.2)

    camera_ids = vimba.getCameraIds()
Example #38
import rospy
from sensor_msgs.msg import CameraInfo, PointCloud2

if __name__ == "__main__":
    rospy.init_node("sample_camera_info_and_pointcloud_publisher")
    pub_info = rospy.Publisher("~info", CameraInfo, queue_size=1)
    pub_cloud = rospy.Publisher("~cloud", PointCloud2, queue_size=1)
    rate = rospy.Rate(1)
    while not rospy.is_shutdown():
        info = CameraInfo()
        info.header.stamp = rospy.Time.now()
        info.header.frame_id = "origin"
        info.height = 544
        info.width = 1024
        info.D = [
            -0.20831339061260223, 0.11341656744480133, -0.00035378438769839704,
            -1.746419547998812e-05, 0.013720948249101639, 0.0, 0.0, 0.0
        ]
        info.K = [
            598.6097412109375, 0.0, 515.5960693359375, 0.0, 600.0813598632812,
            255.42999267578125, 0.0, 0.0, 1.0
        ]
        info.R = [
            0.999993085861206, 0.0022128731943666935, -0.0029819998890161514,
            -0.0022144035901874304, 0.9999974370002747, -0.0005100672133266926,
            0.002980863442644477, 0.0005166670307517052, 0.9999954104423523
        ]
        info.P = [
            575.3445434570312, 0.0, 519.5, 0.0, 0.0, 575.3445434570312, 259.5,
            0.0, 0.0, 0.0, 1.0, 0.0
        ]
        pub_info.publish(info)
Example #39
with open(file_cfg) as f:
    lines = f.readlines()

parser = lambda label: map(float, [line for line in lines \
    if line.startswith(label)][0].strip().split('(')[1].split(')')[0].split(','))

i = parser('CAMERA%s_INTRINSIC:'%camera)
d = parser('CAMERA%s_DISTORTION:'%camera)
# TODO
intrinsic = [[0]*3]*3
# http://docs.ros.org/api/sensor_msgs/html/msg/CameraInfo.html
# http://wiki.ros.org/image_pipeline/CameraInfo
camera_info = CameraInfo()
camera_info.distortion_model = 'plumb_bob'
# fill these 3 parameters to get a correct image with a stereo camera
Tx = 0
Ty = 0
R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
camera_info.D = [0]
camera_info.K = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2],
                 intrinsic[1][0], intrinsic[1][1], intrinsic[1][2],
                 intrinsic[2][0], intrinsic[2][1], intrinsic[2][2]]
camera_info.R = R
camera_info.P = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2], Tx,
                 intrinsic[1][0], intrinsic[1][1], intrinsic[1][2], Ty,
                 intrinsic[2][0], intrinsic[2][1], intrinsic[2][2], 0]

with open(file_yml, 'w') as f:
    f.write(str(camera_info))