Example #1
    def __init__(self):
        rospy.init_node('image_publish')
        name = sys.argv[1]
        image = cv2.imread(name)
        #cv2.imshow("im", image)
        #cv2.waitKey(5)

        hz = rospy.get_param("~rate", 1)
        frame_id = rospy.get_param("~frame_id", "map")
        use_image = rospy.get_param("~use_image", True)
        rate = rospy.Rate(hz)

        self.ci_in = None
        ci_sub = rospy.Subscriber('camera_info_in', CameraInfo,
                                  self.camera_info_callback, queue_size=1)

        if use_image:
            pub = rospy.Publisher('image', Image, queue_size=1)
        ci_pub = rospy.Publisher('camera_info', CameraInfo, queue_size=1)

        msg = Image()
        msg.header.stamp = rospy.Time.now()
        msg.header.frame_id = frame_id
        msg.encoding = 'bgr8'
        msg.height = image.shape[0]
        msg.width = image.shape[1]
        msg.step = image.shape[1] * 3
        msg.data = image.tostring()
        if use_image:
            pub.publish(msg)

        ci = CameraInfo()
        ci.header = msg.header
        ci.height = msg.height
        ci.width = msg.width
        ci.distortion_model = "plumb_bob"
        # TODO(lucasw) need a way to set these values- have this node
        # subscribe to an input CameraInfo?
        ci.D = [0.0, 0.0, 0.0, 0, 0]
        ci.K = [500.0, 0.0, msg.width/2, 0.0, 500.0, msg.height/2, 0.0, 0.0, 1.0]
        ci.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        ci.P = [500.0, 0.0, msg.width/2, 0.0, 0.0, 500.0, msg.height/2, 0.0,  0.0, 0.0, 1.0, 0.0]
        # ci_pub.publish(ci)

        # TODO(lwalter) only run this if hz is positive,
        # otherwise wait for input trigger message to publish an image
        while not rospy.is_shutdown():
            if self.ci_in is not None:
                ci = self.ci_in

            msg.header.stamp = rospy.Time.now()
            ci.header = msg.header
            if use_image:
                pub.publish(msg)
            ci_pub.publish(ci)

            if hz <= 0:
                rospy.sleep(1.0)  # rospy.sleep() needs a duration; poll once per second
            else:
                rate.sleep()
Example #2
def sim():
    global t, pose, camInfoMsg
    global centerPub, targetVelPub, camVelPub, camInfoPub, br, tfl
    global switchTimer, imageTimer, estimatorOn
    
    rospy.init_node("sim")
    estimatorOn = True
    
    centerPub = rospy.Publisher("markerCenters",Center,queue_size=10)
    targetVelPub = rospy.Publisher("ugv0/body_vel",TwistStamped,queue_size=10)
    camVelPub = rospy.Publisher("image/body_vel",TwistStamped,queue_size=10)
    cameraName = rospy.get_param(rospy.get_name()+"/camera","camera")
    camInfoPub = rospy.Publisher(cameraName+"/camera_info",CameraInfo,queue_size=1)
    br = tf.TransformBroadcaster()
    tfl = tf.TransformListener()
    
    # Camera parameters
    camInfoMsg = CameraInfo()
    camInfoMsg.D = distCoeffs.tolist()
    camInfoMsg.K = camMat.reshape((-1)).tolist()
    
    # Wait for node to get cam info
    while (camVelPub.get_num_connections() == 0) and (not rospy.is_shutdown()):
        # publish camera parameters
        camInfoPub.publish(camInfoMsg)
        rospy.sleep(0.5)
    
    # Publishers
    rospy.Timer(rospy.Duration(1.0/velRate),velPubCB)
    imageTimer = rospy.Timer(rospy.Duration(1.0/frameRate),imagePubCB)
    rospy.Timer(rospy.Duration(0.5),camInfoPubCB)
    switchTimer = rospy.Timer(rospy.Duration(11.0),switchCB,oneshot=True)
    
    # Initial conditions
    startTime = rospy.get_time()
    camPos = np.array([0,-1,1.5])
    camOrient = np.array([-1*np.sqrt(2)/2,0,0,np.sqrt(2)/2])
    targetPos = np.array([0,1,0])
    targetOrient = np.array([0,0,0,1])
    pose = np.concatenate((camPos,camOrient,targetPos,targetOrient))
    
    r = rospy.Rate(intRate)
    h = 1.0/intRate
    while not rospy.is_shutdown():
        t = np.array(rospy.get_time() - startTime)
        poseDot = poseDyn(t,pose)
        pose = pose + poseDot*h
        
        # Send Transform
        camPos = pose[0:3]
        camOrient = pose[3:7]
        targetPos = pose[7:10]
        targetOrient = pose[10:]
        br.sendTransform(targetPos,targetOrient,rospy.Time.now(),"ugv0","world")
        br.sendTransform(camPos,camOrient,rospy.Time.now(),"image","world")
        
        r.sleep()
Example #3
def talker():
    pub = rospy.Publisher('camera_info', CameraInfo, queue_size=10)
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(10) # 10hz
    while not rospy.is_shutdown():
        data = CameraInfo()
        # [[ 820.29938083    0.          408.73210359]
        #  [   0.          827.01247867  237.04151422]
        #  [   0.            0.            1.        ]]
        data.K = (820.29938083, 0., 408.73210359, 0., 827.01247867, 237.04151422, 0., 0., 1.)
        pub.publish(data)
        rate.sleep()
Example #4
 def publish( self ):
     # Get the image.
     image = self.__videoDeviceProxy.getImageRemote( self.__videoDeviceProxyName );
         
     # Create Image message.
     ImageMessage = Image();
     ImageMessage.header.stamp.secs = image[ 5 ];
     ImageMessage.width = image[ 0 ];
     ImageMessage.height = image[ 1 ];
     ImageMessage.step = image[ 2 ] * image[ 0 ];
     ImageMessage.is_bigendian = False;
     ImageMessage.encoding = 'bgr8';
     ImageMessage.data = image[ 6 ];
     
     self.__imagePublisher.publish( ImageMessage );
     
     # Create CameraInfo message.
     # Data from the calibration phase is hard coded for now.
     CameraInfoMessage = CameraInfo();
     CameraInfoMessage.header.stamp.secs = image[ 5 ];
     CameraInfoMessage.width = image[ 0 ];
     CameraInfoMessage.height = image[ 1 ];
     CameraInfoMessage.D = [ -0.0769218451517258, 0.16183180613612602, 0.0011626049774280595, 0.0018733894100460534, 0.0 ];
     CameraInfoMessage.K = [ 581.090096189648, 0.0, 341.0926325830606, 0.0, 583.0323248080421, 241.02441593704128, 0.0, 0.0, 1.0 ];
     CameraInfoMessage.R = [ 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0 ];
     CameraInfoMessage.P = [ 580.5918359588198, 0.0, 340.76398441334106, 0.0, 0.0, 582.4042541534321, 241.04182225157172, 0.0, 0.0, 0.0, 1.0, 0.0] ;
     CameraInfoMessage.distortion_model = 'plumb_bob';
     
     #CameraInfoMessage.roi.x_offset = self.__ROIXOffset;
     #CameraInfoMessage.roi.y_offset = self.__ROIYOffset;
     #CameraInfoMessage.roi.width = self.__ROIWidth;
     #CameraInfoMessage.roi.height = self.__ROIHeight;
     #CameraInfoMessage.roi.do_rectify = self.__ROIDoRectify;
     
     self.__cameraInfoPublisher.publish( CameraInfoMessage );
Example #5
def yaml_to_CameraInfo(calib_yaml):
    """
    Parse a yaml file containing camera calibration data (as produced by
    rosrun camera_calibration cameracalibrator.py) into a
    sensor_msgs/CameraInfo msg.

    Parameters
    ----------
    calib_yaml : str
        Contents of a yaml file containing camera calibration data

    Returns
    -------
    camera_info_msg : sensor_msgs.msg.CameraInfo
        A sensor_msgs.msg.CameraInfo message containing the camera calibration
        data
    """
    # Load data from file
    calib_data = yaml.load(calib_yaml)
    # Parse
    camera_info_msg = CameraInfo()
    camera_info_msg.width = calib_data["image_width"]
    camera_info_msg.height = calib_data["image_height"]
    camera_info_msg.K = calib_data["camera_matrix"]["data"]
    camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
    camera_info_msg.R = calib_data["rectification_matrix"]["data"]
    camera_info_msg.P = calib_data["projection_matrix"]["data"]
    camera_info_msg.distortion_model = calib_data["distortion_model"]
    return camera_info_msg
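A minimal usage sketch for the function above (not part of the original example; the node name, topic, and file path are placeholders). Note that yaml_to_CameraInfo expects the YAML text itself rather than a path, so the file is read first:

import rospy
from sensor_msgs.msg import CameraInfo

rospy.init_node("camera_info_publisher")
with open("calibration.yaml", "r") as f:           # placeholder path
    info_msg = yaml_to_CameraInfo(f.read())        # pass the YAML contents
pub = rospy.Publisher("camera_info", CameraInfo, queue_size=1, latch=True)
pub.publish(info_msg)                              # latched, so late subscribers still receive it
rospy.spin()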
Example #6
    def send_test_messages(self, filename):
        self.msg_received = False
        # Publish the camera info TODO make this a field in the annotations file to dictate the source calibration file
        msg_info = CameraInfo()
        msg_info.height = 480
        msg_info.width = 640
        msg_info.distortion_model = "plumb_bob"
        msg_info.D = [-0.28048157543793056, 0.05674481026365553, -0.000988764087143394, -0.00026869128565781613, 0.0]
        msg_info.K = [315.128501, 0.0, 323.069638, 0.0, 320.096636, 218.012581, 0.0, 0.0, 1.0]
        msg_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg_info.P = [217.48876953125, 0.0, 321.3154072384932, 0.0, 0.0, 250.71084594726562, 202.30416165274983, 0.0,
                      0.0, 0.0, 1.0, 0.0]
        msg_info.roi.do_rectify = False

        # Publish the test image
        img = cv2.imread(filename)
        cvb = CvBridge()
        msg_raw = cvb.cv2_to_imgmsg(img, encoding="bgr8")
        self.pub_info.publish(msg_info)
        self.pub_raw.publish(msg_raw)

        # Wait for the message to be received
        timeout = rospy.Time.now() + rospy.Duration(10)  # Wait at most 10 seconds for the node to reply
        while not self.msg_received and not rospy.is_shutdown() and rospy.Time.now() < timeout:
            rospy.sleep(0.1)
        self.assertLess(rospy.Time.now(), timeout, "Waiting for apriltag detection timed out.")
Example #7
  def __init__(self):
    rospy.init_node('image_converter', anonymous=True)
    self.filtered_face_locations = rospy.get_param('camera_topic')
    self.image_pub = rospy.Publisher(self.filtered_face_locations,Image)
    self.image_info = rospy.Publisher('camera_info',CameraInfo)
    self.bridge = CvBridge()
    cap = cv2.VideoCapture('/home/icog-labs/Videos/video.mp4')
    print cap.get(5)
    path = rospkg.RosPack().get_path("robots_config")
    path = path + "/robot/camera.yaml"
    stream = file(path, 'r')
    calib_data = yaml.load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data['image_width']
    cam_info.height = calib_data['image_height']
    cam_info.K = calib_data['camera_matrix']['data']
    cam_info.D = calib_data['distortion_coefficients']['data']
    cam_info.R = calib_data['rectification_matrix']['data']
    cam_info.P = calib_data['projection_matrix']['data']
    cam_info.distortion_model = calib_data['distortion_model']
    while (not rospy.is_shutdown()) and (cap.isOpened()):
        self.keystroke = cv2.waitKey(1000 / 30)
        ret, frame = cap.read()

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # cv2.imshow('frame', gray)
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(gray, "mono8"))
        self.image_info.publish(cam_info)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #8
def makeROSInfo(image):
	ci = CameraInfo()

	head = Header()
	head.stamp = rospy.Time.now()
	ci.header = head


	h, w = image.shape[:2]  # numpy shape is (rows, cols) = (height, width)

	ci.width = w
	ci.height = h
	ci.distortion_model = 'plumb_bob'

	return ci
Example #9
    def default(self, ci="unused"):
        if not self.component_instance.capturing:
            return  # press [Space] key to enable capturing

        image_local = self.data["image"]

        image = Image()
        image.header = self.get_ros_header()
        image.header.frame_id += "/base_image"
        image.height = self.component_instance.image_height
        image.width = self.component_instance.image_width
        image.encoding = "rgba8"
        image.step = image.width * 4

        # VideoTexture.ImageRender implements the buffer interface
        image.data = bytes(image_local)

        # Set these 3 parameters to get a correct image with a stereo camera
        Tx = 0
        Ty = 0
        R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

        intrinsic = self.data["intrinsic_matrix"]

        camera_info = CameraInfo()
        camera_info.header = image.header
        camera_info.height = image.height
        camera_info.width = image.width
        camera_info.distortion_model = "plumb_bob"
        camera_info.K = [
            intrinsic[0][0],
            intrinsic[0][1],
            intrinsic[0][2],
            intrinsic[1][0],
            intrinsic[1][1],
            intrinsic[1][2],
            intrinsic[2][0],
            intrinsic[2][1],
            intrinsic[2][2],
        ]
        camera_info.R = R
        camera_info.P = [
            intrinsic[0][0],
            intrinsic[0][1],
            intrinsic[0][2],
            Tx,
            intrinsic[1][0],
            intrinsic[1][1],
            intrinsic[1][2],
            Ty,
            intrinsic[2][0],
            intrinsic[2][1],
            intrinsic[2][2],
            0,
        ]

        self.publish(image)
        self.topic_camera_info.publish(camera_info)
Example #10
 def publish(self, event):
     if self.imgmsg is None:
         return
     now = rospy.Time.now()
     # setup ros message and publish
     with self.lock:
         self.imgmsg.header.stamp = now
         self.imgmsg.header.frame_id = self.frame_id
     self.pub.publish(self.imgmsg)
     if self.publish_info:
         info = CameraInfo()
         info.header.stamp = now
         info.header.frame_id = self.frame_id
         info.width = self.imgmsg.width
         info.height = self.imgmsg.height
         self.pub_info.publish(info)
Example #11
def GetCameraInfo(width, height):
	cam_info = CameraInfo()
	cam_info.width = width
	cam_info.height = height
	cam_info.distortion_model = model
	#cam_info.D = [0.0]*5
	#cam_info.K = [0.0]*9
	#cam_info.R = [0.0]*9
	#cam_info.P = [0.0]*12
	cam_info.D = D
	cam_info.K = K
	cam_info.R = R
	cam_info.P = P
	cam_info.binning_x = 0
	cam_info.binning_y = 0
	return cam_info
Example #12
 def publish(self):
     now = rospy.Time.now()
     bridge = cv_bridge.CvBridge()
     img_bgr = cv2.imread(self.file_name)
     if img_bgr is None:
         jsk_logwarn('cannot read the image at {}'
                     .format(self.file_name))
         return
     # resolve encoding
     encoding = rospy.get_param('~encoding', 'bgr8')
     if getCvType(encoding) == 0:
         # mono8
         img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
     elif getCvType(encoding) == 2:
         # 16UC1
         img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
         img = img.astype(np.float32)
         img = img / 255 * (2 ** 16 - 1)  # scale to the 16-bit range without overflowing uint16
         img = img.astype(np.uint16)
     elif getCvType(encoding) == 5:
         # 32FC1
         img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
         img = img.astype(np.float32)
         img /= 255
     elif getCvType(encoding) == 16:
         # 8UC3
         if encoding in ('rgb8', 'rgb16'):
             # RGB
             img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
         else:
             img = img_bgr
     else:
         jsk_logerr('unsupported encoding: {0}'.format(encoding))
         return
     # setup ros message and publish
     imgmsg = bridge.cv2_to_imgmsg(img, encoding=encoding)
     imgmsg.header.stamp = now
     imgmsg.header.frame_id = self.frame_id
     self.pub.publish(imgmsg)
     if self.publish_info:
         info = CameraInfo()
         info.header.stamp = now
         info.header.frame_id = self.frame_id
         info.width = imgmsg.width
         info.height = imgmsg.height
         self.pub_info.publish(info)
Example #13
def sim():
    global t, pose, camInfoMsg
    global centerPub, velPub, camInfoPub, br, tfl
    
    rospy.init_node("sim")
    
    centerPub = rospy.Publisher("markerCenters",Center,queue_size=10)
    velPub = rospy.Publisher("image/local_vel",TwistStamped,queue_size=10)
    cameraName = rospy.get_param(rospy.get_name()+"/camera","camera")
    camInfoPub = rospy.Publisher(cameraName+"/camera_info",CameraInfo,queue_size=1)
    br = tf.TransformBroadcaster()
    tfl = tf.TransformListener()
    
    # Camera parameters
    camInfoMsg = CameraInfo()
    camInfoMsg.D = distCoeffs.tolist()
    camInfoMsg.K = camMat.reshape((-1)).tolist()
    
    # Wait for node to get cam info
    while (velPub.get_num_connections() == 0) and (not rospy.is_shutdown()):
        # publish camera parameters
        camInfoPub.publish(camInfoMsg)
        rospy.sleep(0.5)
    
    # Publishers
    rospy.Timer(rospy.Duration(1.0/velRate),velPubCB)
    rospy.Timer(rospy.Duration(1.0/frameRate),imagePubCB)
    rospy.Timer(rospy.Duration(0.5),camInfoPubCB)
    
    # Initial conditions
    startTime = rospy.get_time()
    camPos = np.array([0,-1,1.5])
    camOrient = np.array([-1*np.sqrt(2)/2,0,0,np.sqrt(2)/2])
    targetPos = np.array([-1.5,1.5,0])
    targetOrient = np.array([0,0,0,1])
    pose = np.concatenate((camPos,camOrient,targetPos,targetOrient))
    
    r = rospy.Rate(intRate)
    h = 1.0/intRate
    while not rospy.is_shutdown():
        t = np.array(rospy.get_time() - startTime)
        poseDot = poseDyn(t,pose)
        pose = pose + poseDot*h
        
        r.sleep()
Example #14
def parse_yaml(filename):
    stream = file(filename, 'r')
    calib_data = yaml.load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data['image_width']
    cam_info.height = calib_data['image_height']
    cam_info.K = calib_data['camera_matrix']['data']
    cam_info.D = calib_data['distortion_coefficients']['data']
    cam_info.R = calib_data['rectification_matrix']['data']
    cam_info.P = calib_data['projection_matrix']['data']
    cam_info.distortion_model = calib_data['distortion_model']
    return cam_info
Example #15
def load_cam_info(calib_file):
    cam_info = CameraInfo()
    with open(calib_file,'r') as cam_calib_file :
        cam_calib = yaml.load(cam_calib_file)
        cam_info.height = cam_calib['image_height']
        cam_info.width = cam_calib['image_width']
        cam_info.K = cam_calib['camera_matrix']['data']
        cam_info.D = cam_calib['distortion_coefficients']['data']
        cam_info.R = cam_calib['rectification_matrix']['data']
        cam_info.P = cam_calib['projection_matrix']['data']
        cam_info.distortion_model = cam_calib['distortion_model']
    return cam_info
Example #16
 def parse_yaml(self, filename):
     stream = file(filename, "r")
     calib_data = yaml.load(stream)
     cam_info = CameraInfo()
     cam_info.width = calib_data["image_width"]
     cam_info.height = calib_data["image_height"]
     cam_info.K = calib_data["camera_matrix"]["data"]
     cam_info.D = calib_data["distortion_coefficients"]["data"]
     cam_info.R = calib_data["rectification_matrix"]["data"]
     cam_info.P = calib_data["projection_matrix"]["data"]
     cam_info.distortion_model = calib_data["distortion_model"]
     return cam_info
Example #17
	def createMsg(self):
		msg = CameraInfo()
		msg.height = 480
		msg.width = 640
		msg.distortion_model = "plumb_bob"
		msg.D = [0.08199114285264993, -0.04549390835713936, -0.00040960290587863145, 0.0009833748346549968, 0.0]
		msg.K = [723.5609128875188, 0.0, 315.9845354772031, 0.0, 732.2615109685506, 242.26660681756633,\
				 0.0, 0.0, 1.0]
		msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
		msg.P = [737.1736450195312, 0.0, 316.0324931112791, 0.0, 0.0, 746.1573486328125, 241.59042622688867,\
				 0.0, 0.0, 0.0, 1.0, 0.0]
		self.msg = msg
Example #18
def parse_yaml(filename):
    stream = file(filename, "r")
    calib_data = yaml.load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data["image_width"]
    cam_info.height = calib_data["image_height"]
    cam_info.K = calib_data["camera_matrix"]["data"]
    cam_info.D = calib_data["distortion_coefficients"]["data"]
    cam_info.R = calib_data["rectification_matrix"]["data"]
    cam_info.P = calib_data["projection_matrix"]["data"]
    cam_info.distortion_model = calib_data["distortion_model"]
    cam_info.binning_x = calib_data["binning_x"]
    cam_info.binning_y = calib_data["binning_y"]
    cam_info.roi.x_offset = calib_data["roi"]["x_offset"]
    cam_info.roi.y_offset = calib_data["roi"]["y_offset"]
    cam_info.roi.height = calib_data["roi"]["height"]
    cam_info.roi.width = calib_data["roi"]["width"]
    cam_info.roi.do_rectify = calib_data["roi"]["do_rectify"]

    return cam_info
Example #19
def parse_yaml(filename):
    stream = file(filename, 'r')
    calib_data = yaml.load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data['image_width']
    cam_info.height = calib_data['image_height']
    cam_info.K = calib_data['camera_matrix']['data']
    cam_info.D = calib_data['distortion_coefficients']['data']
    cam_info.R = calib_data['rectification_matrix']['data']
    cam_info.P = calib_data['projection_matrix']['data']
    cam_info.distortion_model = calib_data['distortion_model']
    cam_info.binning_x = calib_data['binning_x']
    cam_info.binning_y = calib_data['binning_y']
    cam_info.roi.x_offset = calib_data['roi']['x_offset']
    cam_info.roi.y_offset = calib_data['roi']['y_offset']
    cam_info.roi.height = calib_data['roi']['height']
    cam_info.roi.width = calib_data['roi']['width']
    cam_info.roi.do_rectify = calib_data['roi']['do_rectify']
    
    return cam_info
Example #20
def yaml_to_CameraInfo(yaml_fname):
    with open(yaml_fname, "r") as file_handle:
        calib_data = yaml.load(file_handle)
 
    camera_info_msg = CameraInfo()
    camera_info_msg.width = calib_data["image_width"]
    camera_info_msg.height = calib_data["image_height"]
    camera_info_msg.K = calib_data["camera_matrix"]["data"]
    camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
    camera_info_msg.R = calib_data["rectification_matrix"]["data"]
    camera_info_msg.P = calib_data["projection_matrix"]["data"]
    camera_info_msg.distortion_model = calib_data["distortion_model"]
    return camera_info_msg
Example #21
def test():
    rospy.wait_for_service(service_name)

    # build a calibration for the camera in 1360x1024 mode
    # (theoretical values for a Unibrain Fire-i in 640x480 mode are left commented out below)
    cinfo = CameraInfo()
    cinfo.width = 1360
    cinfo.height = 1024
    #cx = (cinfo.width - 1.0)/2.0
    #cy = (cinfo.height - 1.0)/2.0
    #fx = fy = 0.0043                    # Unibrain Fire-i focal length
    #fx = 1046.16197
    #fy = 1043.87094
    #cx = 695.62128
    #cy = 544.32183
    #k1 = -0.23268
    #k2 = 0.08580
    #p1 = 0.00098
    #p2 = -0.00022
    #cinfo.K = [fx, 0., cx, 0., fy, cy, 0., 0., 1.]
    #cinfo.R = [1., 0., 0., 0., 1., 0., 0., 0., 1.]
    #cinfo.P = [fx, 0., cx, 0., 0., fy, cy, 0., 0., 0., 1., 0.]

    cinfo.K = [430.21554970319971, 0.0, 306.6913434743704, 0.0, 430.53169252696676, 227.22480030078816, 0.0, 0.0, 1.0]
    cinfo.D = [-0.33758562758914146, 0.11161239414304096, -0.00021819272592442094, -3.029195446330518e-05, 0]

    #cinfo.K = [4827.94, 0, 1223.5, 0, 4835.62, 1024.5, 0, 0, 1]
    #cinfo.D = [-0.41527, 0.31874, -0.00197, 0.00071, 0]
    #cinfo.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
    #cinfo.P = [4827.94, 0, 1223.5, 0, 0, 4835.62, 1024.5, 0, 0, 0, 1, 0]


    try:
        set_camera_info = rospy.ServiceProxy(service_name, SetCameraInfo)
        response = set_camera_info(cinfo)
        rospy.loginfo("set_camera_info: success=" + str(response.success)
                      + ", status=" + str(response.status_message))
        return response.success
    except rospy.ServiceException, e:
        print "Service call failed: %s"%e
Example #22
 def publish(self, event):
     if self.imgmsg is None:
         return
     now = rospy.Time.now()
     # setup ros message and publish
     with self.lock:
         self.imgmsg.header.stamp = now
         self.imgmsg.header.frame_id = self.frame_id
     self.pub.publish(self.imgmsg)
     if self.publish_info:
         info = CameraInfo()
         info.header.stamp = now
         info.header.frame_id = self.frame_id
         info.width = self.imgmsg.width
         info.height = self.imgmsg.height
         if self.fovx is not None and self.fovy is not None:
             fx = self.imgmsg.width / 2.0 / \
                 np.tan(np.deg2rad(self.fovx / 2.0))
             fy = self.imgmsg.height / 2.0 / \
                 np.tan(np.deg2rad(self.fovy / 2.0))
             cx = self.imgmsg.width / 2.0
             cy = self.imgmsg.height / 2.0
             info.K = np.array([fx, 0, cx,
                                0, fy, cy,
                                0, 0, 1.0])
             info.P = np.array([fx, 0, cx, 0,
                                0, fy, cy, 0,
                                0, 0, 1, 0])
             info.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
         self.pub_info.publish(info)
Example #23
def get_camera_info(hard_coded=True):

    if hard_coded:
        cx = 319.5
        cy = 239.5
        fx = 525.5
        fy = 525.5

        return (cx, cy, fx, fy)

    #if we are using a different camera, then
    #we can listen to the ros camera info topic for that device
    #and get our values here.
    else:

        import image_geometry
        from sensor_msgs.msg import CameraInfo

        cam_info = CameraInfo()

        cam_info.height = 480
        cam_info.width = 640
        cam_info.distortion_model = "plumb_bob"
        cam_info.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        cam_info.K = [525.0, 0.0, 319.5, 0.0, 525.0, 239.5, 0.0, 0.0, 1.0]
        cam_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        cam_info.P = [525.0, 0.0, 319.5, 0.0, 0.0, 525.0, 239.5, 0.0, 0.0, 0.0, 1.0, 0.0]
        cam_info.binning_x = 0
        cam_info.binning_y = 0
        cam_info.roi.x_offset = 0
        cam_info.roi.y_offset = 0
        cam_info.roi.height = 0
        cam_info.roi.width = 0
        cam_info.roi.do_rectify = False

        camera_model = image_geometry.PinholeCameraModel()
        camera_model.fromCameraInfo(cam_info)

        return camera_model.cx(), camera_model.cy(), camera_model.fx(), camera_model.fy()
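A minimal sketch of how the returned intrinsics are typically used (not from the original source; the pixel coordinates and depth are made-up placeholders): back-projecting a pixel with a known depth into a 3D point in the camera frame.

cx, cy, fx, fy = get_camera_info(hard_coded=True)
u, v = 320, 240                  # placeholder pixel coordinates
z = 1.0                          # placeholder depth in metres
x = (u - cx) * z / fx            # standard pinhole back-projection
y = (v - cy) * z / fy
point_in_camera_frame = (x, y, z)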
Example #24
    def get_cam_info(self, cam_name):
        cam_name = "calibrations/" + cam_name + ".yaml"
        stream = file(os.path.join(os.path.dirname(__file__), cam_name), 'r')

        calib_data = yaml.load(stream)

        cam_info = CameraInfo()
        cam_info.width = calib_data['image_width']
        cam_info.height = calib_data['image_height']
        cam_info.K = calib_data['camera_matrix']['data']
        cam_info.D = calib_data['distortion_coefficients']['data']
        cam_info.R = calib_data['rectification_matrix']['data']
        cam_info.P = calib_data['projection_matrix']['data']
        cam_info.distortion_model = calib_data['distortion_model']
        return cam_info
Example #25
    def test_with_known_image(self):
        filename = rospy.get_param("~filename")
        self.setup()  # Setup the node

        # Publish the camera info
        msg_info = CameraInfo()
        msg_info.height = 480
        msg_info.width = 640
        msg_info.K = [331.026328, 0.0, 319.035097, 0.0, 335.330339, 216.450133, 0.0, 0.0, 1.0]
        self.pub_info.publish(msg_info)

        # Publish the test image
        img = cv2.imread(filename)
        cvb = CvBridge()
        msg_raw = cvb.cv2_to_imgmsg(img, encoding="bgr8")
        self.pub_raw.publish(msg_raw)

        # Wait for the message to be received
        timeout = rospy.Time.now() + rospy.Duration(5)  # Wait at most 5 seconds for the node to reply
        while not self.msg_received and not rospy.is_shutdown() and rospy.Time.now() < timeout:
            rospy.sleep(0.1)
        self.assertLess(rospy.Time.now(), timeout, "Waiting for apriltag detection timed out.")

        # Check that an apriltag with id=108 was detected
        found = 0
        for tag in self.msg_tags.detections:
            if tag.id == 108:
                found += 1
                self.assertAlmostEqual(tag.pose.pose.position.x, 0.0, delta=0.1)  # Allow 10 cm of error margin
                self.assertAlmostEqual(tag.pose.pose.position.y, 0.0, delta=0.1)  # Allow 10 cm of error margin
                self.assertAlmostEqual(tag.pose.pose.position.z, 0.305, delta=0.1)  # Allow 10 cm of error margin
                # Convert the quat to an angle about z
                rot = tag.pose.pose.orientation
                ang = tr.euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))[2]  # z axis angle only
                self.assertAlmostEqual(ang, 0, delta=15 * np.pi / 180)  # Allow up to 15 degrees of error margin

        self.assertGreaterEqual(found, 1, "Expected apriltag with id=108 not found.")
Example #26
  def stream(self):
    url = 'http://%s/nphMotionJpeg' % self.axis.hostname
    url = url + "?resolution=%dx%d" % (self.width, self.height)
    fp = urllib.urlopen(url)

    print 'Streaming \n'

    while self.notdone:
      boundary = fp.readline()

      header = {}
      while 1:
        line = fp.readline()
        if line == "\r\n": break
        line = line.strip()

        parts = line.split(": ", 1)
        header[parts[0]] = parts[1]

      content_length = int(header['Content-length'])

      img = fp.read(content_length)
      line = fp.readline()
      
      msg = CompressedImage()
      msg.header.stamp = rospy.Time.now()
      msg.format = "jpeg"
      msg.data = img

      self.axis.pub.publish(msg)

      cimsg = CameraInfo()
      cimsg.header.stamp = msg.header.stamp
      cimsg.width = self.width
      cimsg.height = self.height

      self.axis.caminfo_pub.publish(cimsg)
Example #27
    def default(self, ci='unused'):
        if not self.component_instance.capturing:
            return # press [Space] key to enable capturing

        image_local = self.data['image']

        image = Image()
        image.header = self.get_ros_header()
        image.height = self.component_instance.image_height
        image.width = self.component_instance.image_width
        image.encoding = self.encoding
        image.step = image.width * 4

        # VideoTexture.ImageRender implements the buffer interface
        image.data = bytes(image_local)

        # Set these 3 parameters to get a correct image with a stereo camera
        Tx = 0
        Ty = 0
        R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

        intrinsic = self.data['intrinsic_matrix']

        camera_info = CameraInfo()
        camera_info.header = image.header
        camera_info.height = image.height
        camera_info.width = image.width
        camera_info.distortion_model = 'plumb_bob'
        camera_info.D = [0]
        camera_info.K = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2],
                         intrinsic[1][0], intrinsic[1][1], intrinsic[1][2],
                         intrinsic[2][0], intrinsic[2][1], intrinsic[2][2]]
        camera_info.R = R
        camera_info.P = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2], Tx,
                         intrinsic[1][0], intrinsic[1][1], intrinsic[1][2], Ty,
                         intrinsic[2][0], intrinsic[2][1], intrinsic[2][2], 0]

        if self.pub_tf:
            self.publish_with_robot_transform(image)
        else:
            self.publish(image)
        self.topic_camera_info.publish(camera_info)
Example #28
    def toCameraInfo(self):
        msg = CameraInfo()

        (msg.width, msg.height) = self.size

        if self.D.size > 5:
            msg.distortion_model = "rational_polynomial"
        else:
            msg.distortion_model = "plumb_bob"

        msg.D = numpy.ravel(self.D).copy().tolist()
        msg.K = numpy.ravel(self.K).copy().tolist()
        msg.R = numpy.ravel(self.R).copy().tolist()
        msg.P = numpy.ravel(self.P).copy().tolist()

        return msg
Example #29
    def fetch_image(self, cam):
        cam.simulate()

        if not cam.pixels:
            return None, None
        cv_img = cv.CreateImageHeader((cam.width , cam.height), cv.IPL_DEPTH_8U, 3)
        cv.SetData(cv_img, cam.pixels, cam.width*3)
        cv.ConvertImage(cv_img, cv_img, cv.CV_CVTIMG_FLIP)
        im = self.bridge.cv_to_imgmsg(cv_img, "rgb8")

        caminfo = CameraInfo()
        caminfo.header = im.header
        caminfo.height = cam.height
        caminfo.width = cam.width
        caminfo.D = 5*[0.]
        caminfo.K = sum([list(r) for r in cam.K],[])
        caminfo.P = sum([list(r) for r in cam.P],[])
        caminfo.R = sum([list(r) for r in cam.R],[])

        return im, caminfo
Example #30
def pickle_to_info(info_pickle):
    info = CameraInfo()
    info.header.stamp = rospy.Time()
    info.header.seq = info_pickle.header.seq
    info.header.frame_id = info_pickle.header.frame_id
    info.roi.x_offset = info_pickle.roi.x_offset
    info.roi.y_offset = info_pickle.roi.y_offset
    info.roi.height = info_pickle.roi.height
    info.roi.width = info_pickle.roi.width
    info.height = info_pickle.height
    info.width = info_pickle.width
    info.D = info_pickle.D
    info.K = info_pickle.K
    info.R = info_pickle.R
    info.P = info_pickle.P
    
    return info
Example #31
    def __init__(self,
                 apriltag_ns="apriltag",
                 init_node=False,
                 full_img_get_path="/tmp/get_image.png",
                 full_img_save_path="/tmp/save_image.png"):

        # start apriltag interface node
        if (init_node):
            rospy.init_node(apriltag_ns.strip("/") + "_interface")

        self.image_frame_id = None

        # set up subs, pubs, and services
        camera_info_topic = rospy.get_param(
            "/" + apriltag_ns + "/camera_info_topic",
            default="camera/color/image_raw").strip("/")
        rospy.wait_for_service("/" + apriltag_ns + "/snap_picture")
        rospy.wait_for_service("/" + apriltag_ns +
                               "/single_image_tag_detection")
        self.sub_camera_info = rospy.Subscriber("/" + camera_info_topic,
                                                CameraInfo,
                                                self.camera_info_cb)
        self.srv_snap_picture = rospy.ServiceProxy(
            "/" + apriltag_ns + "/snap_picture", SnapPicture)
        self.request = AnalyzeSingleImageRequest()
        self.request.full_path_where_to_get_image = full_img_get_path
        self.request.full_path_where_to_save_image = full_img_save_path
        self.srv_analyze_image = rospy.ServiceProxy(
            "/" + apriltag_ns + "/single_image_tag_detection",
            AnalyzeSingleImage)
        self.pub_transforms = rospy.Publisher("/static_transforms",
                                              TransformStamped,
                                              queue_size=50)

        rospy.sleep(0.5)

        # wait to receive camera info (means that we are properly subbed)
        while (self.request.camera_info == CameraInfo()
               and not rospy.is_shutdown()):
            pass

        print("Initialized InterbotixAprilTagInterface!\n")
Example #32
 def __init__(self):
     self.pub = rospy.Publisher("camera/cameraInfo",
                                CameraInfo,
                                queue_size=1)
     self.pub_Image = rospy.Publisher("camera/image_raw",
                                      Image,
                                      queue_size=1)
     self.image = Image()
     self.bridge = CvBridge()
     self.Cap = cv2.VideoCapture(int(camera_id))
     self.Cap.set(cv2.CAP_PROP_FPS, fps)
     self.Cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
     self.kill = False
     #self.Cap.set(cv2.CV_CAP_PROP_BUFFERSIZE, 1);
     #os.system(os.path.join(self.script_dir,'camConfig.sh')) #Set the camera Gain and Exposure Parameters
     if path_to_calibration_file is not None:
         camera_name, self.camera_info = readCalibration(os.path.join(self.script_dir,\
                                                                 path_to_calibration_file))
     else:
         self.camera_info = CameraInfo()
Example #33
    def make_camera_info(self):
        self.camera_info = CameraInfo()
        ros_pack = rospkg.RosPack()
        cam_file = open(
            ros_pack.get_path('ep_bringup') + '/config/head_camera.yaml')
        cam_data = yaml.safe_load(cam_file)

        self.camera_info.header = rospy.Header()
        self.camera_info.header.frame_id = cam_data["camera_name"]

        self.camera_info.distortion_model = cam_data["distortion_model"]
        self.camera_info.width = cam_data['image_width']
        self.camera_info.height = cam_data['image_height']
        self.camera_info.binning_x = 0
        self.camera_info.binning_y = 0

        self.camera_info.K = cam_data['camera_matrix']['data']
        self.camera_info.D = cam_data['distortion_coefficients']['data']
        self.camera_info.R = cam_data['rectification_matrix']['data']
        self.camera_info.P = cam_data['projection_matrix']['data']
Example #34
    def __init__(
        self,
        settingsFile,
        cameraInfoFile,
    ):
        """ Init the cameraModel as PinholeCameraModel with camera intrinsics and params
        =============
        settingsFile : json file storing 3D-2D point pairs and params for optimization
        cameraInfoFile : yaml file storing camera intrinsics and params
        """

        self.settings = json.load(settingsFile)
        self.cameraParams = yaml.load(cameraInfoFile)
        self.cameraModel = PinholeCameraModel()
        self.cameraInfo = CameraInfo()

        self._fillCameraInfo()
        self.cameraModel.fromCameraInfo(self.cameraInfo)

        print('Camera Model is initialized, ready to calibrate ...\n')
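Once a CameraInfo message is filled in, image_geometry's PinholeCameraModel can map between pixels and rays. A minimal self-contained sketch (the intrinsics below are borrowed from the hard-coded values in the get_camera_info example above and are only illustrative):

from image_geometry import PinholeCameraModel
from sensor_msgs.msg import CameraInfo

info = CameraInfo(width=640, height=480,
                  D=[0.0, 0.0, 0.0, 0.0, 0.0],
                  K=[525.0, 0.0, 319.5, 0.0, 525.0, 239.5, 0.0, 0.0, 1.0],
                  R=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
                  P=[525.0, 0.0, 319.5, 0.0, 0.0, 525.0, 239.5, 0.0, 0.0, 0.0, 1.0, 0.0])
model = PinholeCameraModel()
model.fromCameraInfo(info)                        # configure the model from the message
u, v = model.project3dToPixel((0.0, 0.0, 1.0))    # 3D point in the camera frame -> pixel
ray = model.projectPixelTo3dRay((u, v))           # pixel -> unit ray through that pixel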
Example #35
    def __init__(self):
        rospy.init_node('pub_camera_info')
        yaml_file = rospy.get_param('~yaml_file')

        # Load data from file
        with open(yaml_file, "r") as file_handle:
            calib_data = yaml.load(file_handle)
        
        self.camera_info_msg = CameraInfo()
        try:
            self.camera_info_msg.width = calib_data["image_width"]
            self.camera_info_msg.height = calib_data["image_height"]
            self.camera_info_msg.K = calib_data["camera_matrix"]["data"]
            self.camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
            self.camera_info_msg.R = calib_data["rectification_matrix"]["data"]
            self.camera_info_msg.P = calib_data["projection_matrix"]["data"]
            self.camera_info_msg.distortion_model = calib_data["distortion_model"]
        except TypeError, e:
            rospy.logwarn('Camera info not found... shutting down')
            sys.exit(0)
Example #36
    def on_timer(self, _):
        now = rospy.Time.now()
        future = now + rospy.Duration(1.0 / self.updates_per_second)

        # publish camera intrinsics
        intrinsics = self.cam.get_intrinsics_as_bunch()
        intrinsic_msg = CameraInfo(**intrinsics.__dict__)
        intrinsic_msg.header.stamp = now
        intrinsic_msg.header.frame_id = self.frame_id
        self.intr_pub.publish(intrinsic_msg)

        # publish camera transform
        translation, rotation = self.cam.get_ROS_tf()
        self.tf_b.sendTransform(
            translation,
            rotation,
            future,
            self.frame_id,
            '/map',
        )
Example #37
def createCameraInfoMsg(timestamp, camera_definition):

    cam_info_msg = CameraInfo()
    cam_info_msg.header.stamp = timestamp
    cam_info_msg.header.frame_id = camera_definition.frame_id

    f = camera_definition.focal_length
    center_x = math.floor(camera_definition.width / 2.0) + 0.5
    center_y = math.floor(camera_definition.height / 2.0) + 0.5

    cam_info_msg.width = camera_definition.width
    cam_info_msg.height = camera_definition.height
    k = [f, 0, center_x, 0, f, center_y, 0, 0, 1.0]
    cam_info_msg.K = k
    cam_info_msg.P = [
        k[0], k[1], k[2], 0, k[3], k[4], k[5], 0, k[6], k[7], k[8], 0
    ]

    cam_info_msg.distortion_model = "plumb_bob"
    cam_info_msg.D = [0.0] * 5

    return cam_info_msg
Example #38
def main():
    rospy.init_node('pub_gap_camera', anonymous=True)
    image_pub = rospy.Publisher("himax_camera", Image)
    bridge = CvBridge()

    cv_file = cv2.FileStorage(config.folder_path + "/data/calibration.yaml",
                              cv2.FILE_STORAGE_READ)
    k = cv_file.getNode("k").mat()
    D = cv_file.getNode("D").mat()
    size = cv_file.getNode("size").mat()
    cv_file.release()

    k = k.flatten().tolist()
    D = D.flatten().tolist()
    camera_info = CameraInfo()
    camera_info.K = k
    camera_info.D = D
    camera_info.height = size[0][0]
    camera_info.width = size[1][0]
    print(camera_info)

    it = ImageTransformer()
    new_size, shift_x, shift_y = it.get_crop_parameters(
        config.folder_path + "/data/calibration.yaml",
        config.folder_path + "/data/bebop_calibration.yaml")
    shift_x = int(shift_x)
    shift_y = int(shift_y)

    pipe_name = config.folder_path + "/pulp/image_pipe"
    if not os.path.exists(pipe_name):
        os.mkfifo(pipe_name)

    pipein = os.open(pipe_name, os.O_RDONLY)
    fcntl.fcntl(pipein, F_SETPIPE_SZ, 1000000)

    frame_id = 1
    while not rospy.is_shutdown():
        data = read_from_pipe(pipein)
        if data is not None:
            cv_image = np.reshape(data,
                                  (config.himax_height, config.himax_width))
            cv_image = cv_image[shift_y:shift_y + new_size[1],
                                shift_x:shift_x + new_size[0]]
            msg = bridge.cv2_to_imgmsg(cv_image)
            msg.header.stamp = rospy.Time.now()
            image_pub.publish(msg)
            rospy.sleep(0)

    os.close(pipein)
Example #39
def get_camera_params():
    # read parameters
    width = rospy.get_param('~width', 640)
    height = rospy.get_param('~height', 480)
    Fx = rospy.get_param('~Fx', 360)
    Fy = rospy.get_param('~Fy', 360)
    Cx = rospy.get_param('~Cx', 360)
    Cy = rospy.get_param('~Cy', 240)

    # create sensor message
    camera_info_msg = CameraInfo()
    camera_info_msg.distortion_model = 'plumb_bob'
    camera_info_msg.width = width
    camera_info_msg.height = height
    camera_info_msg.roi.width = width
    camera_info_msg.roi.height = height
    camera_info_msg.K = [Fx, 0, Cx, 0, Fy, Cy, 0, 0, 1]
    camera_info_msg.D = [0, 0, 0, 0]

    camera_info_msg.P = [Fx, 0, Cx, 0, 0, Fy, Cy, 0, 0, 0, 1, 0]
    camera_info_msg.header.frame_id = "front_center_optical"
    return camera_info_msg
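A minimal sketch of publishing the message built above at a fixed rate so it can be paired with an image stream (not part of the original example; the node name, topic, and rate are placeholders):

import rospy
from sensor_msgs.msg import CameraInfo

rospy.init_node("camera_params_publisher")
pub = rospy.Publisher("camera_info", CameraInfo, queue_size=1)
info_msg = get_camera_params()
rate = rospy.Rate(30)
while not rospy.is_shutdown():
    info_msg.header.stamp = rospy.Time.now()      # restamp each message
    pub.publish(info_msg)
    rate.sleep()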
Example #40
    def __init__(self):
        rospy.init_node("little_stereo_camera", anonymous = True)
        self.bridge = CvBridge() 

        self.left_image_pub = rospy.Publisher("stereo/left/image_raw", Image, queue_size=1)
        self.left_image_info_pub = rospy.Publisher("stereo/left/camera_info", CameraInfo, queue_size=1)
        self.right_image_pub = rospy.Publisher("stereo/right/image_raw", Image, queue_size=1)
        self.right_image_info_pub = rospy.Publisher("stereo/right/camera_info", CameraInfo, queue_size=1)

        self.camera_info = CameraInfo()
        self.msg_header = Header()
        self.ros_image = Image()
        self.ros_image.height = 480
        self.ros_image.width = 640
        
        cam_id= rospy.get_param("cam_id", 0)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.left_yaml_file = dir_path+"/left.yaml"
        self.right_yaml_file = dir_path+"/right.yaml"
        self.cam=cv2.VideoCapture(cam_id)
        cam_mode_dict={
            'LEFT_EYE_MODE':1,
            'RIGHT_EYE_MODE':2,
            'RED_BLUE_MODE':3,
            'BINOCULAR_MODE':4,
            }
        cam_mode=cam_mode_dict['BINOCULAR_MODE']
        command_list=[
        "uvcdynctrl -d /dev/video{cam_id} -S 6:8 '(LE)0x50ff'",
        "uvcdynctrl -d /dev/video{cam_id} -S 6:15 '(LE)0x00f6'",
        "uvcdynctrl -d /dev/video{cam_id} -S 6:8 '(LE)0x2500'",
        "uvcdynctrl -d /dev/video{cam_id} -S 6:8 '(LE)0x5ffe'",
        "uvcdynctrl -d /dev/video{cam_id} -S 6:15 '(LE)0x0003'",
        "uvcdynctrl -d /dev/video{cam_id} -S 6:15 '(LE)0x0002'",
        "uvcdynctrl -d /dev/video{cam_id} -S 6:15 '(LE)0x0012'",
        "uvcdynctrl -d /dev/video{cam_id} -S 6:15 '(LE)0x0004'",
        "uvcdynctrl -d /dev/video{cam_id} -S 6:8 '(LE)0x76c3'",
        "uvcdynctrl -d /dev/video{cam_id} -S 6:10 '(LE)0x0{cam_mode}00'",
        ]
        for command in command_list:
            subprocess.Popen(shlex.split(command.format(cam_id=cam_id,cam_mode=cam_mode)))
Example #41
    def test_update(self):
        config, robot_params, P = self.load()

        camera_points = [
            ImagePoint(0, 0),
            ImagePoint(1, 0),
            ImagePoint(0, 1),
            ImagePoint(1, 1)
        ]

        block = CameraChainSensor(
            config,
            CameraMeasurement(camera_id="camA",
                              cam_info=CameraInfo(P=P),
                              image_points=camera_points),
            ChainMeasurement(chain_id="chainA",
                             chain_state=JointState(position=[0])))
        block.update_config(robot_params)

        target = matrix([[1, 2, 1, 2], [0, 0, 1, 1], [1, 1, 1, 1],
                         [1, 1, 1, 1]])

        h = block.compute_expected(target)
        z = block.get_measurement()
        r = block.compute_residual(target)

        self.assertAlmostEqual(
            linalg.norm(h - matrix([[0, 1, 0, 1], [0, 0, 1, 1]]).T), 0.0, 6)
        self.assertAlmostEqual(
            linalg.norm(z - matrix([[0, 1, 0, 1], [0, 0, 1, 1]]).T), 0.0, 6)
        self.assertAlmostEqual(linalg.norm(r), 0.0, 6)

        # Test Sparsity
        sparsity = block.build_sparsity_dict()
        self.assertEqual(sparsity['transforms']['transformA'],
                         [1, 1, 1, 1, 1, 1])
        self.assertEqual(sparsity['transforms']['transformB'],
                         [1, 1, 1, 1, 1, 1])
        self.assertEqual(sparsity['dh_chains']['chainA']['dh'], [[1, 1, 1, 1]])
        self.assertEqual(sparsity['rectified_cams']['camA']['baseline_shift'],
                         1)
Example #42
 def __init__(self, net, meta):
     self.image_pub = rospy.Publisher("/object_detect",
                                      Image,
                                      queue_size=10)
     self.point_pub = rospy.Publisher("/point_find",
                                      PointStamped,
                                      queue_size=10)
     self.net = net
     self.meta = meta
     self.bridge = CvBridge()
     self.image_sub = rospy.Subscriber("/camera/color/image_raw", Image,
                                       self.callback)
     self.image_depth_sub = rospy.Subscriber(
         "/camera/aligned_depth_to_color/image_raw", Image,
         self.depth_callback)
     self.camera_info_sub = rospy.Subscriber(
         "/camera/aligned_depth_to_color/camera_info", CameraInfo,
         self.info_callback)
     self.depth_image = Image()
     self.camera_info = CameraInfo()
     self.point = PointStamped()
Example #43
    def write_to_yaml(self, out_filename):
        # create CameraInfo message
        ci = CameraInfo()
        # ci.header = self.header
        # ci.height = self.im_size[1]
        # ci.width = self.im_size[0]
        # ci.distortion_model = self.dist_model
        # ci.D[:] = self.D.flatten()
        # ci.K[:] = self.K.flatten()
        # ci.R[:] = self.R.flatten()
        # ci.P[:9] = self.P.flatten()

        # write to YAML
        done = saveCalibration(new_info=ci,
                               url=out_filename,
                               cname=self.camera_name)
        if done:
            print '{:s} written'.format(out_filename)
        else:
            print 'WARNING: {:s} camera info not saved'.format(
                self.camera_name)
Example #44
def talker():
    cam = CameraInfo()
    cam.width = 1
    cam.height = 1
    cam.header.frame_id = "/camera"
    cam.distortion_model = "plumb_bob"
    rospy.init_node('publish_caminfo', anonymous=True)
    pub = rospy.Publisher('camera_info', CameraInfo)
    rate = rospy.Rate(20) # 20 Hz
    while not rospy.is_shutdown():
        f = math.sin(rospy.get_time() / (2* math.pi) * 600)
        f *= f
        cam.K[0] = f
        cam.K[4] = f
        cam.header.stamp=rospy.Time.now()

        pub.publish(cam)

        rate.sleep()
Example #45
 def __init__(self):
     rospy.init_node('siftpublisher', anonymous=True)
     self.bridge = CvBridge()
     self.cv_image = None
     self.CamInfo = CameraInfo()
     self.pose_pub = rospy.Publisher('pose', PoseStamped, queue_size=1, latch=True)
     self.pose_status_pub = rospy.Publisher('poseStatus', Bool, queue_size=1, latch=True)
     self.gripper_size_pub = rospy.Publisher('GripperPosition', Int64, queue_size=1, latch=True)
     self.object_height_pub = rospy.Publisher('ObjectHeight', Float32, queue_size=1, latch=True)
     self.image_pub = rospy.Publisher('ImagePosition', Image)
     self.rate = rospy.Rate(1)
     self.curr_stamp = rospy.Time.now()
     self.counter=0
     self.x_pos = np.zeros(11,dtype=float)
     self.y_pos = np.zeros(11,dtype=float)
     self.z_pos = np.zeros(11,dtype=float)
     self.x_rot = np.zeros(11,dtype=float)
     self.y_rot = np.zeros(11,dtype=float)
     self.z_rot = np.zeros(11,dtype=float)
     self.id = '2'
     self.offset = 0
Example #46
 def __init__(self, addr='localhost', req_port='50020'):
     '''
     Initialize Pupil_ZMQ_ROS object, init ZMQ and ROS
     :param addr: 'localhost' if pupil_capture or pupil_service is
                 running localy
     :param req_port: port number of Pupil_Remote plugin
     '''
     self.zmq_req = None
     self.zmq_sub = None
     self.ros_gaze_publisher = None
     self.ros_pupil_publisher = None
     self.ros_world_img_publisher = None
     self.ros_world_calibration_publisher = None
     self.cameraInfo = CameraInfo()
     self.ros_eye0_img_publisher = None
     self.ros_eye1_img_publisher = None
     self.cv_bridge = CvBridge()
     self.ros_started = True
     self.seq = 0
     self.init_zmq(addr, req_port)
     self.init_ros()
Example #47
    def __init__(self, filename="", camera_info={}):
        self.pub = rospy.Publisher('camera_info', CameraInfo, queue_size=1)
        self.sub = rospy.Subscriber('image', Image, self.image_cb)

        if len(filename) == 0 and len(camera_info) == 0:
            rospy.logwarn('Default camera info will be published')
        if len(filename) and len(camera_info):
            rospy.logwarn('Supplied camera_info takes priority over filename')
        info = {
            'image_height': 0,
            'image_width': 0,
            'distortion_model': '',
            'distortion_coefficients': {
                'data': [0, 0, 0, 0, 0]
            },
            'camera_matrix': {
                'data': [0, 0, 0, 0, 0, 0, 0, 0, 0]
            },
            'rectification_matrix': {
                'data': [0, 0, 0, 0, 0, 0, 0, 0, 0]
            },
            'projection_matrix': {
                'data': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            }
        }
        if len(filename):
            with open(filename, 'r') as stream:
                info = yaml.safe_load(stream)
        if len(camera_info):
            info = camera_info

        self.info = CameraInfo(height=info['image_height'],
                               width=info['image_width'],
                               distortion_model=info['distortion_model'],
                               D=info['distortion_coefficients']['data'],
                               K=info['camera_matrix']['data'],
                               R=info['rectification_matrix']['data'],
                               P=info['projection_matrix']['data'])
        rospy.loginfo('[{}] Loaded camera_info:\n{}'.format(
            rospy.get_name(), self.info))
Example #48
    def __init__(self, camera_name, im_size, frame_id, pos, ori):
        self.camera_name = camera_name
        self.im_size = im_size

        # create Header
        self.header = Header()
        self.header.frame_id = frame_id

        # me being naive
        fovy = 45
        znear = 0.01
        zfar = 50

        cx = im_size[0] / 2.
        cy = im_size[1] / 2.
        fy = (im_size[1] / 2.) / np.tan(fovy*np.pi/180.0)
        fx = fy
        self.K = np.array([[fx,  0, cx],
                           [ 0, fy, cy],
                           [ 0,  0,  1]])

        self.R = np.eye(3, dtype=np.float64)
        self.P = self.K

        self.ci = CameraInfo()
        self.ci.header = self.header
        self.ci.height = self.im_size[1]
        self.ci.width = self.im_size[0]
        self.ci.K[:] = self.K.flatten()
        self.ci.R[:] = self.R.flatten()
        self.ci.P[:] = np.hstack((self.P, np.zeros((3, 1)))).flatten()  # 3x4 projection: [K | 0]

        self.caminfo_pub = rospy.Publisher(camera_name + "_info",
                                           CameraInfo,
                                           queue_size=5)


        self.pos = pos
        self.ori = ori
        self.tf_br = tf.TransformBroadcaster()
Example #49
    def __init__(self):
        self.image_pub = rospy.Publisher(
            "/carina/sensor/camera/left/image_detection", Image, queue_size=1)
        self.pub_cluster_tfsign = rospy.Publisher("/tfsign_cluster",
                                                  PointCloud2,
                                                  queue_size=1)

        self.image_sub = rospy.Subscriber(
            "/carina/sensor/camera/left/image_raw", Image, self.callback)
        self.info_sub_l = rospy.Subscriber(
            "/carina/sensor/camera/left/camera_info",
            CameraInfo,
            self.callback_info_left,
            queue_size=1)
        self.pcl_sub = rospy.Subscriber(
            "/carina/sensor/lidar/velodyne_obstacles_full",
            PointCloud2,
            self.point_cloud_callback,
            queue_size=1)

        self.bridge = CvBridge()
        self.camInfo = CameraInfo()
        self.tf_l = tf.TransformListener()
        # self.tf_ros = tf.Transformer()
        self.sign_l_x = 0.0
        self.sign_l_y = 0.0
        self.sign_r_x = 0.0
        self.sign_r_y = 0.0

        self.points = np.zeros((1, 3))

        self.model = PinholeCameraModel()

        if os.path.isfile("clf/clf_svc_hog.pkl"):
            print("[INFO] loading classifier: SVC trained on HoG features...")
            self.svc = joblib.load("clf/clf_svc_hog.pkl")
            print("[INFO] Classifer is loaded as instance ::svc::")

        else:
            print("[INFO] pre-trained classifier not found.")
Example #50
 def debagify_msg(self, msg):
     """
     The messages read from the rosbags do not have the correct classes, so we need to reinitialize them.
     """
     if 'Header' in str(type(msg)):
         newMsg = Header()
         newMsg.seq = msg.seq
         newMsg.stamp.secs = msg.stamp.secs
         newMsg.stamp.nsecs = msg.stamp.nsecs
         newMsg.frame_id = msg.frame_id
         return newMsg
     if 'RegionOfInterest' in str(type(msg)):
         newMsg = RegionOfInterest()
         newMsg.x_offset = msg.x_offset
         newMsg.y_offset = msg.y_offset
         newMsg.height = msg.height
         newMsg.width = msg.width
         newMsg.do_rectify = msg.do_rectify
         return newMsg
     if '_sensor_msgs__CompressedImage' in str(type(msg)):
         im = self.bridge.compressed_imgmsg_to_cv2(msg).copy()
         newMsg = self.bridge.cv2_to_compressed_imgmsg(im)
         newMsg.header = self.debagify_msg(msg.header)
         return newMsg
     if '_sensor_msgs__CameraInfo' in str(type(msg)):
         newMsg = CameraInfo()
         newMsg.header = self.debagify_msg(msg.header)
         newMsg.height = msg.height
         newMsg.width = msg.width
         newMsg.distortion_model = msg.distortion_model
         newMsg.D = msg.D
         newMsg.P = msg.P
         newMsg.R = msg.R
         newMsg.K = msg.K
         newMsg.binning_x = msg.binning_x
         newMsg.binning_y = msg.binning_y
         newMsg.roi = self.debagify_msg(msg.roi)
         return newMsg
     raise Exception("Message type %s could not be debagified" %
                     str(type(msg)))
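A minimal sketch of how debagify_msg might be used when replaying a bag (assumptions: node is an instance of the class above, and the bag path and topic are placeholders):

import rosbag

with rosbag.Bag("input.bag") as bag:
    for topic, msg, t in bag.read_messages(topics=["/camera_node/camera_info"]):
        info = node.debagify_msg(msg)             # re-instantiated as a real sensor_msgs/CameraInfo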
Example #51
 def cb_camera_info(self, msg):
     # unsubscribe from camera_info
     self.loginfo('Camera info message received. Unsubscribing from camera_info topic.')
     # noinspection PyBroadException
     try:
         self.sub_camera_info.shutdown()
     except BaseException:
         pass
     # ---
     H, W = msg.height, msg.width
     # create new camera info
     self.camera_model = PinholeCameraModel()
     self.camera_model.fromCameraInfo(msg)
     # find optimal rectified pinhole camera
     with self.profiler('/cb/camera_info/get_optimal_new_camera_matrix'):
         rect_camera_K, _ = cv2.getOptimalNewCameraMatrix(
             self.camera_model.K,
             self.camera_model.D,
             (W, H),
             self.alpha.value
         )
     # create rectification map
     with self.profiler('/cb/camera_info/init_undistort_rectify_map'):
         self.mapx, self.mapy = cv2.initUndistortRectifyMap(
             self.camera_model.K,
             self.camera_model.D,
             None,
             rect_camera_K,
             (W, H),
             cv2.CV_32FC1
         )
     # pack rectified camera info into a CameraInfo message
     self.rect_camera_info = CameraInfo(
         width=W,
         height=H,
         K=rect_camera_K.flatten().tolist(),
         R=np.eye(3).flatten().tolist(),
         P=np.zeros((3, 4)).flatten().tolist(),
     )
Example #52
 def __init__(self):
     header = Header(frame_id='upward_looking_camera_link')
     self._cam_info = CameraInfo(
         header=header,
         height=480,
         width=640,
         distortion_model='plumb_bob',
         D=[0.0, 0.0, 0.0, 0.0, 0.0],
         K=[504.78216005824515, 0.0, 320.5, 0.0, 504.78216005824515, 240.5, 0.0, 0.0, 1.0],
         R=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
         P=[504.78216005824515, 0.0, 320.5, -0.0, 0.0, 504.78216005824515, 240.5, 0.0, 0.0, 0.0, 1.0, 0.0],
         binning_x=0,
         binning_y=0,
         roi=RegionOfInterest(x_offset=0, y_offset=0, height=0, width=0, do_rectify=False),
     )
     self._pub = rospy.Publisher('/upward_looking_camera/camera_info/fake', CameraInfo, queue_size=10)
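
Example #52 only builds and advertises the fake CameraInfo; one plausible way to publish it is to mirror the timestamps of an incoming image stream (the callback name image_cb and its wiring are assumptions):

 def image_cb(self, img_msg):
     # Keep the fake calibration in sync with the image stream and republish it.
     self._cam_info.header.stamp = img_msg.header.stamp
     self._pub.publish(self._cam_info)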
Example #53
    def run(self):
        # publish image 16 times per second
        rate = rospy.Rate(16) 

        while not rospy.is_shutdown():
            observation, reward, done, _ = env.step(self.action)
            env.render()
            if done:
                observation = env.reset()

            # publish image to image topic
            image_msg = CompressedImage()
            image_msg.header.stamp = rospy.Time.now()
            image_msg.format = "jpeg"
            image = cv2.cvtColor(np.ascontiguousarray(observation), cv2.COLOR_BGR2RGB)
            image_msg.data = np.array(cv2.imencode('.jpg', image)[1]).tostring()
            self.image_pub.publish(image_msg)

            # publish camera info to camera info topic
            self.info_pub.publish(CameraInfo())

            rate.sleep()
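
On the subscribing side, the JPEG payload of the CompressedImage can be decoded without cv_bridge; a minimal sketch (the callback name and the display window are illustrative):

import cv2
import numpy as np

def compressed_image_cb(msg):
    # Decode the JPEG byte string back into a BGR OpenCV image.
    buf = np.frombuffer(msg.data, dtype=np.uint8)
    frame = cv2.imdecode(buf, cv2.IMREAD_COLOR)
    if frame is not None:
        cv2.imshow("frame", frame)
        cv2.waitKey(1)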
Example #54
    def __init__(self):

        self.bbox_tfsign_pub = rospy.Publisher("/carina/sensor/tfsigns_box",
                                               BoundingBoxArray,
                                               queue_size=1)

        self.image_sub = rospy.Subscriber(
            "/carina/sensor/camera/left/image_raw", Image,
            self.callback_image_left)
        # self.info_sub_l = rospy.Subscriber("/carina/sensor/camera/left/camera_info", CameraInfo, self.callback_info_left, queue_size=1)

        self.bridge = CvBridge()
        self.camInfo = CameraInfo()

        self.model = PinholeCameraModel()

        if os.path.isfile("clf_google/clf_svc_hog.pkl"):
            print("[INFO] loading classifier: SVC trained on HoG features...")
            self.svc = joblib.load("clf_google/clf_svc_hog.pkl")
            print("[INFO] Classifer is loaded as instance ::svc::")
        else:
            print("[INFO] pre-trained classifier not found.")
Example #55
    def __init__(self):
        # Initialize the ROS node before creating any publishers or subscribers.
        rospy.init_node('object_detector')

        # OpenCV Bridge
        self.bridge = CvBridge()

        # Initialize global messages
        self.depth = Image()
        self.odometry = Odometry()
        self.camera_info = CameraInfo()

        # ZED Image subscribers for RGB image
        sub1 = rospy.Subscriber('/camera/zed_node/rgb/image_rect_color', Image, self.zed_rgb_cb)

        #sub2 = rospy.Subscriber('/camera/zed_node/right/image_rect_color', Image, self.zed_right_cb)
        sub3 = rospy.Subscriber('/camera/zed_node/odom', Odometry, self.odom_cb)
        sub4 = rospy.Subscriber('/camera/zed_node/rgb/camera_info', CameraInfo, self.camera_info_cb)
        sub5 = rospy.Subscriber('/camera/zed_node/depth/depth_registered', Image, self.depth_cb)

        # Mono Camera Image Subscriber
        sub = rospy.Subscriber('/mono_image_color', Image, self.mono_cb)
        print('Object Detector listening for images..')

        self.segmented_pub = rospy.Publisher('/segmented_img', Image, queue_size=1)
        self.segmented_pub_2 = rospy.Publisher('/segmented_img_2', Image, queue_size=1)
        self.odom_pub = rospy.Publisher('/odom', Odometry, queue_size=1)
        self.camera_info_pub = rospy.Publisher('/camera_info', CameraInfo, queue_size=1)
        self.depth_pub = rospy.Publisher('/depth_registered', Image, queue_size=1)
        self.count = 0

        self.gate_mission = True
        self.path_marker_mission = False
        self.dummy_segment = False # For testing
        
        # Load configuration file here
        # config_string = rospy.get_param("/object_detector_config")
        # self.config = yaml.load(config_string)

        rospy.spin()
Example #56
    def __init__(self):
        rospy.init_node('pub_camera_info')
        yaml_file = rospy.get_param('~yaml_file')

        # Load data from file
        with open(yaml_file, "r") as file_handle:
            calib_data = yaml.load(file_handle)

        self.camera_info_msg = CameraInfo()
        self.camera_info_msg.width = calib_data["image_width"]
        self.camera_info_msg.height = calib_data["image_height"]
        self.camera_info_msg.K = calib_data["camera_matrix"]["data"]
        self.camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
        self.camera_info_msg.R = calib_data["rectification_matrix"]["data"]
        self.camera_info_msg.P = calib_data["projection_matrix"]["data"]
        self.camera_info_msg.distortion_model = calib_data["distortion_model"]

        self.sub_image = rospy.Subscriber('image_raw', Image, self.recv_image)
        self.pub_info = rospy.Publisher('camera_info',
                                        CameraInfo,
                                        latch=True,
                                        queue_size=1)
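
The recv_image callback referenced above is not shown; a likely minimal implementation (an assumption, not the original code) copies the image header into the calibration so consumers can pair the two messages:

    def recv_image(self, msg):
        # Stamp the calibration with the incoming image's header and republish it.
        self.camera_info_msg.header = msg.header
        self.pub_info.publish(self.camera_info_msg)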
Example #57
    def __init__(self):
        self.detector_id = "gate"
        self.numBits = 8
        self.imageWidth = 640
        self.imageHeight = 480
        self.maxVal = 2**self.numBits - 1
        self.gate_dimensions = np.array(rospy.get_param("object_tags/gate/dimensions")).astype(float)
        self.gate_width = self.gate_dimensions[0]
        self.gate_height = self.gate_dimensions[2]

        self.left_img_flag = False
        self.stereo_left = Image()
        self.left_camera_info = CameraInfo()
        self.left_stream = rospy.Subscriber("/albatross/stereo_camera_left_front/camera_image", Image, self.left_callback)
        # Store the subscriber separately so it does not overwrite the CameraInfo message above.
        self.left_camera_info_sub = rospy.Subscriber("/albatross/stereo_camera_left_front/camera_info", CameraInfo, self.camera_info_callback)

        self.prev = [None, None]
        self.cv_bridge = CvBridge()
        self.gate_detection_pub = rospy.Publisher("gate_detections", Image, queue_size=10)
        rospy.wait_for_service("detector_bucket/register_object_detection")
        self.registration_service = rospy.ServiceProxy("detector_bucket/register_object_detection", RegisterObjectDetections)
        self.spin_callback = rospy.Timer(rospy.Duration(.010), self.spin)
 def publish_camera_info(self, height, width):
     camera_info_msg = CameraInfo()
     camera_info_msg.header.stamp = self.stamp
     camera_info_msg.header.frame_id = self.base_frame + "camera"
     camera_info_msg.height = height
     camera_info_msg.width = width
     f_y = self.mat_from_fov_and_resolution(
         self.h_fov_to_v_fov(1.39626, height, width),
         height)
     f_x = self.mat_from_fov_and_resolution(1.39626, width)
     camera_info_msg.K = [f_x, 0, width / 2,
                          0, f_y, height / 2,
                          0, 0, 1]
     camera_info_msg.P = [f_x, 0, width / 2, 0,
                          0, f_y, height / 2, 0,
                          0, 0, 1, 0]
     self.pub_camera_info.publish(camera_info_msg)
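
The two helpers called above are not included in the snippet. For a pinhole camera the focal length in pixels follows from the field of view as f = (N / 2) / tan(fov / 2), where N is the image size along that axis, and the vertical FOV follows from the horizontal one through the aspect ratio. A sketch of plausible implementations (names taken from the call sites, bodies assumed):

import math

def mat_from_fov_and_resolution(self, fov, resolution):
    # Pinhole focal length in pixels: f = (resolution / 2) / tan(fov / 2).
    return 0.5 * resolution / math.tan(0.5 * fov)

def h_fov_to_v_fov(self, h_fov, height, width):
    # Scale the tangent of the half horizontal FOV by the image aspect ratio.
    return 2.0 * math.atan(math.tan(0.5 * h_fov) * height / width)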
    def __init__(self):
        """
        Constructor
        """
        rospy.init_node("ros_yolo", anonymous=True, disable_signals=False)

        self.bridge = CvBridge()
        self.rgb_lock = threading.Lock()
        self.depth_lock = threading.Lock()

        self.cameraModel = PinholeCameraModel()
        self.cameraInfo = CameraInfo()

        self.sub_color = rospy.Subscriber("/camera/color/image_raw",
                                          Image,
                                          self.color_callback,
                                          queue_size=1)

        self.sub_depth = rospy.Subscriber(
            "/camera/aligned_depth_to_color/image_raw",
            Image,
            self.depth_callback,
            queue_size=1)
        self.sub_camera_info = rospy.Subscriber("/camera/depth/camera_info",
                                                CameraInfo,
                                                self.camera_info_callback,
                                                queue_size=1)

        self.target_pub = rospy.Publisher("/hand_detection/target_camera",
                                          PoseStamped,
                                          queue_size=1)

        self.color_img = np.zeros((480, 640, 3), np.uint8)
        self.depth_img = np.zeros((480, 640), np.uint8)
        self.img_width = 640
        self.img_height = 480
        self.hand_counter = 0
        self.not_hand_counter = 0
        self.hand_3D_pos = np.empty((0, 3), int)
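
The camera_info_callback wired above is not shown; a minimal sketch that caches the message and configures the PinholeCameraModel, plus an illustrative helper that back-projects a pixel to a viewing ray (the helper name and its use are assumptions):

    def camera_info_callback(self, msg):
        # Cache the calibration and configure the pinhole model from it.
        self.cameraInfo = msg
        self.cameraModel.fromCameraInfo(msg)

    def pixel_to_ray(self, u, v):
        # 3D ray in the camera frame passing through pixel (u, v).
        return self.cameraModel.projectPixelTo3dRay((u, v))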
 def __init__(self):
     #Get our publisher ready
     #    self.image_pub = rospy.Publisher("image_topic",Image,queue_size = 10)
     #Get CvBridge object ready. This will do the conversion cv2->ROS
     self.imu = self.ImuState()
     self.image_sub = rospy.Subscriber("/camera1/image_rect_color", Image,
                                       self.recv)
     #    self.landing_sub = rospy.Subscriber("/landing_pad/image_rect",Image,self.find_tag)
     self.camera_info_sub = rospy.Subscriber("/camera1/camera_info",
                                             CameraInfo, self.get_cam_info)
     self.imu_sub = rospy.Subscriber("/mavros/imu/data", Imu,
                                     self.update_orient)
     self.track_pub = rospy.Publisher("/tracker/tracked",
                                      Bool,
                                      queue_size=20)
     self.tag_track_sub = rospy.Subscriber("/detections/tracked", Bool,
                                           self.tag_cb)
     self.heading_pub = rospy.Publisher("tracker/heading",
                                        TwistStamped,
                                        queue_size=10)
     self.state_pub = rospy.Publisher("/lezl/state",
                                      TwistStamped,
                                      queue_size=10)
     self.landing_pub = rospy.Publisher("/camera1/target_rect",
                                        Image,
                                        queue_size=20)
     self.camera_info_pub = rospy.Publisher("/landing_pad/camera_info",
                                            CameraInfo,
                                            queue_size=20)
     self.listener = tf.TransformListener()
     #    self.image_sub = rospy.Subscriber("/tracker/image",Image,self.recv)
     self.bridge = CvBridge()
     self.tracked = Bool()
     self.tag_tracked = Bool()
     self.tracked.data = False
     self.tag_tracked.data = False
     self.heading = TwistStamped()
     self.cam_info = CameraInfo()