Example #1
# imports needed by this example; text_colors, show_image and the calibration globals
# (x_num, y_num, criteria, objpoints, the camera matrices) are defined elsewhere in the project
import os
import cv2
import numpy
import numpy as np
import rospy
import yaml
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image

def convert_image(msg, flag = False):
  '''
  Converts ROS image to OpenCV image, then blocks out errant pixels and rectifies.
  '''
  bridge = CvBridge()
  try:
    # cv_bridge automatically scales, we need to remove that behavior
    if bridge.encoding_to_dtype_with_channels(msg.encoding)[0] in ['uint16', 'int16']:
      mono16 = bridge.imgmsg_to_cv2(msg, '16UC1')
      mono8 = numpy.array(numpy.clip(mono16, 0, 255), dtype=numpy.uint8)
      img = mono8
    elif 'FC1' in msg.encoding:
      # floating point image handling
      img = bridge.imgmsg_to_cv2(msg, "passthrough")
      _, max_val, _, _ = cv2.minMaxLoc(img)
      if max_val > 0:
        scale = 255.0 / max_val
        img = (img * scale).astype(numpy.uint8)
      else:
        img = img.astype(numpy.uint8)
    else:
      img = bridge.imgmsg_to_cv2(msg, "mono8")
  except CvBridgeError as e:
    print(text_colors.WARNING + 'Warning: Image converted unsuccessfully before processing.' + text_colors.ENDCOLOR)
    raise e

  # Black out a rectangle in the top left of the image since there are often erroneous pixels there (image conversion error perhaps?)
  cv2.rectangle(img, (0,0), (30,1), 0, -1)  # thickness=-1 draws a filled rectangle in every OpenCV version
  show_image('blacked out', img, flag)

  return img
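
A minimal usage sketch for convert_image, assuming a ROS 1 node subscribed to a hypothetical depth topic (the node and topic names are placeholders, not part of the original snippet):

import rospy
from sensor_msgs.msg import Image

def image_callback(msg):
    # convert_image returns an 8-bit mono OpenCV image regardless of the input encoding
    img = convert_image(msg, flag=False)
    rospy.loginfo('received a %dx%d frame', img.shape[1], img.shape[0])

rospy.init_node('image_converter_example')
rospy.Subscriber('/camera/depth/image_raw', Image, image_callback)
rospy.spin()
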
class camera:

    def __init__(self):

        self.br = CvBridge()
        self.ir_image_sub = rospy.Subscriber("/kinect2/sd/image_ir",Image,self.ir_callback)
        # self.rgb_image_sub = rospy.Subscriber("/kinect2/hd/image_color",Image,self.rgb_callback)
        self.ir_img = None
        self.rgb_img = None

        self.rgb_rmat = None
        self.rgb_tvec = None
        self.ir_rmat = None
        self.ir_tvec = None

    def ir_callback(self,data):
        try:
            ir_img = self.br.imgmsg_to_cv2(data, "passthrough")
            ir_min = np.amin(ir_img)
            ir_max = np.amax(ir_img)

            factor = 255.0 / float(ir_max - ir_min)
            self.ir_img = np.minimum(np.maximum(ir_img - ir_min, 0) * factor, 255.0)
            self.ir_img = self.ir_img.astype(np.uint8)

            clahe = cv2.createCLAHE()
            self.ir_img = clahe.apply(self.ir_img)


        except CvBridgeError as e:
            print(e)
        
        # print "ir image size: ",self.ir_img.shape

        ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (x_num,y_num))
        cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
        cv2.imshow('ir_img',self.ir_img)
        cv2.waitKey(5)
        if ir_ret == True:
            ir_tempimg = self.ir_img.copy()
            cv2.cornerSubPix(ir_tempimg,ir_corners,(11,11),(-1,-1),criteria)            
            cv2.drawChessboardCorners(ir_tempimg, (x_num,y_num), ir_corners,ir_ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
            ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(objpoints, ir_corners, depth_mtx, depth_dist)
            self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)

            print("The world coordinate system's origin in camera's coordinate system:")
            print("===ir_camera rvec:")
            print(ir_rvec)
            print("===ir_camera rmat:")
            print(self.ir_rmat)
            print("===ir_camera tvec:")
            print(self.ir_tvec)
            print("ir_camera_shape: ")
            print(self.ir_img.shape)

            print("The camera origin in world coordinate system:")
            print("===camera rmat:")
            print(self.ir_rmat.T)
            print("===camera tvec:")
            print(-np.dot(self.ir_rmat.T, self.ir_tvec))

            directory = '/home/chentao/kinect2_calibration'
            if not os.path.exists(directory):
                os.makedirs(directory)
            with open("/home/chentao/kinect2_calibration/ir_camera_pose.yaml", "w") as depth_stream:
                data = {'rmat': self.ir_rmat.tolist(), 'tvec': self.ir_tvec.tolist()}
                yaml.dump(data, depth_stream)

            
            cv2.imshow('ir_img',ir_tempimg)
            cv2.waitKey(5)

    def rgb_callback(self,data):
        try:
            self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        gray = cv2.cvtColor(self.rgb_img,cv2.COLOR_BGR2GRAY)
        rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (x_num,y_num),None)
        cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
        cv2.imshow('rgb_img',self.rgb_img)
        cv2.waitKey(5)
        if rgb_ret == True:
            rgb_tempimg = self.rgb_img.copy()
            cv2.cornerSubPix(gray,rgb_corners,(5,5),(-1,-1),criteria)            
            cv2.drawChessboardCorners(rgb_tempimg, (x_num,y_num), rgb_corners,rgb_ret)
            rgb_rvec, self.rgb_tvec, rgb_inliers = cv2.solvePnPRansac(objpoints, rgb_corners, rgb_mtx, rgb_dist)
            self.rgb_rmat, _ = cv2.Rodrigues(rgb_rvec)
            print("The world coordinate system's origin in camera's coordinate system:")
            print("===rgb_camera rvec:")
            print(rgb_rvec)
            print("===rgb_camera rmat:")
            print(self.rgb_rmat)
            print("===rgb_camera tvec:")
            print(self.rgb_tvec)
            print("rgb_camera_shape: ")
            print(self.rgb_img.shape)

            print("The camera origin in world coordinate system:")
            print("===camera rmat:")
            print(self.rgb_rmat.T)
            print("===camera tvec:")
            print(-np.dot(self.rgb_rmat.T, self.rgb_tvec))

            directory = '/home/chentao/kinect2_calibration'
            if not os.path.exists(directory):
                os.makedirs(directory)

            with open("/home/chentao/kinect2_calibration/rgb_camera_pose.yaml", "w") as rgb_stream:
                data = {'rmat': self.rgb_rmat.tolist(), 'tvec': self.rgb_tvec.tolist()}
                yaml.dump(data, rgb_stream)

            
            cv2.imshow('rgb_img',rgb_tempimg)
            cv2.waitKey(5)

    def mkgray(self, msg):
        """
        Convert a message into a 8-bit 1 channel monochrome OpenCV image
        """
        # as cv_bridge automatically scales, we need to remove that behavior
        # TODO: get a Python API in cv_bridge to check for the image depth.
        if self.br.encoding_to_dtype_with_channels(msg.encoding)[0] in ['uint16', 'int16']:
            mono16 = self.br.imgmsg_to_cv2(msg, '16UC1')
            mono8 = np.array(np.clip(mono16, 0, 255), dtype=np.uint8)
            return mono8
        elif 'FC1' in msg.encoding:
            # floating point image handling
            img = self.br.imgmsg_to_cv2(msg, "passthrough")
            _, max_val, _, _ = cv2.minMaxLoc(img)
            if max_val > 0:
                scale = 255.0 / max_val
                mono_img = (img * scale).astype(np.uint8)
            else:
                mono_img = img.astype(np.uint8)
            return mono_img
        else:
            return self.br.imgmsg_to_cv2(msg, "mono8")
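
The prints in the callbacks above invert the board-to-camera extrinsics returned by solvePnPRansac: if rmat and tvec map chessboard (world) coordinates into the camera frame, then rmat.T and -rmat.T @ tvec give the camera pose in the world frame. A small standalone check of that identity (the rotation and translation values are arbitrary, not calibration output):

import numpy as np
import cv2

rvec = np.array([[0.1], [-0.2], [0.3]])   # arbitrary rotation vector
tvec = np.array([[0.5], [0.1], [1.2]])    # arbitrary world origin expressed in the camera frame
rmat, _ = cv2.Rodrigues(rvec)

cam_in_world = -np.dot(rmat.T, tvec)      # camera origin expressed in world coordinates

# mapping that point back into the camera frame must land on the camera origin
assert np.allclose(np.dot(rmat, cam_in_world) + tvec, 0)
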
Example #3
from velodyne_msgs.msg import VelodyneScan
# additional imports used below
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
# from rclpy.qos import QoSProfile, QoSReliabilityPolicy, QoSHistoryPolicy, QoSDurabilityPolicy  # only if the commented QoS profile is enabled

rclpy.init(args=None)

node = rclpy.create_node('blackfly')

# Modify the QoS profile of the publisher
'''qos_profile = QoSProfile(depth=10)
qos_profile.reliability = QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT
qos_profile.history = QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST
qos_profile.durability = QoSDurabilityPolicy.RMW_QOS_POLICY_DURABILITY_VOLATILE'''

publisher = node.create_publisher(Image, 'blackfly', 1)  #qos_profile)

br = CvBridge()
dtype, n_channels = br.encoding_to_dtype_with_channels('8UC3')


class TriggerType:
    SOFTWARE = 1
    HARDWARE = 2


CHOSEN_TRIGGER = TriggerType.SOFTWARE


class TriggerSubscriber(Node):
    def __init__(self):
        super().__init__('Trigger_Subscriber')
        self.subscription = self.create_subscription(VelodyneScan,
                                                     '/velodyne_packets',
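
The create_subscription call above is cut off in the source, so the trigger callback is unknown. As a complement, here is a minimal sketch of how a captured frame could be sent back out through the publisher created earlier with CvBridge; publish_frame and the synthetic frame are illustrative assumptions, not part of the original:

import numpy as np

def publish_frame(frame_bgr):
    # frame_bgr is expected to be an 8UC3 (BGR) numpy array from the camera driver
    msg = br.cv2_to_imgmsg(frame_bgr, encoding='bgr8')
    msg.header.stamp = node.get_clock().now().to_msg()
    msg.header.frame_id = 'blackfly'
    publisher.publish(msg)

# publish one synthetic black frame as a smoke test
publish_frame(np.zeros((480, 640, 3), dtype=np.uint8))
rclpy.spin_once(node, timeout_sec=0.1)
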
Example #4
class camera:
    def __init__(self):

        self.br = CvBridge()
        # self.ir_image_sub = rospy.Subscriber("/camera/ir/image_raw",Image,self.ir_callback)
        self.rgb_image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image,
                                              self.rgb_callback)
        self.ir_img = None
        self.rgb_img = None

        self.rgb_rmat = None
        self.rgb_tvec = None
        self.ir_rmat = None
        self.ir_tvec = None

    def isclose(self, x, y, rtol=1.e-5, atol=1.e-8):
        return bool(abs(x - y) <= atol + rtol * abs(y))

    def euler_angles_from_rotation_matrix(self, R):
        '''
        From a paper by Gregory G. Slabaugh (undated),
        "Computing Euler angles from a rotation matrix".
        '''
        phi = 0.0
        #if self.isclose(R[2,0],-1.0):
        #theta = math.pi/2.0
        #psi = math.atan2(R[0,1],R[0,2])
        #elif self.isclose(R[2,0],1.0):
        #theta = -math.pi/2.0
        #psi = math.atan2(-R[0,1],-R[0,2])
        #else:
        theta = -math.asin(R[2, 0])
        cos_theta = math.cos(theta)
        psi = math.atan2(R[2, 1] / cos_theta, R[2, 2] / cos_theta)
        phi = math.atan2(R[1, 0] / cos_theta, R[0, 0] / cos_theta)
        return psi, theta, phi

    def ir_callback(self, data):
        try:
            self.ir_img = self.mkgray(data)
        except CvBridgeError as e:
            print(e)

        ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img,
                                                       (x_num, y_num))
        cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
        cv2.imshow('ir_img', self.ir_img)
        cv2.waitKey(5)
        if ir_ret == True:
            ir_tempimg = self.ir_img.copy()
            cv2.cornerSubPix(ir_tempimg, ir_corners, (11, 11), (-1, -1),
                             criteria)
            cv2.drawChessboardCorners(ir_tempimg, (x_num, y_num), ir_corners,
                                      ir_ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
            _, ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(
                objpoints, ir_corners, depth_mtx, depth_dist)
            self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)

            print(
                "The world coordinate system's origin in camera's coordinate system:"
            )
            print("===ir_camera rvec:")
            print(ir_rvec)
            print("===ir_camera rmat:")
            print(self.ir_rmat)
            print("===ir_camera tvec:")
            print(self.ir_tvec)
            print("ir_camera_shape: ")
            print(self.ir_img.shape)

            print("The camera origin in world coordinate system:")
            print("===camera rmat:")
            print(self.ir_rmat.T)
            print("===camera tvec:")
            print(-np.dot(self.ir_rmat.T, self.ir_tvec))

            with open(
                    "/home/qiang/SWLib/xtion_calib_tutorial/kinect_calibration/ir_camera_pose.yaml",
                    "w") as depth_stream:
                data = {
                    'rmat': self.ir_rmat.tolist(),
                    'tvec': self.ir_tvec.tolist()
                }
                yaml.dump(data, depth_stream)

            cv2.imshow('ir_img', ir_tempimg)
            cv2.waitKey(5)

    def rgb_callback(self, data):
        try:
            self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        gray = cv2.cvtColor(self.rgb_img, cv2.COLOR_BGR2GRAY)
        rgb_ret, rgb_corners = cv2.findChessboardCorners(
            gray, (x_num, y_num), None)
        cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
        cv2.imshow('rgb_img', self.rgb_img)
        cv2.waitKey(5)
        if rgb_ret == True:
            rgb_tempimg = self.rgb_img.copy()
            cv2.cornerSubPix(gray, rgb_corners, (5, 5), (-1, -1), criteria)
            cv2.drawChessboardCorners(rgb_tempimg, (x_num, y_num), rgb_corners,
                                      rgb_ret)
            _, rgb_rvec, self.rgb_tvec, rgb_inliers = cv2.solvePnPRansac(
                objpoints, rgb_corners, rgb_mtx, rgb_dist)
            self.rgb_rmat, _ = cv2.Rodrigues(rgb_rvec)
            print(
                "The world coordinate system's origin in camera's coordinate system:"
            )
            print("===rgb_camera rvec:")
            print(rgb_rvec)
            print("===rgb_camera rmat:")
            print(self.rgb_rmat)
            type(self.rgb_rmat)
            psi, theta, phi = self.euler_angles_from_rotation_matrix(
                self.rgb_rmat)

            print("===rgb_camera rpy: %f,%f,%f", psi, theta, phi)

            print("===rgb_camera tvec:")
            print(self.rgb_tvec)
            print("rgb_camera_shape: ")
            print(self.rgb_img.shape)

            print("The camera origin in world coordinate system:")
            print("===camera rmat:")
            print(self.rgb_rmat.T)
            print("===camera tvec:")
            print(-np.dot(self.rgb_rmat.T, self.rgb_tvec))

            with open(
                    "/home/qiang/SWLib/xtion_calib_tutorial/kinect_calibration/rgb_camera_pose.yaml",
                    "w") as rgb_stream:
                data = {
                    'rmat': self.rgb_rmat.tolist(),
                    'tvec': self.rgb_tvec.tolist()
                }
                yaml.dump(data, rgb_stream)

            cv2.imshow('rgb_img', rgb_tempimg)
            cv2.waitKey(5)

    def mkgray(self, msg):
        """
        Convert a message into a 8-bit 1 channel monochrome OpenCV image
        """
        # as cv_bridge automatically scales, we need to remove that behavior
        # TODO: get a Python API in cv_bridge to check for the image depth.
        if self.br.encoding_to_dtype_with_channels(
                msg.encoding)[0] in ['uint16', 'int16']:
            mono16 = self.br.imgmsg_to_cv2(msg, '16UC1')
            mono8 = np.array(np.clip(mono16, 0, 255), dtype=np.uint8)
            return mono8
        elif 'FC1' in msg.encoding:
            # floating point image handling
            img = self.br.imgmsg_to_cv2(msg, "passthrough")
            _, max_val, _, _ = cv2.minMaxLoc(img)
            if max_val > 0:
                scale = 255.0 / max_val
                mono_img = (img * scale).astype(np.uint8)
            else:
                mono_img = img.astype(np.uint8)
            return mono_img
        else:
            return self.br.imgmsg_to_cv2(msg, "mono8")
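
euler_angles_from_rotation_matrix above follows Slabaugh's formulas for the decomposition R = Rz(phi) * Ry(theta) * Rx(psi), ignoring the gimbal-lock branches that remain commented out. A standalone round-trip check with arbitrary test angles (not taken from the calibration code):

import math
import numpy as np

def rot_x(a): return np.array([[1, 0, 0], [0, math.cos(a), -math.sin(a)], [0, math.sin(a), math.cos(a)]])
def rot_y(a): return np.array([[math.cos(a), 0, math.sin(a)], [0, 1, 0], [-math.sin(a), 0, math.cos(a)]])
def rot_z(a): return np.array([[math.cos(a), -math.sin(a), 0], [math.sin(a), math.cos(a), 0], [0, 0, 1]])

psi0, theta0, phi0 = 0.2, -0.4, 1.1                 # roll, pitch, yaw test values
R = np.dot(rot_z(phi0), np.dot(rot_y(theta0), rot_x(psi0)))

# same extraction as the method above
theta = -math.asin(R[2, 0])
cos_theta = math.cos(theta)
psi = math.atan2(R[2, 1] / cos_theta, R[2, 2] / cos_theta)
phi = math.atan2(R[1, 0] / cos_theta, R[0, 0] / cos_theta)
assert np.allclose([psi, theta, phi], [psi0, theta0, phi0])
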
class registration:
    def __init__(self):

        self.br = CvBridge()
        #self.depth_image_sub = rospy.Subscriber("/camera/depth/image_raw",Image,self.depth_callback)

        # As you cannot get rgb and ir images simultaneously from kinect, you will need to get the coordinates 
        # of the corners separately on rgb image and ir image
        # To do this, uncomment the following two lines one at a time
        #self.rgb_image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,self.rgb_calib_callback)
        #self.ir_image_sub = rospy.Subscriber("/camera/ir/image_raw",Image,self.ir_calib_callback)
        self.ir_img = None
        self.rgb_img = None

        self.rgb_rmat = None
        self.rgb_tvec = None
        self.ir_rmat = None
        self.ir_tvec = None

        self.ir_to_rgb_rmat = None
        self.ir_to_rgb_tvec = None
        self.depth_image = None
        self.rgb_image = None
        self.rgb_corners = None
        self.ir_corners = None

        self.load_intrinsics()
        self.load_corners()
    
    def depth_callback(self, data):
        try:
            self.depth_image = self.br.imgmsg_to_cv2(data)
        except CvBridgeError as e:
            print(e)


    def ir_calib_callback(self,data):
        try:
            self.ir_img = self.mkgray(data)
        except CvBridgeError as e:
            print(e)

        ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (y_num,x_num))
        cv2.imshow('ir_img',self.ir_img)
        cv2.waitKey(5)
        if ir_ret == True:
            ir_tempimg = self.ir_img.copy()
            cv2.cornerSubPix(ir_tempimg,ir_corners,(11,11),(-1,-1),criteria)            
            cv2.drawChessboardCorners(ir_tempimg, (y_num,x_num), ir_corners,ir_ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)

            depth_stream = open("/home/chentao/kinect_calibration/ir_camera_corners.yaml", "w")
            data = {'corners':ir_corners.tolist()}
            yaml.dump(data, depth_stream)

            cv2.imshow('ir_img',ir_tempimg)
            cv2.waitKey(5)

    def rgb_calib_callback(self,data):
        try:
            self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        gray = cv2.cvtColor(self.rgb_img,cv2.COLOR_BGR2GRAY)
        rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (y_num,x_num),None)

        cv2.imshow('rgb_img',self.rgb_img)
        cv2.waitKey(5)
        if rgb_ret == True:
            rgb_tempimg = self.rgb_img.copy()
            cv2.cornerSubPix(gray,rgb_corners,(11,11),(-1,-1),criteria)            
            cv2.drawChessboardCorners(rgb_tempimg, (y_num,x_num), rgb_corners,rgb_ret)

            rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_corners.yaml", "w")
            data = {'corners':rgb_corners.tolist()}
            yaml.dump(data, rgb_stream)

            cv2.imshow('rgb_img',rgb_tempimg)
            cv2.waitKey(5)

    def load_intrinsics(self):
        ir_stream = open("/home/chentao/kinect_calibration/depth_1504270110.yaml", "r")
        ir_doc = yaml.safe_load(ir_stream)
        self.ir_mtx = np.array(ir_doc['camera_matrix']['data']).reshape(3,3)
        self.ir_dist = np.array(ir_doc['distortion_coefficients']['data'])
        ir_stream.close()

        rgb_stream = open("/home/chentao/kinect_calibration/rgb_1504270110.yaml", "r")
        rgb_doc = yaml.safe_load(rgb_stream)
        self.rgb_mtx = np.array(rgb_doc['camera_matrix']['data']).reshape(3,3)
        self.rgb_dist = np.array(rgb_doc['distortion_coefficients']['data'])
        rgb_stream.close()

    def load_corners(self):
        ir_stream = open("/home/chentao/kinect_calibration/ir_camera_corners.yaml", "r")
        ir_doc = yaml.safe_load(ir_stream)
        self.ir_corners = np.array(ir_doc['corners']).reshape(-1,2).astype('float32')
        ir_stream.close()

        rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_corners.yaml", "r")
        rgb_doc = yaml.safe_load(rgb_stream)
        self.rgb_corners = np.array(rgb_doc['corners']).reshape(-1,2)
        self.rgb_corners = self.rgb_corners.astype('float32')
        rgb_stream.close()


    def ir_to_rgb(self):
        if self.rgb_corners is not None and self.ir_corners is not None:

            _,_,_,_,_,R,T,E,F = cv2.stereoCalibrate([objpoints], [self.ir_corners], [self.rgb_corners],(480, 640), self.ir_mtx, self.ir_dist, self.rgb_mtx, self.rgb_dist, flags = cv2.CALIB_FIX_INTRINSIC)
            print("R:")
            print(R)
            print("T:")
            print(T)

    def register_depth_to_rgb(self):
        if self.depth_image is None or self.rgb_image is None:
            return

        self.registered_depth = np.zeros(self.depth_image.shape)
        self.registered_rgb = np.zeros(self.rgb_image.shape, dtype=self.rgb_image.dtype)
        for row_i in range(self.depth_image.shape[0]):
            for column_j in range(self.depth_image.shape[1]):
                depth_pix_point = np.array([column_j, row_i, self.depth_image[row_i, column_j]])   # (x,y,z)
                # ir_mtx doubles as the depth-camera intrinsics loaded in load_intrinsics()
                depth_coord_point = np.dot(np.linalg.inv(self.ir_mtx), depth_pix_point)
                rgb_coord_point = np.dot(self.ir_to_rgb_rmat, depth_coord_point).reshape(3, 1) + self.ir_to_rgb_tvec
                rgb_pix_point = np.dot(self.rgb_mtx, rgb_coord_point)
                rgb_x = int(rgb_pix_point[0] / float(rgb_pix_point[2]))
                rgb_y = int(rgb_pix_point[1] / float(rgb_pix_point[2]))
                rgb_x = np.clip(rgb_x, 0, self.rgb_image.shape[1] - 1)
                rgb_y = np.clip(rgb_y, 0, self.rgb_image.shape[0] - 1)
                self.registered_rgb[row_i, column_j, :] = self.rgb_image[rgb_y, rgb_x, :]
        cv2.imshow("Registered_rgb", self.registered_rgb)
        cv2.imshow("RGB", self.rgb_image)
        cv2.imshow("Depth",self.depth_image)
        cv2.waitKey(10)





    def get_ir_to_rgb_rmat(self):
        if self.ir_to_rgb_rmat is not None:
            return self.ir_to_rgb_rmat


    def get_ir_to_rgb_tvec(self):
        if self.ir_to_rgb_tvec is not None:
            return self.ir_to_rgb_tvec

    def mkgray(self, msg):
        """
        Convert a message into a 8-bit 1 channel monochrome OpenCV image
        """
        # as cv_bridge automatically scales, we need to remove that behavior
        # TODO: get a Python API in cv_bridge to check for the image depth.
        if self.br.encoding_to_dtype_with_channels(msg.encoding)[0] in ['uint16', 'int16']:
            mono16 = self.br.imgmsg_to_cv2(msg, '16UC1')
            mono8 = np.array(np.clip(mono16, 0, 255), dtype=np.uint8)
            return mono8
        elif 'FC1' in msg.encoding:
            # floating point image handling
            img = self.br.imgmsg_to_cv2(msg, "passthrough")
            _, max_val, _, _ = cv2.minMaxLoc(img)
            if max_val > 0:
                scale = 255.0 / max_val
                mono_img = (img * scale).astype(np.uint8)
            else:
                mono_img = img.astype(np.uint8)
            return mono_img
        else:
            return self.br.imgmsg_to_cv2(msg, "mono8")
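
None of the snippets include a node entry point. A minimal main for the registration class could look like the sketch below, assuming the intrinsics and corner YAML files it loads in __init__ already exist on disk; the node name is a placeholder:

def main():
    rospy.init_node('kinect_registration', anonymous=True)
    reg = registration()   # subscribes via the callbacks that are uncommented in __init__
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()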