def find_extrinsics(self):
    """Locate the chessboard in the masked image and recompute the extrinsics.

    Masks the image to a circle around the (undistorted) plate center, finds the
    chessboard corners, solves for rvec/tvec, derives the camera<->plate
    homography pair (self.Hinv, self.H), broadcasts the Checkerboard pose in the
    Image frame, and finally remaps the extrinsics into the Plate frame.
    """
    # Update image mask: restrict the corner search to a disc around the plate
    # center, then AND it into the working image in place.
    cv.Circle(self.im_mask,(int(self.undistorted_plate.point.x),int(self.undistorted_plate.point.y)), int(self.mask_radius), self.color_max, cv.CV_FILLED)
    cv.And(self.im,self.im_mask,self.im)
    (corners_status, corners) = cv.FindChessboardCorners(self.im, self.pattern_size)
    # Proceed only on a full detection (all expected corners found).
    if corners_status and (len(corners) == self.board_corner_number):
        # Refine corner locations to sub-pixel accuracy and show them.
        sub_corners = cv.FindCornerSubPix(self.im, corners, self.win, self.zero_zone, self.criteria)
        cv.DrawChessboardCorners(self.im_display, self.pattern_size, sub_corners, corners_status)
        # Fill self.image_points / self.plate_points from the detected corners.
        self.add_board_to_data(corners)
        # Solve PnP: writes the board pose into self.rvec / self.tvec in place.
        cv.FindExtrinsicCameraParams2(self.plate_points, self.image_points, self.intrinsic_matrix, self.distortion_coeffs, self.rvec, self.tvec)
        self.rvec_array = cvNumpy.mat_to_array(self.rvec).squeeze()
        self.tvec_array = cvNumpy.mat_to_array(self.tvec).squeeze()
        # Axis-angle magnitude of the Rodrigues vector.
        rvec_angle = numpy.linalg.norm(self.rvec_array)
        R = tf.transformations.rotation_matrix(rvec_angle,self.rvec_array)
        T = tf.transformations.translation_matrix(self.tvec_array)
        # Wsub = [r1 r2 t]: rotation columns 1-2 plus translation, i.e. the
        # world-to-image mapping restricted to the z=0 plane.
        Wsub = numpy.zeros((3,3))
        Wsub[:,:-1] = R[:-1,:-2]
        Wsub[:,-1] = T[:-1,-1]
        # Homography (plate -> image), normalized so Hinv[2,2] == 1.
        Hinv = numpy.dot(self.M,Wsub)
        Hinv = Hinv/Hinv[-1,-1]
        H = numpy.linalg.inv(Hinv)
        self.Hinv = Hinv
        self.H = H
        # Publish the board pose in the Image frame.
        self.tf_broadcaster.sendTransform(self.tvec_array,
                                          tf.transformations.quaternion_about_axis(rvec_angle, self.rvec_array),
                                          rospy.Time.now(),
                                          "Checkerboard",
                                          "Image")
        # NOTE(review): the collapsed source does not show the nesting of the
        # two calls below; they are assumed to run only on a successful
        # detection (they consume the rvec/tvec just computed) -- confirm.
        self.draw_origin(self.im_display)
        self.remap_extrinsics()
def __init__(self):
    """Load the stored camera intrinsics/extrinsics and precompute the
    camera<->plate homography pair (self.Hinv, self.H)."""
    (intrinsic_matrix,distortion_coeffs) = CameraParameters.intrinsic("undistorted")
    (rvec,tvec) = CameraParameters.extrinsic("plate")
    intrinsic_matrix = cvNumpy.mat_to_array(intrinsic_matrix)
    rvec = cvNumpy.mat_to_array(rvec).squeeze()
    tvec = cvNumpy.mat_to_array(tvec).squeeze()

    # Listener/Subscribers
    self.tf_listener = tf.TransformListener()

    # Axis-angle magnitude of the Rodrigues rotation vector.
    rvec_angle = numpy.linalg.norm(rvec)
    R = tf.transformations.rotation_matrix(rvec_angle,rvec)
    T = tf.transformations.translation_matrix(tvec)

    # Wsub = [r1 r2 t]: first two rotation columns plus translation, i.e. the
    # plane-to-image mapping restricted to z=0.
    Wsub = numpy.zeros((3,3))
    Wsub[:,:-1] = R[:-1,:-2]
    Wsub[:,-1] = T[:-1,-1]

    # BUG FIX: `M` was previously undefined here (its assignment had been
    # commented out), so the numpy.dot() below raised NameError.  Restore it,
    # zeroing the translation entries so the homography is with respect to the
    # Camera coordinate system (same convention as the sibling __init__).
    M = intrinsic_matrix
    M[:-1,-1] = 0

    # Homography (plate -> image), normalized so Hinv[2,2] == 1, and its inverse.
    Hinv = numpy.dot(M,Wsub)
    Hinv = Hinv/Hinv[-1,-1]
    H = numpy.linalg.inv(Hinv)
    self.Hinv = Hinv
    self.H = H
    # (Removed: a dead 4x4 R/T recomputation that fed only commented-out code.)
def __init__(self):
    """Set up the extrinsic-calibration node: display window, ROS pub/sub,
    fonts, camera intrinsics, and chessboard-pattern bookkeeping."""
    self.initialized = False
    self.images_initialized = False
    self.undistorted_saved = False
    self.distorted_saved = False

    cv.NamedWindow("Presentation", 1)

    self.tf_listener = tf.TransformListener()
    self.tf_broadcaster = tf.TransformBroadcaster()
    self.bridge = CvBridge()

    self.image_undistorted_sub = rospy.Subscriber("UndistortedImage", Image, self.image_undistorted_callback)
    # BUG FIX: this subscription previously overwrote self.image_undistorted_sub
    # (the subscription itself stayed alive, but the handle was lost); store it
    # under its own attribute name.
    self.image_distorted_sub = rospy.Subscriber("OriginalImage", Image, self.image_distorted_callback)
    self.joy_sub = rospy.Subscriber("Joystick/Commands", JoystickCommands, self.joy_callback)

    # Drawing parameters.
    self.color_max = 255
    self.font = cv.InitFont(cv.CV_FONT_HERSHEY_TRIPLEX,0.5,0.5)
    self.font_color = cv.CV_RGB(self.color_max,0,0)

    # Points of interest expressed in the Camera frame.
    self.camera_plate = PointStamped()
    self.camera_plate.header.frame_id = "Camera"
    self.camera_origin = PointStamped()
    self.camera_origin.header.frame_id = "Camera"
    self.camera_origin.point.x = 0
    self.camera_origin.point.y = 0

    (self.intrinsic_matrix,self.distortion_coeffs) = CameraParameters.intrinsic("undistorted")
    self.KK_cx = self.intrinsic_matrix[0,2]
    self.KK_cy = self.intrinsic_matrix[1,2]
    self.M = cvNumpy.mat_to_array(self.intrinsic_matrix)
    # Makes with respect to Camera coordinate system instead of Undistorted
    self.M[:-1,-1] = 0

    # Pattern info: 8x6 inner corners, 15 (units as stored in the calibration
    # files -- presumably mm; TODO confirm) per checker square.
    self.pattern_size = (8,6)
    self.col_corner_number = self.pattern_size[0]
    self.row_corner_number = self.pattern_size[1]
    self.board_corner_number = self.col_corner_number * self.row_corner_number
    self.checker_size = 15
    # Sub-pixel corner refinement parameters.
    self.win = (4,4)
    self.zero_zone = (2,2)
    self.criteria = (cv.CV_TERMCRIT_ITER+cv.CV_TERMCRIT_EPS,100,.01)

    # Work matrices for the PnP solve and origin projection.
    self.image_points = cv.CreateMat(self.board_corner_number,2,cv.CV_32FC1)
    self.plate_points = cv.CreateMat(self.board_corner_number,3,cv.CV_32FC1)
    self.point_counts = cv.CreateMat(1,1,cv.CV_32SC1)
    self.rvec = cv.CreateMat(1,3,cv.CV_32FC1)
    self.tvec = cv.CreateMat(1,3,cv.CV_32FC1)
    self.origin_points = cv.CreateMat(4,3,cv.CV_32FC1)
    self.origin_points_projected = cv.CreateMat(4,2,cv.CV_32FC1)

    # BUG FIX: cv.SetZero() operates in place and returns None, so the original
    # `self.rvec_zeros = cv.SetZero(cv.CreateMat(...))` stored None instead of
    # a zeroed matrix.  Create the matrices first, then zero them.
    self.rvec_zeros = cv.CreateMat(1,3,cv.CV_32FC1)
    cv.SetZero(self.rvec_zeros)
    self.tvec_zeros = cv.CreateMat(1,3,cv.CV_32FC1)
    cv.SetZero(self.tvec_zeros)

    self.initialized = True
def CameraInfo_callback (self, msgCameraInfo):
    """Cache the camera intrinsics from the first CameraInfo message and derive
    the plate<->image homography pair (self.Hinv, self.H) from the stored
    'plate' extrinsics.

    NOTE(review): the collapsed source does not show whether the computation
    was nested under the guard or ran on every message; nesting it here is
    assumed -- the resulting values are identical either way, since all inputs
    are static after the first message.  Confirm against version control.
    """
    if self.camerainfo is None:
        self.camerainfo = msgCameraInfo
        # Intrinsic matrix K as a 3x3 numpy array.
        M = N.reshape(N.array(self.camerainfo.K),[3,3])
        M[:-1,-1] = 0  # Zero the translation entries (1,3) and (2,3).
        (rvec, tvec) = CameraParameters.extrinsic("plate")
        rvec = cvNumpy.mat_to_array(rvec).squeeze()
        tvec = cvNumpy.mat_to_array(tvec).squeeze()
        # Axis-angle magnitude of the Rodrigues rotation vector.
        angleRvec = N.linalg.norm(rvec)
        R = tf.transformations.rotation_matrix(angleRvec, rvec)
        T = tf.transformations.translation_matrix(tvec)
        # Wsub = [r1 r2 t]: the world-to-image mapping restricted to z=0.
        self.Wsub = N.zeros((3,3))
        self.Wsub[:,:-1] = R[:-1,:-2]
        self.Wsub[:,-1] = T[:-1,-1]
        # Homography (plate -> image), normalized so Hinv[2,2] == 1.
        self.Hinv = N.dot(M, self.Wsub)
        self.Hinv = self.Hinv / self.Hinv[-1,-1]
        self.H = N.linalg.inv(self.Hinv)
def remap_extrinsics(self):
    """Re-express the board extrinsics in the Plate frame.

    Maps the plate origin and a point one checker-square along +x through the
    camera->plate homography self.H, derives the in-plane rotation of the
    Plate frame, broadcasts the Plate pose relative to the Checkerboard, then
    refreshes self.rvec / self.tvec from the Image->Plate transform so that
    subsequent drawing uses the remapped extrinsics.
    """
    px = self.camera_plate.point.x
    py = self.camera_plate.point.y
    # Two homogeneous camera points: the plate origin and a point offset by
    # one checker square along +x.
    camera_pts = numpy.array([[px, px + self.checker_size],
                              [py, py],
                              [1, 1]])
    plate_pts = numpy.dot(self.H, camera_pts)

    # In-plane angle of the +x checker direction after mapping onto the plate.
    dx = plate_pts[0,1] - plate_pts[0,0]
    dy = plate_pts[1,1] - plate_pts[1,0]
    rot_angle = numpy.arctan2(dy, dx)

    # Translation of the Plate frame; the plate lies in the z=0 plane.
    checkerboard_plate_vector = plate_pts[:,0]
    checkerboard_plate_vector[2] = 0
    self.tf_broadcaster.sendTransform(checkerboard_plate_vector,
                                      tf.transformations.quaternion_from_euler(0, 0, rot_angle),
                                      rospy.Time.now(),
                                      "Plate",
                                      "Checkerboard")
    try:
        (trans, rot_quat) = self.tf_listener.lookupTransform('Image', 'Plate', rospy.Time(0))
        # Convert the quaternion to a 3x3 rotation and then to a Rodrigues
        # vector; cv.Rodrigues2 writes into self.rvec in place.
        rot3 = tf.transformations.quaternion_matrix(rot_quat)[0:3,0:3].astype('float32')
        cv.Rodrigues2(cvNumpy.array_to_mat(rot3), self.rvec)
        self.rvec_array = cvNumpy.mat_to_array(self.rvec).squeeze()
        self.tvec_array = numpy.array(trans)
        # Rebind the cv mats to the refreshed arrays.
        self.rvec = cvNumpy.array_to_mat(self.rvec_array)
        self.tvec = cvNumpy.array_to_mat(self.tvec_array)
        display_text = "rvec = [%0.3f, %0.3f, %0.3f]" % (self.rvec_array[0],self.rvec_array[1],self.rvec_array[2])
        cv.PutText(self.im_display,display_text,(25,420),self.font,self.font_color)
        display_text = "tvec = [%0.3f, %0.3f, %0.3f]" % (self.tvec_array[0],self.tvec_array[1],self.tvec_array[2])
        cv.PutText(self.im_display,display_text,(25,440),self.font,self.font_color)
        self.draw_origin(self.im_display)
    except (tf.LookupException, tf.ConnectivityException):
        # Transform not available yet; try again on the next call.
        pass
def __init__(self):
    """Load the stored camera calibration and precompute the plate<->camera
    homography pair (self.Hinv, self.H)."""
    (intrinsic_matrix, distortion_coeffs) = CameraParameters.intrinsic("undistorted")
    (rvec, tvec) = CameraParameters.extrinsic("plate")
    intrinsic_matrix = cvNumpy.mat_to_array(intrinsic_matrix)
    rvec = cvNumpy.mat_to_array(rvec).squeeze()
    tvec = cvNumpy.mat_to_array(tvec).squeeze()

    # Build the 4x4 rotation/translation from the Rodrigues vector.
    angle = numpy.linalg.norm(rvec)
    rot4 = tf.transformations.rotation_matrix(angle, rvec)
    trans4 = tf.transformations.translation_matrix(tvec)

    # W = [r1 r2 t]: the pose restricted to the z=0 plane.
    W = numpy.zeros((3,3))
    W[:,:-1] = rot4[:-1,:-2]
    W[:,-1] = trans4[:-1,-1]

    # Makes with respect to Camera coordinate system instead of Undistorted
    K = intrinsic_matrix
    K[:-1,-1] = 0

    # Homography (plate -> image), normalized so the (2,2) entry is 1.
    homography_inv = numpy.dot(K, W)
    homography_inv = homography_inv / homography_inv[-1,-1]
    self.Hinv = homography_inv
    self.H = numpy.linalg.inv(homography_inv)
def draw_origin(self,im_color):
    """Draw the plate origin axes (x red, y green, z blue) onto im_color by
    projecting them through the current rvec/tvec and the camera model."""
    if self.camerainfo is not None:
        # Origin plus one checker-square along each axis, in plate coordinates.
        cv.SetZero(self.origin_points)
        cv.SetReal2D(self.origin_points,1,0,self.checker_size) # x-direction
        cv.SetReal2D(self.origin_points,2,1,self.checker_size) # y-direction
        cv.SetReal2D(self.origin_points,3,2,self.checker_size) # z-direction
        axis_line_width = 3
        rect_line_width = 2  # NOTE(review): unused here -- confirm it is needed.
        # Camera matrix K (3x3) and distortion coefficients D (5x1) as cv mats.
        cvmatK = cv.fromarray(N.reshape(self.camerainfo.K,[3,3]))
        cvmatD = cv.fromarray(N.reshape(self.camerainfo.D,[5,1]))
        # Project the four 3D points into the image (in-place output).
        cv.ProjectPoints2(self.origin_points,
                          self.rvec,
                          self.tvec,
                          cvmatK,
                          cvmatD,
                          self.origin_points_projected)
        try:
            a = cvNumpy.mat_to_array(self.origin_points_projected)

            # origin point
            pt1 = tuple(a[0])

            # draw x-axis
            pt2 = tuple(a[1])
            cv.Line(im_color,pt1,pt2,cv.CV_RGB(self.color_max,0,0),axis_line_width)

            # draw y-axis
            pt2 = tuple(a[2])
            cv.Line(im_color,pt1,pt2,cv.CV_RGB(0,self.color_max,0),axis_line_width)

            # draw z-axis
            pt2 = tuple(a[3])
            cv.Line(im_color,pt1,pt2,cv.CV_RGB(0,0,self.color_max),axis_line_width)
        # NOTE(review): bare except deliberately left as-is (best-effort
        # drawing when projection yields unusable points), but it also hides
        # real bugs -- consider narrowing it.
        except:
            pass
def remap_extrinsics(self):
    """Re-express the board extrinsics in the Plate frame.

    Maps the plate origin and a point one checker-square along +x through the
    camera->plate homography self.H, derives the in-plane rotation of the
    Plate frame, broadcasts the Plate pose relative to the Checkerboard, and
    then refreshes self.rvec / self.tvec from the Image->Plate transform so
    that subsequent drawing uses the remapped extrinsics.

    (A large block of commented-out experimental W/W2/W3 matrix code was
    removed here; see version control if it is ever needed again.)
    """
    # Two camera points: the plate origin and a point one checker square
    # along +x, in homogeneous coordinates.
    X = [self.camera_plate.point.x, self.camera_plate.point.x + self.checker_size]
    Y = [self.camera_plate.point.y, self.camera_plate.point.y]
    Z = [1,1]
    camera_points = numpy.array([X,Y,Z])
    plate_points = numpy.dot(self.H,camera_points)
    x0 = plate_points[0,0]
    x1 = plate_points[0,1]
    y0 = plate_points[1,0]
    y1 = plate_points[1,1]
    # In-plane angle of the +x checker direction after mapping to the plate.
    rot_angle = numpy.arctan2((y1-y0),(x1-x0))
    # Translation of the Plate frame; the plate lies in the z=0 plane.
    checkerboard_plate_vector = plate_points[:,0]
    checkerboard_plate_vector[2] = 0
    self.tf_broadcaster.sendTransform(checkerboard_plate_vector,
                                      tf.transformations.quaternion_from_euler(0, 0, rot_angle),
                                      rospy.Time.now(),
                                      "Plate",
                                      "Checkerboard")
    try:
        (trans,rot_quat) = self.tf_listener.lookupTransform('Image', 'Plate', rospy.Time(0))
        # Quaternion -> 3x3 rotation -> Rodrigues vector; cv.Rodrigues2
        # writes into self.rvec in place.
        rot_array = tf.transformations.quaternion_matrix(rot_quat)
        rot_array = rot_array[0:3,0:3]
        rot_array = rot_array.astype('float32')
        rot_mat = cvNumpy.array_to_mat(rot_array)
        cv.Rodrigues2(rot_mat,self.rvec)
        rvec_array_new = cvNumpy.mat_to_array(self.rvec).squeeze()
        tvec_array_new = numpy.array(trans)
        self.rvec_array = rvec_array_new
        self.tvec_array = tvec_array_new
        # Rebind the cv mats to the refreshed arrays.
        self.rvec = cvNumpy.array_to_mat(self.rvec_array)
        self.tvec = cvNumpy.array_to_mat(self.tvec_array)
        display_text = "rvec = [%0.3f, %0.3f, %0.3f]" % (self.rvec_array[0],self.rvec_array[1],self.rvec_array[2])
        cv.PutText(self.im_display,display_text,(25,420),self.font,self.font_color)
        display_text = "tvec = [%0.3f, %0.3f, %0.3f]" % (self.tvec_array[0],self.tvec_array[1],self.tvec_array[2])
        cv.PutText(self.im_display,display_text,(25,440),self.font,self.font_color)
        self.draw_origin(self.im_display)
    except (tf.LookupException, tf.ConnectivityException):
        # Transform not available yet; try again on the next call.
        pass
def draw_grid(self, im_color):
    """Draw a checker-spaced grid (x-lines red, y-lines green) onto im_color by
    projecting plate-frame line endpoints through rvec/tvec and the camera
    model.  Does nothing until camera intrinsics have been received."""
    if self.camerainfo is not None:
        # Line coordinates centered on the plate origin, spaced one checker
        # square apart, spanning (num_grid_lines-1) squares in each direction.
        points_range = (N.array(range(self.num_grid_lines)) - (self.num_grid_lines-1)/2) * self.checker_size
        points_zeros = N.zeros(points_range.shape)
        points_ones = N.ones(points_range.shape) * self.checker_size * (self.num_grid_lines - 1)/2
        # Endpoints of the x-parallel and y-parallel grid lines (z = 0).
        x_start_points_array = N.array([-points_ones, points_range, points_zeros]).astype('float32')
        x_end_points_array = N.array([points_ones, points_range, points_zeros]).astype('float32')
        y_start_points_array = N.array([points_range, -points_ones, points_zeros]).astype('float32')
        y_end_points_array = N.array([points_range, points_ones, points_zeros]).astype('float32')
        # Optionally rotate the whole grid into the stage/plate alignment,
        # one-shot (flag is consumed here).
        if self.rotate_grid:
            x_start_points_array = self.from_homo(N.dot(self.T_stage_plate,self.to_homo(x_start_points_array)))
            x_end_points_array = self.from_homo(N.dot(self.T_stage_plate,self.to_homo(x_end_points_array)))
            y_start_points_array = self.from_homo(N.dot(self.T_stage_plate,self.to_homo(y_start_points_array)))
            y_end_points_array = self.from_homo(N.dot(self.T_stage_plate,self.to_homo(y_end_points_array)))
            self.rotate_grid = False
        # Convert to cv mats (one row per point).
        x_start_points = cvNumpy.array_to_mat(x_start_points_array.transpose())
        x_end_points = cvNumpy.array_to_mat(x_end_points_array.transpose())
        y_start_points = cvNumpy.array_to_mat(y_start_points_array.transpose())
        y_end_points = cvNumpy.array_to_mat(y_end_points_array.transpose())
        axis_line_width = 1
        # Camera matrix K (3x3) and distortion coefficients D (5x1) as cv mats.
        cvmatK = cv.fromarray(N.reshape(self.camerainfo.K,[3,3]))
        cvmatD = cv.fromarray(N.reshape(self.camerainfo.D,[5,1]))
        # Project and draw the x-parallel lines in red (output mats are reused
        # in place for both passes).
        cv.ProjectPoints2(x_start_points, self.rvec, self.tvec, cvmatK, cvmatD, self.start_points_projected)
        cv.ProjectPoints2(x_end_points, self.rvec, self.tvec, cvmatK, cvmatD, self.end_points_projected)
        start_points = cvNumpy.mat_to_array(self.start_points_projected)
        end_points = cvNumpy.mat_to_array(self.end_points_projected)
        for line_n in range(self.num_grid_lines):
            cv.Line(im_color, tuple(start_points[line_n,:]), tuple(end_points[line_n,:]), cv.CV_RGB(self.color_max,0,0), axis_line_width)
        # Project and draw the y-parallel lines in green.
        cv.ProjectPoints2(y_start_points, self.rvec, self.tvec, cvmatK, cvmatD, self.start_points_projected)
        cv.ProjectPoints2(y_end_points, self.rvec, self.tvec, cvmatK, cvmatD, self.end_points_projected)
        start_points = cvNumpy.mat_to_array(self.start_points_projected)
        end_points = cvNumpy.mat_to_array(self.end_points_projected)
        for line_n in range(self.num_grid_lines):
            cv.Line(im_color, tuple(start_points[line_n,:]), tuple(end_points[line_n,:]), cv.CV_RGB(0,self.color_max,0), axis_line_width)
def draw_grid(self,im_color):
    """Draw a checker-spaced grid (x-lines red, y-lines green) onto im_color by
    projecting plate-frame line endpoints through rvec/tvec and the stored
    intrinsic matrix / distortion coefficients."""
    # Line coordinates centered on the plate origin, spaced one checker square
    # apart, spanning (num_grid_lines-1) squares in each direction.
    points_range = (numpy.array(range(self.num_grid_lines)) - (self.num_grid_lines-1)/2) * self.checker_size
    points_zeros = numpy.zeros(points_range.shape)
    points_ones = numpy.ones(points_range.shape) * self.checker_size * (self.num_grid_lines - 1)/2
    # Endpoints of the x-parallel and y-parallel grid lines (z = 0).
    x_start_points_array = numpy.array([-points_ones,points_range,points_zeros]).astype('float32')
    x_end_points_array = numpy.array([points_ones,points_range,points_zeros]).astype('float32')
    y_start_points_array = numpy.array([points_range,-points_ones,points_zeros]).astype('float32')
    y_end_points_array = numpy.array([points_range,points_ones,points_zeros]).astype('float32')
    # Optionally rotate the whole grid into the stage/plate alignment,
    # one-shot (flag is consumed here).
    if self.rotate_grid:
        x_start_points_array = self.from_homo(numpy.dot(self.T_stage_plate,self.to_homo(x_start_points_array)))
        x_end_points_array = self.from_homo(numpy.dot(self.T_stage_plate,self.to_homo(x_end_points_array)))
        y_start_points_array = self.from_homo(numpy.dot(self.T_stage_plate,self.to_homo(y_start_points_array)))
        y_end_points_array = self.from_homo(numpy.dot(self.T_stage_plate,self.to_homo(y_end_points_array)))
        self.rotate_grid = False
    # Convert to cv mats (one row per point).
    x_start_points = cvNumpy.array_to_mat(x_start_points_array.transpose())
    x_end_points = cvNumpy.array_to_mat(x_end_points_array.transpose())
    y_start_points = cvNumpy.array_to_mat(y_start_points_array.transpose())
    y_end_points = cvNumpy.array_to_mat(y_end_points_array.transpose())
    axis_line_width = 1
    # Project and draw the x-parallel lines in red (output mats are reused in
    # place for both passes).
    cv.ProjectPoints2(x_start_points, self.rvec, self.tvec, self.intrinsic_matrix, self.distortion_coeffs, self.start_points_projected)
    cv.ProjectPoints2(x_end_points, self.rvec, self.tvec, self.intrinsic_matrix, self.distortion_coeffs, self.end_points_projected)
    start_points = cvNumpy.mat_to_array(self.start_points_projected)
    end_points = cvNumpy.mat_to_array(self.end_points_projected)
    for line_n in range(self.num_grid_lines):
        cv.Line(im_color,tuple(start_points[line_n,:]),tuple(end_points[line_n,:]),cv.CV_RGB(self.color_max,0,0),axis_line_width)
    # Project and draw the y-parallel lines in green.
    cv.ProjectPoints2(y_start_points, self.rvec, self.tvec, self.intrinsic_matrix, self.distortion_coeffs, self.start_points_projected)
    cv.ProjectPoints2(y_end_points, self.rvec, self.tvec, self.intrinsic_matrix, self.distortion_coeffs, self.end_points_projected)
    start_points = cvNumpy.mat_to_array(self.start_points_projected)
    end_points = cvNumpy.mat_to_array(self.end_points_projected)
    for line_n in range(self.num_grid_lines):
        cv.Line(im_color,tuple(start_points[line_n,:]),tuple(end_points[line_n,:]),cv.CV_RGB(0,self.color_max,0),axis_line_width)
def remap_extrinsics(self, now):
    """Re-express the board extrinsics in the Plate frame at time `now`.

    Maps the plate origin and a point one checker-square along +x through the
    camera->plate homography self.H, derives the in-plane rotation of the
    Plate frame, broadcasts the Plate pose relative to the Checkerboard at
    `now`, then refreshes self.rvec / self.tvec from the ImageRect->Plate
    transform and redraws the origin axes.

    :param now: rospy.Time stamp used both for the broadcast and the lookup.
    """
    # Two camera points: the plate origin and a point one checker square
    # along +x, in homogeneous coordinates.
    X = [self.originPlate.point.x, self.originPlate.point.x + self.checker_size]
    Y = [self.originPlate.point.y, self.originPlate.point.y]
    Z = [1, 1]
    camera_points = N.array([X,Y,Z])
    plate_points = N.dot(self.H, camera_points)
    x0 = plate_points[0,0]
    x1 = plate_points[0,1]
    y0 = plate_points[1,0]
    y1 = plate_points[1,1]
    # In-plane angle of the +x checker direction after mapping to the plate.
    angleRot = N.arctan2((y1-y0),(x1-x0))
    # Translation of the Plate frame; the plate lies in the z=0 plane.
    checkerboard_plate_vector = plate_points[:,0]
    checkerboard_plate_vector[2] = 0
    self.tfbx.sendTransform(checkerboard_plate_vector,
                            tf.transformations.quaternion_from_euler(0, 0, angleRot),
                            now,
                            FRAME_PLATE,
                            FRAME_CHECKERBOARD)
    try:
        # Block (up to 1 s) until the transform broadcast above is resolvable.
        self.tfrx.waitForTransform(FRAME_IMAGERECT, FRAME_PLATE, now, rospy.Duration(1.0))
        (trans, quat) = self.tfrx.lookupTransform(FRAME_IMAGERECT, FRAME_PLATE, now)
        # Quaternion -> 3x3 rotation -> Rodrigues vector; cv.Rodrigues2
        # writes into self.rvec in place.
        rot_array = tf.transformations.quaternion_matrix(quat)
        rot_array = rot_array[0:3,0:3]
        rot_array = rot_array.astype('float32')
        rot_mat = cvNumpy.array_to_mat(rot_array)
        cv.Rodrigues2(rot_mat, self.rvec)
        self.rvec_array = cvNumpy.mat_to_array(self.rvec).squeeze()
        self.tvec_array = N.array(trans)
        # Rebind the cv mats to the refreshed arrays.
        self.rvec = cvNumpy.array_to_mat(self.rvec_array)
        self.tvec = cvNumpy.array_to_mat(self.tvec_array)
        display_text = "rvec = [%0.3f, %0.3f, %0.3f]" % (self.rvec_array[0], self.rvec_array[1], self.rvec_array[2])
        cv.PutText(self.im_display, display_text, (25,420), self.font, self.font_color)
        display_text = "tvec = [%0.3f, %0.3f, %0.3f]" % (self.tvec_array[0], self.tvec_array[1], self.tvec_array[2])
        cv.PutText(self.im_display, display_text, (25,440), self.font, self.font_color)
        self.draw_origin(self.im_display)
    except (tf.Exception):
        # Transform not available in time; try again on the next call.
        pass