def solve(self, pts3d, p2, estimate):
    """Bounded PnP: refine the 6-DoF pose in self.X0 (Euler angles + camera position)
    against 3D points `pts3d` and their 2D projections `p2`, using hints from `estimate`."""
    Rvec = estimate.get('rvec', self.X0[:3])
    Rmat, _ = cv2.Rodrigues(Rvec)
    cam_pos = self.X0[3:]
    #cam_pos = (-Rmat.T @ Tvec).flatten()

    # Optimization bounds for [euler_x, euler_y, euler_z, cam_x, cam_y, cam_z]
    bounds = ([-np.inf] * 6, [np.inf] * 6)
    if 'alt' in estimate:
        # Pin the camera altitude (z) to the supplied estimate, within a small epsilon.
        cam_pos[2] = estimate['alt']
        zeps = 0.1
        bounds[0][5] = cam_pos[2] - zeps
        bounds[1][5] = cam_pos[2] + zeps

    eu_angls = utils.rotationMatrixToEulerAngles(Rmat)
    if 'rvec' in estimate:
        # yaw, pitch, roll. TODO: solve the 180-degree wrap-around problem,
        # maybe transfer the point first.
        reps = np.radians(self.rotation_bounds)
        bounds[0][:3] = eu_angls - reps
        bounds[1][:3] = eu_angls + reps

    estimation_vec = np.clip(self.X0, bounds[0], bounds[1])
    resPnP, res = myPnP_Euler(pts3d, p2, self.K, self.distortion, estimation_vec, bounds)
    self.X0 = res

    Rvec, _ = cv2.Rodrigues(utils.eulerAnglesToRotationMatrix(res[:3]))
    # Convert the camera position back to a translation vector Tvec.
    Rmat, _ = cv2.Rodrigues(Rvec)
    Tvec = -Rmat @ res[3:]
    return resPnP, Rvec, Tvec
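# The solver above round-trips between Euler angles and rotation matrices through
# utils.eulerAnglesToRotationMatrix, which is not shown in this snippet. A minimal
# sketch of what that helper is assumed to look like (the common R = Rz @ Ry @ Rx
# composition, angles in radians); the project's actual implementation may differ:
def eulerAnglesToRotationMatrix(theta):
    # theta = [x, y, z] rotations in radians
    Rx = np.array([[1, 0, 0],
                   [0, np.cos(theta[0]), -np.sin(theta[0])],
                   [0, np.sin(theta[0]),  np.cos(theta[0])]])
    Ry = np.array([[ np.cos(theta[1]), 0, np.sin(theta[1])],
                   [0, 1, 0],
                   [-np.sin(theta[1]), 0, np.cos(theta[1])]])
    Rz = np.array([[np.cos(theta[2]), -np.sin(theta[2]), 0],
                   [np.sin(theta[2]),  np.cos(theta[2]), 0],
                   [0, 0, 1]])
    return Rz @ Ry @ Rx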
def dump_bodies(asf_skeleton, skeleton_xml):
    """
    Given an XML element (an ElementTree Element), dump the skeleton's joint
    objects as the element's children.

    This function expects all joint angles in the skeleton to be zero;
    otherwise the computed axes and angles will be wrong. It also performs
    all the calculations concerning axes and joints.
    """
    # Ensure all positions are at their "default" values; if the skeleton's pose
    # differs from the one in the ASF file, everything will be off.
    asf_skeleton.update_joint_positions()

    for joint in [asf_skeleton.root] + asf_skeleton.joints:
        body_xml = ET.SubElement(skeleton_xml, "body")
        body_xml.set("name", bodyname(joint))

        ################################
        # POSITION AND COORDINATE AXES #
        ################################

        rmatrix = joint.ctrans
        tform_text = vec2string(np.append(joint.base_pos,
                                          euler_from_matrix(rmatrix[:3, :3], axes="rxyz")))
        ET.SubElement(body_xml, "transformation").text = tform_text

        ########################################
        # VISUALIZATION AND COLLISION GEOMETRY #
        ########################################

        # Direction vectors and axes are specified w.r.t. the global reference
        # frame in ASF files (and thus in joints), so we construct a
        # transformation to the local reference frame (as DART expects).
        local_direction = np.matmul(joint.ctrans_inv[:3, :3], joint.direction)
        direction_matrix = utils.rmatrix_x2v(local_direction)
        rangles = utils.rotationMatrixToEulerAngles(direction_matrix)
        trans_offset = joint.length * local_direction / 2
        tform_vector = np.append(trans_offset, rangles)

        for shape in ["visualization", "collision"]:
            shape_xml = ET.SubElement(body_xml, shape + "_shape")
            ET.SubElement(shape_xml, "transformation").text = \
                vec2string(tform_vector)
            add_box(shape_xml, joint.length)

        ###################
        # INERTIA SECTION #
        ###################

        inertia_xml = ET.SubElement(body_xml, "inertia")
        mass_xml = ET.SubElement(inertia_xml, "mass")
        mass_xml.text = str(1)
        ET.SubElement(inertia_xml, "offset").text = vec2string(trans_offset)
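# dump_bodies relies on a few small helpers (bodyname, vec2string, add_box) defined
# elsewhere in the repo. As an illustration only, vec2string is assumed to serialize
# a numeric vector into the space-separated text the skeleton XML expects:
def vec2string(vec):
    # Hypothetical implementation; the real helper may format numbers differently.
    return " ".join(str(v) for v in vec)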
def get_roll(H):
    """Decompose the homography H and return the absolute value of the third Euler
    angle (treated as roll) of the first valid rotation, or 0 if none is found."""
    # Camera intrinsics saved by the calibration script.
    data = np.load('calib.npz')
    K = data['mtx']

    num, Rs, Ts, Ns = cv2.decomposeHomographyMat(H, K)
    # print(num)
    for i in range(num):
        R = Rs[i]
        if isRotationMatrix(R):
            angles = rotationMatrixToEulerAngles(R)
            # print(angles)
            return abs(angles[2])
    return 0
import cv2
import numpy as np

from utils import isRotationMatrix, rotationMatrixToEulerAngles

# Camera intrinsics saved by the calibration script.
data = np.load('calib.npz')
K = data['mtx']
print(K)

# Previously estimated homography.
data = np.load('homo.npz')
H = data['homo']
print(H)

num, Rs, Ts, Ns = cv2.decomposeHomographyMat(H, K)
print(num)
for i in range(num):
    R = Rs[i]
    if isRotationMatrix(R):
        angles = rotationMatrixToEulerAngles(R)
        print(angles)
# print(Ts)
# print(Ns)
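# The two helpers imported from utils are not shown in these snippets. A minimal
# sketch of the standard implementations they are assumed to follow (XYZ Euler
# convention, angles in radians):
def isRotationMatrix(R):
    # A valid rotation matrix satisfies R^T R = I (to numerical tolerance).
    return np.linalg.norm(np.identity(3) - R.T @ R) < 1e-6

def rotationMatrixToEulerAngles(R):
    # Returns [x, y, z] angles, with a separate branch for the near-gimbal-lock case.
    sy = np.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if sy > 1e-6:
        x = np.arctan2(R[2, 1], R[2, 2])
        y = np.arctan2(-R[2, 0], sy)
        z = np.arctan2(R[1, 0], R[0, 0])
    else:
        x = np.arctan2(-R[1, 2], R[1, 1])
        y = np.arctan2(-R[2, 0], sy)
        z = 0.0
    return np.array([x, y, z])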
def load_kitty_poses(location, pose_file):
    # Load KITTI odometry poses, one Pose per line of the sequence's pose file.
    # Assumes the project's Pose class and `from sortedcontainers import SortedDict`
    # are in scope.
    poses = SortedDict()
    filename = '{}/poses/{}.txt'.format(location, pose_file)
    id = 0
    with open(filename, 'r') as fp:
        for line in fp.readlines():
            poses[id] = Pose(line, location, pose_file)
            id += 1
    return poses


location = '/Users/weihao/BlueNoteData/dataset'
pose_file = '00'
poses = load_kitty_poses(location, pose_file)

focal = 719
# cam = PinholeCamera(1241.0, 376.0, focal, focal, 607.1928, 185.2157)
cam = PinholeCamera(1241.0, 376.0, focal, focal, 620.5, 188.0)

print('Total images {}'.format(len(poses)))

# Write the frame-to-frame relative rotation (Euler angles) and translation.
with open('w.txt', 'w') as fp:
    pose_pre = None
    for id in poses:
        pose = poses[id]
        if pose_pre is not None:
            t = pose.get_tran(pose_pre)
            d = rotationMatrixToEulerAngles(pose.get_direction(pose_pre, cam))
            fp.write('{} {} {} {} {} {} {}\n'.format(
                id, d[0], d[1], d[2], t[0], t[1], t[2]))
        pose_pre = pose
def plot3d():
    # Generator-based live plotter: receives (cmd, data) tuples via send() and
    # updates a 2x2 figure (3D view, XYZ vs. time, XY track, Euler angles vs. time).
    fig = plt.figure(figsize=(8, 6))
    ax1 = fig.add_subplot(2, 2, 1, projection='3d')
    ax1.set_title('3D')
    ax2 = fig.add_subplot(2, 2, 2)
    ax2.set_title('XYZ')
    ax3 = fig.add_subplot(2, 2, 4)
    ax3.set_title('2D XY plot')
    ax4 = fig.add_subplot(2, 2, 3)
    ax4.set_title('Angles')
    fig.canvas.draw()  # note that the first draw comes before setting data
    #fig.canvas.mpl_connect('close_event', handle_close)
    #h1 = ax1.plot([0,1],[0,1],[0,1], lw=3)[0]
    camera = None
    pts3d = None
    #text = ax1.text(0.8, 1.5, '')
    ax1.set_ylim([-2, 2])
    ax1.set_xlim([-2, 2])
    ax1.set_zlim([2, -2])
    ax3.set_ylim([-2, 2])
    ax3.set_xlim([-2, 2])
    ax3.axis('equal')

    t_start = time.time()
    camera_t_data = []
    camera_r_data = []
    camera_t_data_gt = []
    camera_r_data_gt = []
    camera_ttags = []
    camera_gt_ttags = []
    cam_pos_h = None
    i = 0
    cam_cnt = 0
    mem_len = 1000  # keep only the most recent samples

    while True:
        cmd, data = yield
        if cmd == 'camera':
            ttag, R, T = data
            camera_t_data.append(T)
            camera_t_data = camera_t_data[-mem_len:]
            camera_r_data.append(utils.rotationMatrixToEulerAngles(R) * 180.0 / np.pi)
            camera_r_data = camera_r_data[-mem_len:]
            camera_ttags.append(ttag - t_start)
            camera_ttags = camera_ttags[-mem_len:]
            cam_cnt += 1
            # Redraw only every 10th camera update to keep the UI responsive.
            if cam_cnt % 10 == 0:
                if camera is not None:
                    for h in camera:
                        h.remove()
                camera = plot_camera(ax1, R, -T)
                camera_t_vec = np.vstack(camera_t_data)
                camera_r_vec = np.vstack(camera_r_data)
                if cam_pos_h is not None:
                    for hdl in cam_pos_h:
                        hdl[0].remove()
                cam_pos_h = [ax2.plot(camera_ttags, camera_t_vec[:, i], c)
                             for i, c in enumerate('rgb')]
                cam_pos_h.append(ax3.plot(camera_t_vec[:, 0], camera_t_vec[:, 1], '-b', alpha=0.5))
                cam_pos_h += [ax4.plot(camera_ttags, camera_r_vec[:, i], c)
                              for i, c in enumerate('rgb')]
                if camera_t_data_gt:
                    camera_t_vec_gt = np.vstack(camera_t_data_gt)
                    camera_r_vec_gt = np.vstack(camera_r_data_gt)
                    camera_t_vec_gt -= camera_t_vec_gt[0]  # start at (0, 0)
                    cam_pos_h.append(ax3.plot(camera_t_vec_gt[:, 0], camera_t_vec_gt[:, 1], '-r', alpha=0.5))
                    cam_pos_h += [ax2.plot(camera_gt_ttags, camera_t_vec_gt[:, i], c, alpha=0.5)
                                  for i, c in enumerate('rgb')]
                    cam_pos_h += [ax4.plot(camera_gt_ttags, camera_r_vec_gt[:, i], c, alpha=0.5)
                                  for i, c in enumerate('rgb')]
                fig.canvas.draw()
        if cmd == 'camera_gt':
            ttag, R, T = data
            camera_t_data_gt.append(T)
            camera_t_data_gt = camera_t_data_gt[-mem_len:]
            camera_r_data_gt.append(utils.rotationMatrixToEulerAngles(R) * 180.0 / np.pi)
            camera_r_data_gt = camera_r_data_gt[-mem_len:]
            camera_gt_ttags.append(ttag - t_start)
            camera_gt_ttags = camera_gt_ttags[-mem_len:]
        if cmd == 'pts3d':
            if pts3d is not None:
                pts3d.remove()
            #import pdb; pdb.set_trace()
            pts3d = ax1.scatter(data[:, 0], data[:, 1], data[:, 2], c='b')
        if cmd == 'stop':
            break
        plt.waitforbuttonpress(timeout=0.001)
        i += 1
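# plot3d() is a coroutine: it must be primed with next() and then fed (cmd, data)
# tuples via send(). A hypothetical driver loop (pose_source is an assumed iterable
# of (timestamp, R, T) tuples, not part of the snippet above):
viz = plot3d()
next(viz)  # advance to the first `yield`
for ttag, R, T in pose_source:
    viz.send(('camera', (ttag, R, T)))
viz.send(('stop', None))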