def test_matrices_from_quaternions():
    random_state = np.random.RandomState(83)
    for _ in range(5):
        q = pr.random_quaternion(random_state)
        R = pbr.matrices_from_quaternions([q], normalize_quaternions=False)[0]
        q2 = pr.quaternion_from_matrix(R)
        pr.assert_quaternion_equal(q, q2)

    for _ in range(5):
        q = random_state.randn(4)
        R = pbr.matrices_from_quaternions([q], normalize_quaternions=True)[0]
        q2 = pr.quaternion_from_matrix(R)
        pr.assert_quaternion_equal(q / np.linalg.norm(q), q2)
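The batch API exercised above also accepts a stacked (N, 4) quaternion array directly instead of a one-element list. A minimal usage sketch, assuming the same aliases as the test (numpy as np, pytransform3d.rotations as pr, pytransform3d.batch_rotations as pbr):

random_state = np.random.RandomState(42)
# stack three unit quaternions into an (3, 4) array
Q = np.vstack([pr.random_quaternion(random_state) for _ in range(3)])
Rs = pbr.matrices_from_quaternions(Q, normalize_quaternions=True)
assert Rs.shape == (3, 3, 3)  # one rotation matrix per quaternion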
def test_matrices_from_pos_quat():
    """Test conversion from positions and quaternions to matrices."""
    P = np.empty((10, 7))
    random_state = np.random.RandomState(0)
    P[:, :3] = random_state.randn(10, 3)
    for t in range(10):
        P[t, 3:] = random_quaternion(random_state)

    H = matrices_from_pos_quat(P)
    for t in range(10):
        assert_array_almost_equal(P[t, :3], H[t, :3, 3])
        assert_quaternion_equal(
            P[t, 3:], quaternion_from_matrix(H[t, :3, :3]))
def test_transforms_from_pqs_1dim():
    P = np.empty((10, 7))
    random_state = np.random.RandomState(0)
    P[:, :3] = random_state.randn(len(P), 3)
    P[:, 3:] = norm_vectors(random_state.randn(len(P), 4))

    H = transforms_from_pqs(P)
    P2 = pqs_from_transforms(H)
    assert_array_almost_equal(P[:, :3], H[:, :3, 3])
    assert_array_almost_equal(P[:, :3], P2[:, :3])
    for t in range(len(P)):
        assert_quaternion_equal(
            P[t, 3:], quaternion_from_matrix(H[t, :3, :3]))
        assert_quaternion_equal(P[t, 3:], P2[t, 3:])
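Outside of the tests, the round trip checked by the two tests above is one call per direction. A minimal self-contained sketch of the same conversion:

import numpy as np
import pytransform3d.trajectories as ptr

# one pose per row: [x, y, z, qw, qx, qy, qz]
P = np.array([[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
              [1.0, 0.0, 0.5, 0.0, 1.0, 0.0, 0.0]])
H = ptr.transforms_from_pqs(P)   # (2, 4, 4) homogeneous transformation matrices
P2 = ptr.pqs_from_transforms(H)  # back to (2, 7) position + quaternion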
def calc_inverse_kinematics(self, guess, target_position, target_orientation,
                            tcp_config=None, orientation_type=None):
    """
    End-effector orientation (target_orientation) can be provided in several
    standard formats, by specifying the orientation_type (default is None):

    - 'matrix': rotation matrix -> 3x3 array, in row major order
    - 'axis_angle': axis angle -> {'angle': float, 'x': float, 'y': float, 'z': float}
    - 'euler_zyx': {yaw, pitch, roll} Euler (Tait-Bryan) angles ->
      {'yaw': float, 'pitch': float, 'roll': float}
    - 'quat': quaternion -> {'w': float, 'x': float, 'y': float, 'z': float}
    - None: defaults to quaternion

    Conversion relies on the pytransform3d library.
    """
    quat_not_normed = None
    if orientation_type == 'matrix':
        self.__ensure_pyt3d()
        quat_not_normed = pyrot.quaternion_from_matrix(
            pyrot.check_matrix(target_orientation))
    elif orientation_type == 'axis_angle':
        self.__ensure_pyt3d()
        axis_angle = [
            target_orientation['x'], target_orientation['y'],
            target_orientation['z'], target_orientation['angle']
        ]
        quat_not_normed = pyrot.quaternion_from_axis_angle(
            pyrot.check_axis_angle(axis_angle))
    elif orientation_type == 'euler_zyx':
        self.__ensure_pyt3d()
        euler_zyx = [
            target_orientation['yaw'], target_orientation['pitch'],
            target_orientation['roll']
        ]
        matrix = pyrot.matrix_from_euler_zyx(euler_zyx)
        quat_not_normed = pyrot.quaternion_from_matrix(
            pyrot.check_matrix(matrix))
    elif orientation_type == 'quat' or orientation_type is None:
        quat_not_normed = [
            target_orientation['w'], target_orientation['x'],
            target_orientation['y'], target_orientation['z']
        ]
    else:
        eva_error(
            f'calc_inverse_kinematics invalid "{orientation_type}" orientation_type'
        )

    quat_normed = self.__normalize(quat_not_normed)
    quaternion = {
        'w': quat_normed[0], 'x': quat_normed[1],
        'y': quat_normed[2], 'z': quat_normed[3]
    }
    body = {
        'guess': guess, 'position': target_position, 'orientation': quaternion
    }
    if tcp_config is not None:
        body['tcp_config'] = tcp_config

    r = self.api_call_with_auth('PUT', 'calc/inverse_kinematics',
                                json.dumps(body))
    if r.status_code != 200:
        eva_error('inverse_kinematics error', r)
    return self.__check_calculation(r, 'calc_inverse_kinematics', 'ik')['joints']
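A hypothetical call sketch for this method (the eva client object and all joint/pose values below are illustrative assumptions, not from the source):

guess = [0.0, 0.1, -0.3, 0.0, -1.0, 0.0]          # joint angles in radians
target_position = {'x': 0.3, 'y': 0.0, 'z': 0.4}  # metres
target_orientation = {'w': 1.0, 'x': 0.0, 'y': 0.0, 'z': 0.0}  # identity quaternion
joints = eva.calc_inverse_kinematics(guess, target_position, target_orientation,
                                     orientation_type='quat')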
def run(vid, viz=False, all_case=None):
    # ---------------------------------------------------------------------
    # load data
    vid_data = np.load(vid, allow_pickle=True)
    # expecting SMPL-H format, otherwise retargeting isn't guaranteed to work
    assert vid_data['poses'].shape[-1] == 156, \
        'poses are of dim ' + str(vid_data['poses'].shape[-1])
    data = vid_data['poses'][:, :66]
    T = data.shape[0]
    data = data.reshape(T, -1, 3)

    with open('../DeepMimic/data/characters/humanoid3d.txt') as f:
        dm_model = json.load(f)
    with open('../human_dynamics/models/neutral_smpl_with_cocoplustoesankles_reg.pkl',
              'rb') as f:
        smpl_model = pickle.load(f, encoding='latin1')

    # get SMPL model info and orient to DeepMimic
    base = smpl_model['J'][:-2]
    smpl_parents = smpl_model['kintree_table'][0][:-2]  # don't need last two palm joints

    # convert data to 3d pose
    Rs = np.array([Rot.from_rotvec(data[i]).as_matrix() for i in range(T)])
    posed, _ = batch_global_rigid_transformation(
        Rs, np.tile(np.expand_dims(base, 0), (T, 1, 1)), smpl_parents)

    # get DeepMimic joints
    joints = dm_model['Skeleton']['Joints']
    joint_coords = []
    dm_parents = []
    for joint_dict in joints:
        delta = np.array([joint_dict['AttachX'], joint_dict['AttachY'],
                          joint_dict['AttachZ']])
        if joint_dict['Parent'] != -1:
            coords = joint_coords[joint_dict['Parent']] + delta
        else:
            coords = delta
        joint_coords.append(coords)
        dm_parents.append(joint_dict['Parent'])
    joint_coords = np.array(joint_coords)

    euler = [0, -np.pi / 2, 0]
    dm_to_smpl = Rot.from_rotvec(euler).as_matrix()
    euler = [0, np.pi / 2, 0]
    smpl_to_dm = Rot.from_rotvec(euler).as_matrix()

    # sanity = joint_coords.copy()
    # sanity = np.concatenate([sanity, [[1,0,0]]], axis=0)
    # sanity = np.concatenate([[sanity[0]], (dm_to_smpl@sanity[1:].T).T], axis=0)
    # compare_single(sanity, base)

    # ------------------------------- retarget -----------------------------
    # need the extra joints to stabilize ankle/head rotations
    dm_parents += [2, 5, 11]

    # first calculate root offset
    corrected = posed.copy()
    corrected[:, 0] = (corrected[:, 1] + corrected[:, 2]) / 2
    corrected = corrected[:, smpl_to_dm_map]

    # then just adjust the relevant limb lengths
    adjusted = []
    for i in range(1, 15):
        limb_length = np.linalg.norm(joint_coords[i] - joint_coords[dm_parents[i]])
        current = corrected[:, i] - corrected[:, dm_parents[i]]
        adjusted.append(
            current / np.linalg.norm(current, axis=1).reshape(-1, 1) * limb_length)
    adjusted.append(corrected[:, 15] - corrected[:, dm_parents[15]])
    adjusted.append(corrected[:, 16] - corrected[:, dm_parents[16]])
    adjusted.append(corrected[:, 17] - corrected[:, dm_parents[17]])
    for i in range(1, 18):
        corrected[:, i] = corrected[:, dm_parents[i]] + adjusted[i - 1]

    # corrected[:, :, 1] += 1  # set character higher on y axis
    corrected = corrected - corrected[:, 0:1] + joint_coords[0:1]
    np.set_printoptions(suppress=True)

    # orientation correction
    if all_case is None:
        animate_single(posed)
        root_adjust = get_root_correction(
            int(input('which case? (0/1/2/any number) ')))
    else:
        root_adjust = get_root_correction(all_case)

    # -------------------------- visualize retarget ------------------------
    # posed[:, :, 0] += 1
    corrected2 = corrected.copy()
    corrected2 += np.expand_dims(vid_data['trans'], 1) - np.expand_dims(
        corrected2[:, 0], 1)
    corrected2 = np.array([(root_adjust @ corrected2[i].T).T for i in range(T)])
    posed += np.expand_dims(vid_data['trans'], 1) - np.expand_dims(
        posed[:, 0], 1)
    if viz:
        compare_animate(posed, corrected2, interval=12)
        # sys.exit(0)

    # -------------- get and save retargeted rotation matrices -------------
    head_coords = (smpl_to_dm @ (
        base[smpl_to_dm_map[15]] - base[smpl_to_dm_map[dm_parents[15]]]
        + dm_to_smpl @ joint_coords[dm_parents[15]]).T).T.reshape(1, 3)
    rtoe_coords = (smpl_to_dm @ (
        base[smpl_to_dm_map[16]] - base[smpl_to_dm_map[dm_parents[16]]]
        + dm_to_smpl @ joint_coords[dm_parents[16]])).T.reshape(1, 3)
    ltoe_coords = (smpl_to_dm @ (
        base[smpl_to_dm_map[17]] - base[smpl_to_dm_map[dm_parents[17]]]
        + dm_to_smpl @ joint_coords[dm_parents[17]])).T.reshape(1, 3)
    joint_coords_retargeting = np.concatenate(
        [joint_coords, head_coords, rtoe_coords, ltoe_coords], axis=0)
    # compare_single(sanity, joint_coords_retargeting)
    # sys.exit(0)

    corrected_body = np.array([(root_adjust @ corrected[i, 1:].T).T
                               for i in range(T)])
    corrected = np.concatenate([corrected[:, 0:1], corrected_body], axis=1)

    quarts = []
    terminal = [8, 14, 15, 16, 17]
    all_Rs = []
    # TODO: can we batch this somehow? (see the sketch after this function)
    for t in tqdm(range(T), 'processing frames...'):
        root_orient = root_adjust @ Rs[t, 0] @ dm_to_smpl
        retargeted_Rs, retargeted_As = inv_kin(
            root_orient, joint_coords_retargeting, corrected[t], dm_parents,
            terminal)  # retargeted_Rs has shape (1, 22, 3, 3)
        quart = []
        for i in range(len(retargeted_Rs[0])):
            R = retargeted_Rs[0, i]
            if i == 0:
                q = quaternion_from_matrix(R, strict_check=False)
                quart.append(q)
            elif i not in terminal:
                try:
                    q = quaternion_from_matrix(R)
                    if i in one_d_rotations:
                        end_orient = np.linalg.inv(
                            retargeted_As[0, dm_parents[i]]) @ (
                                corrected[t, dm_parents.index(i)] -
                                corrected[t, i])
                        start_orient = (
                            joint_coords_retargeting[dm_parents.index(i)] -
                            joint_coords_retargeting[i])
                        if np.all(end_orient == -start_orient):
                            theta = np.pi
                        else:
                            # so far seems like a good enough approximation
                            theta = np.arccos(
                                np.dot(end_orient, start_orient) /
                                (np.linalg.norm(end_orient) *
                                 np.linalg.norm(start_orient)))
                        if i not in elbows:
                            # intuitively, knees and elbows rotate w.r.t. different axes
                            theta = 2 * np.pi - theta
                        quart.append(np.array([theta]))
                    else:
                        quart.append(q)
                except ValueError as e:
                    # the strict check should only fail for the random rotation
                    # matrices of the terminal joints
                    print("unexpected bad R at joint %d, frame %d" % (i, t))
                    print(e)
        quarts.append(np.concatenate(quart))
        all_Rs.append(retargeted_Rs[0])

    fps = vid_data['mocap_framerate']
    spf = 1 / fps
    root_motion = (root_adjust @ vid_data['trans'].T).T
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    outfile = os.path.join(out_path, vid.split('/')[-1].split('.')[0] + '.txt')
    with open(outfile, 'w') as f:
        f.write('{\n')
        f.write('"Loop": "wrap",\n')
        f.write('"Frames":\n')
        f.write('[\n')
        for i in range(len(quarts)):
            frame = [spf] + list(root_motion[i]) + list(quarts[i])
            f.write(str(frame))
            if i < len(quarts) - 1:
                f.write(',')
            f.write("\n")
        f.write(']\n')
        f.write('}')

    # -------------------- check final output one last time ----------------
    if viz:
        all_Rs = np.array(all_Rs)
        check = batch_global_rigid_transformation(
            all_Rs,
            np.tile(np.expand_dims(joint_coords_retargeting, 0), (T, 1, 1)),
            dm_parents)[0]
        corrected[:, :, 0] += 1
        compare_animate(corrected, check)
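On the TODO in the frame loop: the per-frame matrix-to-quaternion conversion could plausibly be batched with pytransform3d's batch API, which accepts arbitrary leading dimensions. A minimal sketch, assuming a recent pytransform3d that provides batch_rotations.quaternions_from_matrices; the 1D knee/elbow joints would still need their special-case angle extraction afterwards:

import numpy as np
from pytransform3d.batch_rotations import quaternions_from_matrices

all_Rs = np.array(all_Rs)              # (T, 22, 3, 3) stacked rotation matrices
Q = quaternions_from_matrices(all_Rs)  # (T, 22, 4), one wxyz quaternion per joint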
import numpy as np
import matplotlib.pyplot as plt
from pytransform3d.rotations import (matrix_from_axis_angle,
                                     quaternion_from_matrix,
                                     quaternion_slerp)
from pytransform3d.trajectories import plot_trajectory

T = np.linspace(0, 1, 1001)
sigmoid = 0.5 * (np.tanh(1.5 * np.pi * (T - 0.5)) + 1.0)
radius = 0.5
start_angle = np.deg2rad(0.0)
end_angle = np.deg2rad(350.0)

R1 = matrix_from_axis_angle([0, 0, 1, 0.5 * np.pi])
R2_start = matrix_from_axis_angle([1, 0, 0, start_angle])
R2_end = matrix_from_axis_angle([1, 0, 0, end_angle])
q_start = quaternion_from_matrix(R1.dot(R2_start))
q_end = quaternion_from_matrix(R1.dot(R2_end))

Y = np.zeros((len(T), 7))
Y[:, 0] = radius * np.cos(np.deg2rad(90) - end_angle * sigmoid)
Y[:, 2] = radius * np.sin(np.deg2rad(90) - end_angle * sigmoid)
if end_angle > np.pi:
    q_end *= -1.0
Y[:, 3:] = (1.0 - T)[:, np.newaxis] * q_start + T[:, np.newaxis] * q_end

Y_slerp = np.zeros((len(T), 7))
Y_slerp[:, 0] = 0.7 * radius * np.cos(np.deg2rad(90) - end_angle * sigmoid)
Y_slerp[:, 2] = 0.7 * radius * np.sin(np.deg2rad(90) - end_angle * sigmoid)
for i, t in enumerate(T):
    Y_slerp[i, 3:] = quaternion_slerp(q_start, q_end, t)
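The script imports plot_trajectory but the plotting step itself is not shown above. A possible completion (the parameter values are assumptions; the linearly interpolated quaternions are normalized first, since plain lerp leaves them non-unit):

Y[:, 3:] /= np.linalg.norm(Y[:, 3:], axis=1)[:, np.newaxis]
ax = plot_trajectory(P=Y, show_direction=False, n_frames=40, s=0.05)
ax = plot_trajectory(ax=ax, P=Y_slerp, show_direction=False, n_frames=40, s=0.05)
plt.show()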
canvas_y, canvas_x, _ = canvas.shape
# canvas = np.zeros((canvas_y, canvas_x, 3), dtype=np.uint8)

# load the paper image
paper = cv2.imread(r'input\paper.png')
jmax, imax, _ = paper.shape

# place the paper at the center of the canvas and establish the mapping
# from paper coordinates to canvas coordinates
dx, dy = (canvas_x - imax) // 2, (canvas_y - jmax) // 2

def paper2canvas(x, y):
    return x + dx, y + dy

# obtain the rotation quaternion from Euler angles
z_rot, y_rot, x_rot = 30, 30, 15
rot_matrix = active_matrix_from_extrinsic_euler_zyx(
    (z_rot / 180 * np.pi, y_rot / 180 * np.pi, x_rot / 180 * np.pi))
qs = quaternion_slerp_batch(q_id, quaternion_from_matrix(rot_matrix),
                            np.linspace(0, 1, n_steps))

def do_rot(v, t):
    """Rotate the vector (point) v at time step t."""
    return q_prod_vector(qs[t], v)

# besides the rotation, there is also a translation
trans_start, trans_max = (0, 0, 0), (0, 0, 1600)
trans = np.linspace(trans_start, trans_max, n_steps)

def do_trans(v, t):
    return v[0] + trans[t][0], v[1] + trans[t][1], v[2] + trans[t][2]

# for perspective we need to know the position of the eye
eye = (imax // 2, jmax // 2, -1600)

def proj(v):
    """Map a 3D point onto the initial paper plane (z = 0)."""
    # body missing in the original snippet; a standard pinhole projection
    # through `eye` is an assumed completion
    s = -eye[2] / (v[2] - eye[2])
    return eye[0] + s * (v[0] - eye[0]), eye[1] + s * (v[1] - eye[1])
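An illustrative composition of the helpers above for a single time step t (the corner order and the cv2 drawing call are assumptions, not from the source):

corners = [(0, 0, 0), (imax, 0, 0), (imax, jmax, 0), (0, jmax, 0)]
t = n_steps - 1
projected = [paper2canvas(*proj(do_trans(do_rot(c, t), t))) for c in corners]
# e.g. outline where the warped paper would land on the canvas:
# cv2.polylines(canvas, [np.int32(projected)], True, (255, 255, 255))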
while True:
    frames = pipeline.wait_for_frames()
    f = frames.first_or_default(rs.stream.pose)
    data = f.as_pose_frame().get_pose_data()
    # angles = quaternion_to_euler_angle([data.rotation.w, data.rotation.x,
    #                                     data.rotation.y, data.rotation.z])
    tm.add_transform(
        "world", "slam_sensor",
        pytr.transform_from_pq([
            data.translation.x * 1000.0, -data.translation.z * 1000.0,
            data.translation.y * 1000.0, data.rotation.w, data.rotation.x,
            data.rotation.y, data.rotation.z
        ]))
    t = tm.get_transform("world", "origin")
    q = pyrot.quaternion_from_matrix(t[0:3, 0:3])
    print(
        pytr.transform(t, [0, 0, 0, 1])[0:3], quaternion_to_euler_angle(q),
        data.mapper_confidence, data.tracker_confidence)
    # print("\r Device Position: ", t[0][3], t[1][3], t[2][3], angles, end="\r")

# now = time.time()
# with open("back_side.txt", "w") as text_file:
#     while (time.time() - now) < 5:
#         frames = pipeline.wait_for_frames()
#         f = frames.first_or_default(rs.stream.pose)
#         data = f.as_pose_frame().get_pose_data()
#         angles = quaternion_to_euler_angle(data.rotation.w, data.rotation.x,
#                                            data.rotation.y, data.rotation.z)
#         print("\r Device Position: ", -data.translation.x * 1000,
#               data.translation.z * 1000, data.translation.y * 1000, angles,
#               end="\r")
#         print(str(data.translation), str(data.rotation), file=text_file)
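The loop assumes a started pipeline, a TransformManager tm, and a registered "origin" frame. A minimal setup sketch (the identity origin pose is an assumption; the stream and manager calls are standard pyrealsense2/pytransform3d API):

import pyrealsense2 as rs
import pytransform3d.transformations as pytr
import pytransform3d.rotations as pyrot
from pytransform3d.transform_manager import TransformManager

pipeline = rs.pipeline()
cfg = rs.config()
cfg.enable_stream(rs.stream.pose)
pipeline.start(cfg)

tm = TransformManager()
# register "origin" relative to the sensor so get_transform("world", "origin")
# can be resolved once the sensor pose is added each frame
tm.add_transform("slam_sensor", "origin",
                 pytr.transform_from_pq([0, 0, 0, 1, 0, 0, 0]))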