def test_Rp2T(self):
    T = test_utils.get_random_T()
    R, p = conversions.T2Rp(T)
    np.testing.assert_almost_equal(
        conversions.Rp2T(np.array([R]), np.array([p]))[0],
        conversions.Rp2T(R, p),
    )
    T_test = conversions.Rp2T(R, p)
    for t, t_test in zip(T.flatten(), T_test.flatten()):
        self.assertAlmostEqual(t, t_test)
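# For reference, a minimal single-matrix sketch of what Rp2T computes,
# assuming R has shape (3, 3) and p has shape (3,); the library version
# additionally accepts the batched inputs exercised in the test above.
import numpy as np

def rp_to_t_reference(R, p):
    # Homogeneous transform: R in the upper-left 3x3 block, p in the last
    # column, and [0, 0, 0, 1] as the bottom row.
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = p
    return T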
def array_to_pose_data(self, skel, data, T_root_ref=None):
    assert len(data) == self._num_dofs + 6
    T_root = conversions.Rp2T(conversions.A2R(data[3:6]), data[0:3])
    if T_root_ref is not None:
        T_root = np.dot(T_root_ref, T_root)
    pose_data = []
    idx = 6
    for i in range(skel.num_joints()):
        joint = skel.joints[i]
        if joint == skel.root_joint:
            pose_data.append(T_root)
        else:
            j = self._char_info.bvh_map_inv[joint.name]
            if j is None:
                pose_data.append(constants.eye_T())
            else:
                joint_type = self._joint_type[j]
                if joint_type == self._pb_client.JOINT_FIXED:
                    pose_data.append(constants.eye_T())
                elif joint_type == self._pb_client.JOINT_SPHERICAL:
                    pose_data.append(
                        conversions.R2T(conversions.A2R(data[idx:idx + 3])))
                    idx += 3
                else:
                    raise NotImplementedError()
    return pose_data
def blend(pose1, pose2, alpha=0.5):
    assert 0.0 <= alpha <= 1.0
    pose_new = copy.deepcopy(pose1)
    for j in range(pose1.skel.num_joints()):
        R0, p0 = conversions.T2Rp(pose1.get_transform(j, local=True))
        R1, p1 = conversions.T2Rp(pose2.get_transform(j, local=True))
        R, p = math.slerp(R0, R1, alpha), math.lerp(p0, p1, alpha)
        pose_new.set_transform(j, conversions.Rp2T(R, p), local=True)
    return pose_new
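# Hypothetical usage sketch for blend(): pose_a and pose_b are assumed to be
# Pose instances sharing one skeleton (the names are illustrative). alpha
# weights pose2, so alpha=0.0 returns pose1 and alpha=1.0 returns pose2.
def blend_examples(pose_a, pose_b):
    pose_mid = blend(pose_a, pose_b, alpha=0.5)     # halfway blend
    pose_near_a = blend(pose_a, pose_b, alpha=0.1)  # mostly pose_a
    return pose_mid, pose_near_a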
def render_model(
    pb_client,
    model,
    draw_link=True,
    draw_link_info=True,
    draw_joint=False,
    draw_joint_geom=True,
    ee_indices=None,
    color=None,
    link_info_scale=1.0,
    link_info_color=[0, 0, 0, 1],
    link_info_line_width=1.0,
    lighting=True,
):
    if draw_link or draw_link_info:
        data_visual = pb_client.getVisualShapeData(model)
        for d in data_visual:
            if lighting:
                glEnable(GL_LIGHTING)
            else:
                glDisable(GL_LIGHTING)
            link_id = d[1]
            if link_id == -1:
                p, Q = pb_client.getBasePositionAndOrientation(model)
            else:
                link_state = pb_client.getLinkState(model, link_id)
                p, Q = link_state[4], link_state[5]
            p, Q = np.array(p), np.array(Q)
            R = conversions.Q2R(Q)
            T_joint = conversions.Rp2T(R, p)
            T_visual_from_joint = conversions.Qp2T(
                np.array(d[6]), np.array(d[5]))
            glPushMatrix()
            gl_render.glTransform(T_joint)
            gl_render.glTransform(T_visual_from_joint)
            # if color is None: color = d[7]
            # alpha = 0.5 if draw_joint else color[3]
            if color is None:
                color = [d[7][0], d[7][1], d[7][2], d[7][3]]
            if draw_link:
                render_geom(geom_type=d[2], geom_size=d[3], color=color)
            if draw_link_info:
                render_geom_info(
                    geom_type=d[2],
                    geom_size=d[3],
                    color=link_info_color,
                    scale=link_info_scale,
                    line_width=link_info_line_width,
                )
            # if ee_indices is not None and link_id in ee_indices:
            #     render_geom_bounding_box(
            #         geom_type=d[2], geom_size=d[3], color=[0, 0, 0, 1])
            glPopMatrix()
    if draw_joint_geom:
        render_joint_geoms(pb_client, model)
    glDisable(GL_DEPTH_TEST)
    if draw_joint:
        render_joints(pb_client, model)
        render_links(pb_client, model)
    glEnable(GL_DEPTH_TEST)
def parse_amc(file_path, joints, skel):
    with open(file_path) as f:
        content = f.read().splitlines()

    for idx, line in enumerate(content):
        if line == ":DEGREES":
            content = content[idx + 1:]
            break

    motion = motion_class.Motion(skel=skel)
    frames = []
    idx = 0
    line, idx = read_line(content, idx)
    assert line[0].isnumeric(), line
    EOF = False
    frame = 0
    fps = 60
    translation_data = []
    while not EOF:
        # joint_degree = {}
        while True:
            line, idx = read_line(content, idx)
            if line is None:
                EOF = True
                break
            if line[0].isnumeric():
                break
            line_idx = 1
            if "root" in line[0]:
                degree = np.array([float(line[i]) for i in range(4, 7)])
                joints[line[0]].coordinate = np.array(
                    [float(line[i]) for i in range(1, 4)])
            else:
                degree = []
                for lm in joints[line[0]].limits:
                    if lm[0] != lm[1]:
                        degree.append(float(line[line_idx]))
                        line_idx += 1
                    else:
                        degree.append(0)
            joints[line[0]].degree = np.deg2rad(np.array(degree).squeeze())
        pose_data = []
        set_rotation(joints["root"])
        for key in joints.keys():
            if joints[key].matrix is None:
                pose_data.append(constants.eye_T())
            else:
                pose_data.append(
                    conversions.Rp2T(
                        joints[key].matrix.squeeze(),
                        joints[key].coordinate.squeeze(),
                    ))
        motion.add_one_frame(frame / fps, pose_data)
        frame += 1
    return motion
def interpolate(cls, pose1, pose2, alpha):
    skel = pose1.skel
    data = []
    for j in skel.joints:
        R1, p1 = conversions.T2Rp(pose1.get_transform(j, local=True))
        R2, p2 = conversions.T2Rp(pose2.get_transform(j, local=True))
        R, p = (
            math.slerp(R1, R2, alpha),
            math.lerp(p1, p2, alpha),
        )
        data.append(conversions.Rp2T(R, p))
    return Pose(pose1.skel, data)
def add_noise_to_pose_vel(self, agent, pose, vel=None, return_as_copied=True):
    ''' Add a little bit of noise to the given pose and velocity '''
    ref_pose = copy.deepcopy(pose) if return_as_copied else pose
    ref_vel = None
    if vel is not None:
        ref_vel = copy.deepcopy(vel) if return_as_copied else vel
    dof_cnt = 0
    for j in agent._joint_indices:
        joint_type = agent.get_joint_type(j)
        ''' Ignore fixed joints '''
        if joint_type == self._pb_client.JOINT_FIXED:
            continue
        ''' Ignore if there is no corresponding joint '''
        if agent._char_info.bvh_map[j] is None:
            continue
        T = ref_pose.get_transform(agent._char_info.bvh_map[j], local=True)
        R, p = conversions.T2Rp(T)
        if joint_type == self._pb_client.JOINT_SPHERICAL:
            dR = math.random_rotation(
                mu_theta=agent._char_info.noise_pose[j][0],
                sigma_theta=agent._char_info.noise_pose[j][1],
                lower_theta=agent._char_info.noise_pose[j][2],
                upper_theta=agent._char_info.noise_pose[j][3])
            dof_cnt += 3
        elif joint_type == self._pb_client.JOINT_REVOLUTE:
            theta = math.truncnorm(
                mu=agent._char_info.noise_pose[j][0],
                sigma=agent._char_info.noise_pose[j][1],
                lower=agent._char_info.noise_pose[j][2],
                upper=agent._char_info.noise_pose[j][3])
            joint_axis = agent.get_joint_axis(j)
            dR = conversions.A2R(joint_axis * theta)
            dof_cnt += 1
        else:
            raise NotImplementedError
        T_new = conversions.Rp2T(np.dot(R, dR), p)
        ref_pose.set_transform(agent._char_info.bvh_map[j],
                               T_new,
                               do_ortho_norm=False,
                               local=True)
        if vel is not None:
            dw = math.truncnorm(
                mu=np.full(3, agent._char_info.noise_vel[j][0]),
                sigma=np.full(3, agent._char_info.noise_vel[j][1]),
                lower=np.full(3, agent._char_info.noise_vel[j][2]),
                upper=np.full(3, agent._char_info.noise_vel[j][3]))
            ref_vel.data_local[j][:3] += dw
    return ref_pose, ref_vel
def compute_target_pose(self, idx, action):
    agent = self._sim_agent[idx]
    char_info = agent._char_info
    ''' The current posture should be deep-copied because the action will modify it '''
    if self._action_type == Env.ActionMode.Relative:
        ref_pose = copy.deepcopy(self.get_current_pose_from_motion(idx))
    else:
        ref_pose = copy.deepcopy(
            self._base_motion[idx].get_pose_by_frame(0))
    a_real = self._action_space[idx].norm_to_real(action)
    dof_cnt = 0
    for j in agent._joint_indices:
        joint_type = agent.get_joint_type(j)
        ''' Fixed joints are not affected '''
        if joint_type == self._pb_client.JOINT_FIXED:
            continue
        ''' If the joint has no correspondence, use the reference posture itself '''
        if char_info.bvh_map[j] is None:
            continue
        if self._action_type == Env.ActionMode.Relative:
            T = ref_pose.get_transform(char_info.bvh_map[j], local=True)
        elif self._action_type == Env.ActionMode.Absolute:
            T = ref_pose.skel.get_joint(
                char_info.bvh_map[j]).xform_from_parent_joint
        else:
            raise NotImplementedError
        R, p = conversions.T2Rp(T)
        if joint_type == self._pb_client.JOINT_SPHERICAL:
            dR = conversions.A2R(a_real[dof_cnt:dof_cnt + 3])
            dof_cnt += 3
        elif joint_type == self._pb_client.JOINT_REVOLUTE:
            axis = agent.get_joint_axis(j)
            angle = a_real[dof_cnt:dof_cnt + 1]
            dR = conversions.A2R(axis * angle)
            dof_cnt += 1
        else:
            raise NotImplementedError
        T_new = conversions.Rp2T(np.dot(R, dR), p)
        ref_pose.set_transform(char_info.bvh_map[j],
                               T_new,
                               do_ortho_norm=False,
                               local=True)
    return ref_pose
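# A note on the composition used by the two functions above: right-multiplying
# by dR (np.dot(R, dR)) applies the delta in the joint's own local frame,
# whereas np.dot(dR, R) would apply it in the parent frame. A toy sketch with
# illustrative values, assuming the fairmotion conversions module imported by
# the surrounding code:
import numpy as np

R = conversions.A2R(np.array([0.0, 0.5 * np.pi, 0.0]))  # 90 deg about +y
dR = conversions.A2R(np.array([0.1, 0.0, 0.0]))         # small delta about +x
p = np.zeros(3)
T_new = conversions.Rp2T(np.dot(R, dR), p)              # delta in local frame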
def create_motion_from_amass_data(filename, bm, override_betas=None):
    bdata = np.load(filename)

    if override_betas is not None:
        betas = torch.Tensor(override_betas[:10][np.newaxis]).to("cpu")
    else:
        betas = torch.Tensor(bdata["betas"][:10][np.newaxis]).to("cpu")

    skel = create_skeleton_from_amass_bodymodel(
        bm, betas, len(joint_names), joint_names,
    )

    fps = float(bdata["mocap_framerate"])
    root_orient = bdata["poses"][:, :3]  # controls the global root orientation
    pose_body = bdata["poses"][:, 3:66]  # controls the body joint angles
    trans = bdata["trans"][:, :3]  # controls the global root translation

    motion = motion_class.Motion(skel=skel, fps=fps)

    num_joints = skel.num_joints()
    parents = bm.kintree_table[0].long()[:num_joints]

    for frame in range(pose_body.shape[0]):
        pose_body_frame = pose_body[frame]
        root_orient_frame = root_orient[frame]
        root_trans_frame = trans[frame]
        pose_data = []
        for j in range(num_joints):
            if j == 0:
                T = conversions.Rp2T(
                    conversions.A2R(root_orient_frame), root_trans_frame)
            else:
                T = conversions.R2T(
                    conversions.A2R(
                        pose_body_frame[(j - 1) * 3:(j - 1) * 3 + 3]))
            pose_data.append(T)
        motion.add_one_frame(pose_data)

    return motion
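# Hypothetical usage sketch: load_body_model is a stand-in for whatever loads
# your SMPL-H model (e.g. human_body_prior's BodyModel), and the paths are
# illustrative, not part of the source.
def load_amass_clip(load_body_model):
    bm = load_body_model("body_models/smplh/neutral/model.npz")  # hypothetical
    motion = create_motion_from_amass_data(
        "AMASS/CMU/01/01_01_poses.npz",  # hypothetical clip path
        bm,
    )
    print(motion.num_frames(), "frames at", motion.fps, "fps")
    return motion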
def _render_pose(self, pose, body_model, color):
    skel = pose.skel
    for j in skel.joints:
        T = pose.get_transform(j, local=False)
        pos = conversions.T2p(T)
        gl_render.render_point(pos, radius=0.03 * self.scale, color=color)
        if j.parent_joint is not None:
            pos_parent = conversions.T2p(
                pose.get_transform(j.parent_joint, local=False))
            p = 0.5 * (pos_parent + pos)
            l = np.linalg.norm(pos_parent - pos)
            r = 0.05
            # R_from_vectors returns X such that np.dot(X, vec1) = vec2
            R = math.R_from_vectors(np.array([0, 0, 1]), pos_parent - pos)
            gl_render.render_capsule(
                conversions.Rp2T(R, p),
                l,
                r * self.scale,
                color=color,
                slice=8,
            )
def transform(motion, T, local=False):
    """
    Apply transform to all poses of a motion sequence. The operation is done
    in-place.

    Args:
        motion: Motion sequence to be transformed
        T: Transformation matrix of shape (4, 4) to be applied to poses of
            motion
        local: Optional; Set local=True if the transformations are to be
            applied locally, relative to parent of each joint.
    """
    for pose_id in range(len(motion.poses)):
        R0, p0 = conversions.T2Rp(motion.poses[pose_id].get_root_transform())
        R1, p1 = conversions.T2Rp(T)
        if local:
            R, p = np.dot(R0, R1), p0 + np.dot(R0, p1)
        else:
            R, p = np.dot(R1, R0), p0 + p1
        motion.poses[pose_id].set_root_transform(
            conversions.Rp2T(R, p),
            local=False,
        )
    return motion
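# Example: rotating an entire motion about the vertical axis is a single
# transform() call. A sketch, assuming `motion` is a loaded Motion instance
# and +y is the up axis (both assumptions, not stated in the source):
import numpy as np

def rotate_motion_90_deg(motion):
    # 90-degree yaw about +y as a homogeneous transform: an axis-angle
    # vector of magnitude pi/2 along y, with zero translation.
    T_yaw = conversions.Rp2T(
        conversions.A2R(np.array([0.0, 0.5 * np.pi, 0.0])),
        np.zeros(3),
    )
    return transform(motion, T_yaw, local=False)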
def get_facing_transform(self):
    d, p = self.get_facing_direction_position()
    z = d
    y = self.skel.v_up_env
    x = np.cross(y, z)
    return conversions.Rp2T(np.array([x, y, z]).transpose(), p)
def get_facing_transform(self, ground_height=0.0):
    d, p = self.get_facing_direction_position(ground_height)
    z = d
    y = self._char_info.v_up_env
    x = np.cross(y, z)
    return conversions.Rp2T(np.array([x, y, z]).transpose(), p)
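# Both variants above use the same convention: stacking the axis vectors as
# rows and transposing puts x, y, z into the columns of the rotation matrix,
# so the resulting transform maps the local frame onto the facing frame.
# A minimal sketch, assuming a +x facing direction and +y up for illustration:
import numpy as np

z = np.array([1.0, 0.0, 0.0])        # facing direction
y = np.array([0.0, 1.0, 0.0])        # environment up vector
x = np.cross(y, z)                   # completes the right-handed frame

R = np.array([x, y, z]).transpose()  # axes become the columns of R
assert np.allclose(R @ np.array([0.0, 0.0, 1.0]), z)  # local +z -> facing dir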
def get_transform_flat(self):
    R = self.get_cam_rotatioin()
    R = R.transpose()
    p = self.pos
    return list(conversions.Rp2T(R, p).ravel())