def getBlendedPose(self, poses, weights, additiveBlending=True):
    import transformations as tm

    REST_QUAT = np.asarray([1, 0, 0, 0], dtype=np.float32)

    if isinstance(poses[0], basestring):
        f_idxs = [self.getPoseNames().index(pname) for pname in poses]
    else:
        f_idxs = poses

    if not additiveBlending:
        # normalize weights
        if not isinstance(weights, np.ndarray):
            weights = np.asarray(weights, dtype=np.float32)
        t = sum(weights)
        if t < 1:
            # Fill up rest with neutral pose (neutral pose is assumed to be first frame)
            weights = np.asarray(weights + [1.0 - t], dtype=np.float32)
            f_idxs.append(0)
        weights /= t

    #print zip([self.getPoseNames()[_f] for _f in f_idxs],weights)

    result = emptyPose(self.nBones)
    m = np.identity(4, dtype=np.float32)
    m1 = np.identity(4, dtype=np.float32)
    m2 = np.identity(4, dtype=np.float32)

    if len(f_idxs) == 1:
        for b_idx in xrange(self.nBones):
            m[:3, :4] = self.getAtFramePos(f_idxs[0], True)[b_idx]
            q = tm.quaternion_slerp(REST_QUAT, tm.quaternion_from_matrix(m, True), float(weights[0]))
            result[b_idx] = tm.quaternion_matrix(q)[:3, :4]
    else:
        for b_idx in xrange(self.nBones):
            m1[:3, :4] = self.getAtFramePos(f_idxs[0], True)[b_idx]
            m2[:3, :4] = self.getAtFramePos(f_idxs[1], True)[b_idx]
            q1 = tm.quaternion_slerp(REST_QUAT, tm.quaternion_from_matrix(m1, True), float(weights[0]))
            q2 = tm.quaternion_slerp(REST_QUAT, tm.quaternion_from_matrix(m2, True), float(weights[1]))
            quat = tm.quaternion_multiply(q2, q1)

            for i, f_idx in enumerate(f_idxs[2:]):
                i += 2
                m[:3, :4] = self.getAtFramePos(f_idx, True)[b_idx]
                q = tm.quaternion_slerp(REST_QUAT, tm.quaternion_from_matrix(m, True), float(weights[i]))
                quat = tm.quaternion_multiply(q, quat)

            result[b_idx] = tm.quaternion_matrix(quat)[:3, :4]

    return Pose(self.name + '-blended', result)
def set_smoothing_constraint(self, frames, constraint, frame_idx, window):
    backward_hit = None
    forward_hit = None
    start_window = max(frame_idx - window, 0)
    end_window = min(frame_idx + window, len(frames))
    # look backward
    search_window = list(range(start_window, frame_idx))
    for f in reversed(search_window):
        if constraint.joint_name in self.orientation_constraint_buffer[f]:
            backward_hit = f
            break
    # look forward
    for f in range(frame_idx, end_window):
        if constraint.joint_name in self.orientation_constraint_buffer[f]:
            forward_hit = f
            break
    # update q
    oq = get_global_orientation(self.skeleton, constraint.joint_name, frames[frame_idx])
    if backward_hit is not None and constraint.toe_position is not None:
        bq = self.orientation_constraint_buffer[backward_hit][constraint.joint_name]
        j = frame_idx - backward_hit
        t = float(j + 1) / (window + 1)
        global_q = normalize(quaternion_slerp(bq, oq, t, spin=0, shortestpath=True))
        constraint.orientation = normalize(global_q)
        delta = get_quat_delta(oq, global_q)
        delta = normalize(delta)
        # rotate stored vector by global delta
        if constraint.joint_name == self.left_foot:
            constraint.position = regenerate_ankle_constraint_with_new_orientation2(
                constraint.toe_position, constraint.global_toe_offset, delta)
        else:
            constraint.position = regenerate_ankle_constraint_with_new_orientation2(
                constraint.toe_position, constraint.global_toe_offset, delta)
    elif forward_hit is not None and constraint.heel_position is not None:
        k = forward_hit - frame_idx
        t = float(k + 1) / (window + 1)
        fq = self.orientation_constraint_buffer[forward_hit][constraint.joint_name]
        global_q = normalize(quaternion_slerp(oq, fq, t, spin=0, shortestpath=True))
        constraint.orientation = normalize(global_q)
        constraint.position = regenerate_ankle_constraint_with_new_orientation(
            constraint.heel_position, constraint.heel_offset, constraint.orientation)
def apply_end_orientation_correction(self, end_orientation):
    state_entry = self.get_last_state()
    if state_entry is None:
        return
    if self.settings.end_target_blend_range <= 0:
        state_entry.state.mv.frames[-1, 3:7] = end_orientation
    else:
        blend_range = min(self.settings.end_target_blend_range, state_entry.state.mv.n_frames)
        start_idx = state_entry.state.mv.n_frames - blend_range
        if start_idx < 0:
            blend_range = blend_range + start_idx
            start_idx = 0
        #clipped_start_idx = max(start_idx, 0)
        #unit_q = np.array([1,0,0,0])
        n_frames = len(state_entry.state.mv.frames)
        for i in range(blend_range):
            frame_idx = start_idx + i
            if 0 < frame_idx < n_frames:
                w = float(i) / blend_range
                q0 = state_entry.state.mv.frames[frame_idx, 3:7]
                q0 = normalize(q0)
                new_q = quaternion_slerp(q0, end_orientation, w)
                new_q = normalize(new_q)
                state_entry.state.mv.frames[frame_idx, 3:7] = new_q
        state_entry.state.mv.frames[-1, 3:7] = end_orientation
    #print("after alignment", get_root_delta_angle(self.skeleton, pelvis_name, self.state_queue[-1].state.mv.frames, end_direction))
    if len(self.state_queue) > 0:
        print("apply end orientation correction")
        self.state_queue[-1] = state_entry
def create_constraint(self, frame):
    global_matrix = self.skeleton.nodes[self.joint_name].get_global_matrix(frame)
    if self.prev_target_pos is None:
        self.prev_target_pos = global_matrix[:3, 3]
        delta = self.get_delta()
        self.distance = np.linalg.norm(delta)
        self.max_distance = self.distance
    else:
        delta = self.get_delta()
        self.distance = np.linalg.norm(delta)
    if self.distance > self.eps:
        delta /= self.distance
        delta *= min(self.distance, self.speed)
        new_target_pos = self.prev_target_pos + delta
        new_q = None
        if self.distance < self.rotation_distance:
            if self.original_rotation is None:
                self.original_rotation = normalize(quaternion_from_matrix(global_matrix))
            target_q = self.orientation
            weight = (1.0 - (self.distance / min(self.rotation_distance, self.max_distance)))
            new_q = quaternion_slerp(self.original_rotation, target_q, weight)
            #print("w", new_q, weight, self.original_rotation, target_q)
        c = KeyframeConstraint(0, self.joint_name, new_target_pos, new_q)
        self.prev_target_pos = new_target_pos
    else:
        c = KeyframeConstraint(0, self.joint_name, self.position, self.orientation)
    return c
def slerp_frame(self, joint_name, frame1, frame2, weight):
    idx = self.skeleton.nodes[joint_name].parent.quaternion_frame_index * 4 + 3
    q1 = normalize(frame1[idx:idx + 4])
    q2 = normalize(frame2[idx:idx + 4])
    frame1[idx:idx + 4] = normalize(quaternion_slerp(q1, q2, weight))
    return frame1
def orient_end_effector_to_target(skeleton, root, end_effector, frame, constraint):
    """ find angle between the vectors end_effector - root and target - root
    """
    # align vectors
    root_pos = skeleton.nodes[root].get_global_position(frame)
    if constraint.offset is not None:
        m = skeleton.nodes[end_effector].get_global_matrix(frame)
        end_effector_pos = np.dot(m, constraint.offset)[:3]
    else:
        end_effector_pos = skeleton.nodes[end_effector].get_global_position(frame)
    src_delta = end_effector_pos - root_pos
    src_dir = src_delta / np.linalg.norm(src_delta)
    target_delta = constraint.position - root_pos
    target_dir = target_delta / np.linalg.norm(target_delta)
    root_delta_q = quaternion_from_vector_to_vector(src_dir, target_dir)
    root_delta_q = normalize(root_delta_q)
    if skeleton.nodes[root].stiffness > 0:
        t = 1 - skeleton.nodes[root].stiffness
        root_delta_q = quaternion_slerp([1, 0, 0, 0], root_delta_q, t)
        root_delta_q = normalize(root_delta_q)
    global_m = quaternion_matrix(root_delta_q)
    parent_joint = skeleton.nodes[root].parent.node_name
    parent_m = skeleton.nodes[parent_joint].get_global_matrix(frame, use_cache=True)
    old_global = np.dot(parent_m, skeleton.nodes[root].get_local_matrix(frame))
    new_global = np.dot(global_m, old_global)
    q = quaternion_from_matrix(new_global)
    return normalize(q)
def interpolate_with(self, other_tf, t):
    """Interpolate with another rigid transformation.

    Parameters
    ----------
    other_tf : :obj:`RigidTransform`
        The transform to interpolate with.
    t : float
        The interpolation step in [0,1], where 0 favors this RigidTransform.

    Returns
    -------
    :obj:`RigidTransform`
        The interpolated RigidTransform.

    Raises
    ------
    ValueError
        If t isn't in [0,1].
    """
    if t < 0 or t > 1:
        raise ValueError('Must interpolate between 0 and 1')
    interp_translation = (1.0 - t) * self.translation + t * other_tf.translation
    interp_rotation = transformations.quaternion_slerp(self.quaternion, other_tf.quaternion, t)
    interp_tf = RigidTransform(rotation=interp_rotation,
                               translation=interp_translation,
                               from_frame=self.from_frame,
                               to_frame=self.to_frame)
    return interp_tf
def RediscretizeTrajectory(self, traj, num_between_points):
    or_points = len(traj)
    if (num_between_points < 1):
        return None
    new_traj = []
    for i in range(0, or_points - 1):
        cur_traj_point = traj[i]
        next_traj_point = traj[i + 1]
        old_pos = cur_traj_point[0:3, 3]
        next_pos = next_traj_point[0:3, 3]
        pos_diff = (next_pos - old_pos)
        or_quat = transformations.quaternion_from_matrix(cur_traj_point)
        next_quat = transformations.quaternion_from_matrix(next_traj_point)
        new_traj.append(np.copy(cur_traj_point))
        for j in range(0, num_between_points):
            fraction = float(j + 1) / float(num_between_points + 1)
            interp_quat = transformations.quaternion_slerp(or_quat, next_quat, fraction)
            interp_pos = old_pos + (fraction * pos_diff)
            interp_trans = transformations.quaternion_translation_matrix(interp_quat, interp_pos)
            new_traj.append(np.copy(interp_trans))
    new_traj.append(np.copy(traj[len(traj) - 1]))  # append last traj point
    return new_traj
def align_joint(skeleton, frames, frame_idx, foot_joint, ik_chain, ik_window=7):
    transition_start = frame_idx + 1
    transition_end = frame_idx + ik_window
    c = create_grounding_constraint_from_frame(skeleton, frames, frame_idx, foot_joint)
    ik = AnalyticalLimbIK.init_from_dict(skeleton, c.joint_name, ik_chain)
    frames[transition_start] = ik.apply(frames[transition_start], c.position, c.orientation)
    chain_joints = [ik_chain["root"], ik_chain["joint"], foot_joint]
    for c_joint in chain_joints:
        idx = skeleton.animated_joints.index(c_joint) * 4 + 3
        j_indices = [idx, idx + 1, idx + 2, idx + 3]
        start_q = frames[transition_start][j_indices]
        end_q = frames[transition_end][j_indices]
        for i in range(ik_window):
            t = float(i) / ik_window
            frames[transition_start + 1 + i][j_indices] = quaternion_slerp(
                start_q, end_q, t, spin=0, shortestpath=True)
    return frames
def align_frames_and_fix_foot_to_prev(skeleton, aligning_joint, new_frames, prev_frames,
                                      start_pose, foot_joint, ik_chain, ik_window=7,
                                      smoothing_window=0):
    new_frames = align_quaternion_frames(skeleton, aligning_joint, new_frames, prev_frames, start_pose)
    if prev_frames is not None:
        offset = prev_frames[-1][:3] - new_frames[0][:3]
        d = len(prev_frames)
        frames = prev_frames.tolist()
        for idx in range(1, len(new_frames)):  # skip first frame
            frames.append(new_frames[idx])
        frames = np.array(frames)
        transition_start = d
        c = create_grounding_constraint_from_frame(skeleton, frames, d - 1, foot_joint)
        ik = AnalyticalLimbIK.init_from_dict(skeleton, c.joint_name, ik_chain)
        before = skeleton.nodes[foot_joint].get_global_position(frames[transition_start])
        frames[transition_start] = ik.apply(frames[transition_start], c.position, c.orientation)
        transition_end = d + ik_window
        print("align frames", c.position, foot_joint, d - 1, transition_end, before,
              skeleton.nodes[foot_joint].get_global_position(frames[transition_start]))
        print(skeleton.nodes[foot_joint].get_global_position(frames[d]))
        chain_joints = [ik_chain["root"], ik_chain["joint"], foot_joint]
        for c_joint in chain_joints:
            idx = skeleton.animated_joints.index(c_joint) * 4 + 3
            j_indices = [idx, idx + 1, idx + 2, idx + 3]
            start_q = frames[transition_start][j_indices]
            end_q = frames[transition_end][j_indices]
            print(c_joint, start_q, end_q, j_indices)
            for i in range(ik_window):
                t = float(i) / ik_window
                # nlerp_q = self.nlerp(start_q, end_q, t)
                slerp_q = quaternion_slerp(start_q, end_q, t, spin=0, shortestpath=True)
                print(transition_start + i + 1, frames[transition_start + 1 + i][j_indices], slerp_q)
                frames[transition_start + 1 + i][j_indices] = slerp_q
        if smoothing_window > 0 and False:
            frames = smooth_quaternion_frames(frames, d, smoothing_window)
        return frames
    else:
        return new_frames
def averageTransforms(l_transforms):
    N = len(l_transforms)
    print("Computing the average of " + str(N) + " transforms")

    # Get a list of all the translations l_t
    l_t = [i[0] for i in l_transforms]
    # print(l_t)

    # compute the average translation
    tmp1 = sum(v[0] for v in l_t) / N
    tmp2 = sum(v[1] for v in l_t) / N
    tmp3 = sum(v[2] for v in l_t) / N
    avg_t = (tmp1, tmp2, tmp3)
    # print(avg_t)

    # Get a list of the rotation quaternions
    l_q = [i[1] for i in l_transforms]
    # print(l_q)

    # Average the quaternions using an incremental slerp approach
    acc = 1.0  # accumulator, counts the number of observations inserted already
    avg_q = random_quaternion()  # can be random for start, since it will not be used
    for q in l_q:
        # How to deduce the ratio on each iteration
        # w_q = 1 / (acc + 1)
        # w_qavg = acc / (acc + 1)
        # ratio = w_q / w_qavg <=> 1 / acc
        avg_q = quaternion_slerp(avg_q, q, 1.0 / acc)  # run pairwise slerp
        acc = acc + 1  # increment acc

    avg_q = tuple(avg_q)
    # print(avg_q)

    return (avg_t, avg_q)
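# --- Minimal usage sketch for averageTransforms (hypothetical toy data; the
# (translation, quaternion) tuple layout and the bare quaternion_slerp /
# random_quaternion imports are assumptions inferred from the function above).
import numpy as np
from transformations import quaternion_about_axis

t_a = ((0.0, 0.0, 0.0), tuple(quaternion_about_axis(0.0, (0, 0, 1))))
t_b = ((1.0, 0.0, 0.0), tuple(quaternion_about_axis(np.pi / 2, (0, 0, 1))))
avg_t, avg_q = averageTransforms([t_a, t_b])
# avg_t is the mean translation (0.5, 0.0, 0.0); avg_q is roughly a 45 degree
# rotation about Z, built by the incremental slerp with ratio 1/acc.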
def generate_frame_using_iterative_slerp2(skeleton, frames, weights):
    """src: https://gamedev.stackexchange.com/questions/62354/method-for-interpolation-between-3-quaternions
    """
    frame = None
    w_sum = 0.0
    for frame_idx, w in enumerate(weights):
        if frame is None:
            frame = frames[frame_idx][:]
            w_sum += w
        else:
            new_w_sum = w_sum + w
            if new_w_sum > 0:
                w_a = w_sum / new_w_sum
                w_b = w / new_w_sum
                frame_b = frames[frame_idx][:]
                frame[:3] = w_a * frame[:3] + w_b * frame_b[:3]
                for idx, j in enumerate(skeleton.animated_joints):
                    q_start_idx = (idx * 4) + 3
                    q_end_idx = q_start_idx + 4
                    q_a = np.array(frame[q_start_idx:q_end_idx])
                    q_b = frame_b[q_start_idx:q_end_idx]
                    new_q = quaternion_slerp(q_a, q_b, w_b)
                    new_q /= np.linalg.norm(new_q)
                    frame[q_start_idx:q_end_idx] = new_q
            w_sum = new_w_sum
    return frame
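# --- Standalone toy illustration (not tied to any skeleton) of the incremental
# weighting used above: slerping each new quaternion against the running blend
# with fraction w / (w_sum + w) reproduces the normalized weights.
import numpy as np
from transformations import quaternion_about_axis, quaternion_slerp

quats = [quaternion_about_axis(a, (0, 0, 1)) for a in (0.0, 0.4, 0.8)]
weights = [0.5, 0.3, 0.2]
avg, w_sum = None, 0.0
for q, w in zip(quats, weights):
    if avg is None:
        avg, w_sum = np.array(q), w
    else:
        new_w_sum = w_sum + w
        avg = quaternion_slerp(avg, q, w / new_w_sum)  # same w_b as in the function above
        avg /= np.linalg.norm(avg)
        w_sum = new_w_sum
# For rotations about a single axis this matches the weighted mean angle,
# here 0.5*0.0 + 0.3*0.4 + 0.2*0.8 = 0.28 rad about Z.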
def interp_transforms(T1, T2, alpha):
    # Blend two 4x4 homogeneous transforms: mix the matrix entries (and hence
    # the translation) linearly, then overwrite the rotation block with the
    # slerp of the two rotations so it stays a valid rotation. Note that alpha
    # weights T1 in the linear mix but is the slerp fraction from q1 toward q2.
    assert alpha <= 1
    T_avg = alpha * T1 + (1 - alpha) * T2
    q1 = quaternion_from_matrix(T1)
    q2 = quaternion_from_matrix(T2)
    q3 = quaternion_slerp(q1, q2, alpha)
    R = quaternion_matrix(q3)
    T_avg[0:3, 0:3] = R[0:3, 0:3]
    return T_avg
def apply_smoothing(self, new_frames, ref_frame, start_frame, window):
    for i in range(window):
        t = (i / window)
        for idx in range(len(self.skeleton.animated_joints)):
            o = idx * 4 + 3
            indices = [o, o + 1, o + 2, o + 3]
            old_quat = ref_frame[indices]
            new_quat = new_frames[start_frame + i, indices]
            new_frames[start_frame + i, indices] = quaternion_slerp(
                old_quat, new_quat, t, spin=0, shortestpath=True)
    return new_frames
def generated_blend(start_q, end_q, window):
    blend = np.zeros((window, 4))
    for i in range(window):
        t = float(i) / window
        slerp_q = quaternion_slerp(start_q, end_q, t, spin=0, shortestpath=True)
        blend[i] = slerp_q
    return blend
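# --- Minimal usage sketch with toy quaternions. Because the loop uses
# t = i / window, the blend starts exactly at start_q but stops one step
# short of end_q.
import numpy as np
from transformations import quaternion_about_axis

start_q = quaternion_about_axis(0.0, (0, 0, 1))      # identity rotation
end_q = quaternion_about_axis(np.pi / 2, (0, 0, 1))  # 90 degrees about Z
blend = generated_blend(start_q, end_q, 4)
# blend[0] equals start_q; blend[3] is a 67.5 degree rotation, not end_q.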
def timerfunc(_):
    global starttime
    nexttime = time.time()
    deltatime = nexttime - starttime
    starttime = nexttime
    if animate is not None:  # Continue in auto-spin if arcball not active
        animate[2] += deltatime * 20.0
        arcball._qnow = quaternion_slerp(animate[0], animate[1], animate[2], False)
        glutPostRedisplay()
    set_animate_timer()
def matrix_slerp(matA, matB, alpha=0.6):
    if matA is None:
        return matB
    import transformations
    qA = transformations.quaternion_from_matrix(matA)
    qB = transformations.quaternion_from_matrix(matB)
    qC = transformations.quaternion_slerp(qA, qB, alpha)
    mat = matB.copy()
    mat[:3, 3] = alpha * matA[:3, 3] + (1 - alpha) * matB[:3, 3]
    mat[:3, :3] = transformations.quaternion_matrix(qC)[:3, :3]
    return mat
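# --- Small usage sketch with toy 4x4 matrices. Worth noting: alpha weights
# matA in the translation mix but is the slerp fraction from qA toward qB,
# so the two parts use opposite conventions.
import numpy as np
from transformations import rotation_matrix

matA = np.eye(4)
matB = rotation_matrix(np.pi / 2, (0, 0, 1))
matB[:3, 3] = [1.0, 0.0, 0.0]
blended = matrix_slerp(matA, matB)  # default alpha = 0.6
# blended[:3, 3] == [0.4, 0, 0] (60% of matA's translation), while the
# rotation block is 60% of the way toward matB (a 54 degree turn about Z).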
def interpolate_frames(skeleton, frames_a, frames_b, joint_list, start, end):
    blended_frames = deepcopy(frames_a[:])
    window = end - start
    for joint in joint_list:
        idx = skeleton.animated_joints.index(joint) * 4 + 3
        j_indices = [idx, idx + 1, idx + 2, idx + 3]
        for f in range(window):
            t = (float(f) / window)
            q_a = frames_a[start + f][j_indices]
            q_b = frames_b[start + f][j_indices]
            blended_frames[start + f][j_indices] = quaternion_slerp(
                q_a, q_b, t, spin=0, shortestpath=True)
    return blended_frames
def create_transition_using_slerp_backward(quat_frames, start_frame, end_frame, q_indices):
    start_q = quat_frames[start_frame, q_indices]
    steps = end_frame - start_frame
    for i in range(steps):
        t = float(i) / steps
        end_q = quat_frames[i, q_indices]
        slerp_q = quaternion_slerp(start_q, end_q, t, spin=0, shortestpath=True)
        quat_frames[start_frame + i, q_indices] = slerp_q
def interpolate(self, start_idx, end_idx, t):
    new_frame = np.zeros(self.frames[0].shape)
    new_frame[:3] = (1 - t) * self.frames[start_idx][:3] + t * self.frames[end_idx][:3]
    for i in range(3, new_frame.shape[0], 4):
        start_q = self.frames[start_idx][i:i + 4]
        end_q = self.frames[end_idx][i:i + 4]
        new_frame[i:i + 4] = quaternion_slerp(start_q, end_q, t, spin=0, shortestpath=True)
    return new_frame
def slerp_quaternion_frame(frame_a, frame_b, weight):
    frame_a = np.asarray(frame_a)
    frame_b = np.asarray(frame_b)
    assert len(frame_a) == len(frame_b)
    n_joints = int((len(frame_a) - 3) / 4)
    new_frame = np.zeros(len(frame_a))
    # linear interpolate root translation
    new_frame[:3] = (1 - weight) * frame_a[:3] + weight * frame_b[:3]
    for i in range(n_joints):
        new_frame[3 + i * 4: 3 + (i + 1) * 4] = quaternion_slerp(
            frame_a[3 + i * 4: 3 + (i + 1) * 4],
            frame_b[3 + i * 4: 3 + (i + 1) * 4],
            weight)
    return new_frame
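# --- Minimal usage sketch with a single-joint toy frame (3 translation values
# followed by one quaternion); only numpy and transformations are assumed.
import numpy as np
from transformations import quaternion_about_axis

frame_a = np.concatenate([[0.0, 0.0, 0.0], quaternion_about_axis(0.0, (0, 0, 1))])
frame_b = np.concatenate([[2.0, 0.0, 0.0], quaternion_about_axis(np.pi / 2, (0, 0, 1))])
blended = slerp_quaternion_frame(frame_a, frame_b, 0.5)
# blended[:3] == [1, 0, 0] (lerped root translation) and blended[3:] is a
# 45 degree rotation about Z (slerped joint quaternion).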
def create_frames_using_slerp(quat_frames, start_frame, end_frame, steps, joint_parameter_indices):
    start_q = quat_frames[start_frame, joint_parameter_indices]
    end_q = quat_frames[end_frame, joint_parameter_indices]
    frames = []
    for i in range(steps):
        t = float(i) / steps
        slerp_q = quaternion_slerp(start_q, end_q, t, spin=0, shortestpath=True)
        frames.append(slerp_q)
    return frames
def update(self):
    new_time = time.time()
    dt = new_time - self.t_last_update
    self.t_last_update = new_time
    self.orientation = transformations.quaternion_slerp(self.orientation, self.input_orientation, 0.1)
    mmv = self.view
    dleft = np.array([mmv[0, 0], mmv[1, 0], mmv[2, 0]])   # delta along the left camera axis
    dup = np.array([mmv[0, 1], mmv[1, 1], mmv[2, 1]])     # delta along the up camera axis
    dpoint = np.array([mmv[0, 2], mmv[1, 2], mmv[2, 2]])  # delta along the pointer camera axis
    vleft = np.dot(self.velocity, dleft)
    vup = np.dot(self.velocity, dup)
    vpoint = np.dot(self.velocity, dpoint)
    velocity_norm = np.linalg.norm(self.velocity)
    acceleration = dleft * self.c_acceleration[0] + dup * self.c_acceleration[1] + dpoint * self.c_acceleration[2]
    if GlobalSettings.read('killspeed'):
        GlobalSignals.set('speed killing: ', 'On')
        self.velocity *= 0.95
        if velocity_norm < 0.05:
            GlobalSettings.set('killspeed', False)
            GlobalSignals.set('speed killing: ', 'Off')
    else:
        GlobalSignals.set('speed killing: ', 'Off')
    if GlobalSettings.read('alignspeed'):
        GlobalSignals.set('speed aligning: ', 'On')
        self.velocity = dleft * vleft * 0.95 + dup * vup * 0.95
        self.velocity += -dpoint * np.sqrt(velocity_norm**2 - (vleft * 0.95)**2 - (vup * 0.95)**2)
        if (vup < 0.05 and vleft < 0.05):
            GlobalSettings.set('alignspeed', False)
            GlobalSignals.set('speed aligning: ', 'Off')
    else:
        GlobalSignals.set('speed aligning: ', 'Off')
    self.velocity = self.velocity + acceleration * dt
    self.position = self.position + self.velocity * dt
    self.c_acceleration[:] = [0, 0, 0]
    GlobalSignals.set('velocity: ', [vpoint, vleft, vup])
def blend_between_frames(skeleton, frames, start, end, joint_list, window):
    for joint in joint_list:
        idx = skeleton.animated_joints.index(joint) * 4 + 3
        j_indices = [idx, idx + 1, idx + 2, idx + 3]
        start_q = frames[start][j_indices]
        end_q = frames[end][j_indices]
        print(joint, window)
        for i in range(window):
            t = float(i) / window
            slerp_q = quaternion_slerp(start_q, end_q, t, spin=0, shortestpath=True)
            frames[start + i][j_indices] = slerp_q
def interpolate(cameras, n_inter):
    '''
    Interpolate camera parameters to create virtual cameras

    Args:
        cameras: list of cameras. Each entry is a tuple with camera params RTfckp
        n_inter: number of interpolations to make between each camera pair
    Returns:
        vcams: a list with all the interpolated virtual cameras
    '''
    from transformations import quaternion_from_matrix, quaternion_slerp, quaternion_matrix, interpolate_spherical

    ncams = len(cameras)
    inter_cameras = []
    for i in range(ncams):
        inter_cameras.append([])
        fractions = np.linspace(0, 1, n_inter + 2)[1:-1]
        inter_idx = 1
        for inter in range(n_inter):
            # Interpolate rotation matrices (R) in quaternion space.
            q0 = quaternion_from_matrix(cameras[i][0])
            q1 = quaternion_from_matrix(cameras[(i + 1) % ncams][0])
            q_inter = quaternion_slerp(q0, q1, fractions[inter])
            cam_inter = [quaternion_matrix(q_inter)[:3, :3]]

            # Interpolate translations (T) in spherical coords around the center
            # probably the ideal thing would be to use dual quaternion slerp.
            T_mid = interpolate_spherical(
                cameras[i][1].reshape(1, -1),
                cameras[(i + 1) % ncams][1].reshape(1, -1),
                fractions[inter]).T
            cam_inter.append(T_mid)

            # Linear interpolation for the rest of numeric params (f, c, k, p)
            for j in range(2, 6):
                cam_inter.append((cameras[i][j] + cameras[(i + 1) % ncams][j]) / 2.)

            # Give it a dummy name (name) - v for virtual
            cam_inter.append(cameras[i][6] + ".v{0}".format(inter_idx))
            inter_idx = inter_idx + 1

            inter_cameras[-1].append(cam_inter)

    # Put everything into one big list
    #allcams = sum([[cam]+intercam for cam,intercam in zip(cameras, inter_cameras)], [])
    vcams = list(itertools.chain(*inter_cameras))
    return vcams
def smooth_quaternion_frames_using_slerp_(quat_frames, joint_parameter_indices, event_frame, window):
    start_frame = event_frame - window / 2
    end_frame = event_frame + window / 2
    start_q = quat_frames[start_frame, joint_parameter_indices]
    end_q = quat_frames[end_frame, joint_parameter_indices]
    for i in range(window):
        t = float(i) / window
        #nlerp_q = self.nlerp(start_q, end_q, t)
        slerp_q = quaternion_slerp(start_q, end_q, t, spin=0, shortestpath=True)
        #print "slerp", start_q, end_q, t, nlerp_q, slerp_q
        quat_frames[start_frame + i, joint_parameter_indices] = slerp_q
def blend_chain(self, frame, new_frame, weights):
    self.joint_indices = []
    joint_name = self.joint_name
    w_idx = 0
    while joint_name != self.chain_end_joint:
        if w_idx >= len(weights):
            print("Not enough weights", joint_name, w_idx)
            return frame
        idx = self.skeleton.nodes[joint_name].parent.quaternion_frame_index * 4 + 3
        q1 = normalize(frame[idx:idx + 4])
        q2 = normalize(new_frame[idx:idx + 4])
        frame[idx:idx + 4] = normalize(quaternion_slerp(q1, q2, weights[w_idx]))
        #print(joint_name, weights[w_idx], q2)
        joint_name = self.skeleton.nodes[joint_name].parent.node_name
        w_idx += 1
    return frame
def interpLocus(self, ltime):
    """Given a time, interpolate the direction locus vector.
    """
    # find the two rows spanning the current value for time
    self.dfLocus['uidx'] = np.where(
        np.sign(self.dfLocus['time'] - ltime).diff() > 0, True, False)
    # uidx is the index of the upper bound, uidx-1 is the lower bound
    uidx = self.dfLocus[self.dfLocus['uidx']].index.tolist()[0]
    # get parametric value between lower and upper, based on time
    t = (ltime - self.dfLocus.ix[uidx - 1]['time']) / (
        self.dfLocus.ix[uidx]['time'] - self.dfLocus.ix[uidx - 1]['time'])
    # get the two quats at lower and upper
    lq = self.dfLocus.ix[uidx - 1]['quat']
    uq = self.dfLocus.ix[uidx]['quat']
    # and do slerp
    # http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/slerp/
    q = tr.quaternion_slerp(lq[0], uq[0], t)
    yaw, pit, rol = tr.euler_from_quaternion(q, axes=self.sequence)
    return yaw, pit, rol, q
def random_sequence(duration=10.0, poses=5, radius=1.0, fps=30.0):
    """
    Args:
        duration: in seconds
        poses: number of different target poses
        radius: random translation within a box size [-radius, radius]
        fps: 1./seconds
    """
    # First pose is identity quat, identity translation
    Start = np.array([1, 0, 0, 0], "f"), np.array([0, 0, 0])
    ts = 0.0
    matrices = []
    timestamps = []
    dt = 1.0 / fps
    for i in range(poses):
        # New target pose
        End = random_pose(radius)
        frac = 0.0
        while not np.allclose(frac, 1.0):
            RT = np.eye(4, dtype="f")
            # Rotation
            Q = transformations.quaternion_slerp(Start[0], End[0], frac)
            RT[:3, :3] = transformations.quaternion_matrix(Q)[:3, :3]
            # Translation
            T = (1 - frac) * Start[1] + (frac) * End[1]
            RT[:3, 3] = T
            timestamps.append(ts)
            matrices.append(RT)
            ts += dt
            frac += dt / (duration / poses)
        Start = End
    return PoseSequence(np.array(matrices), np.array(timestamps))
def linear_blending(ref_pose, quat_frames, skeleton, weights, joint_list=None):
    '''
    Apply linear blending on quaternion motion data

    :param ref_pose (numpy.array):
    :param quat_frames:
    :param skeleton (morphablegraphs.animation_data.Skeleton):
    :param weights (numpy.array): weights used for slerp
    :param joint_list (list): animated joints to be blended
    :return:
    '''
    if joint_list is None:
        joint_list = skeleton.animated_joints
    new_frames = deepcopy(quat_frames)
    for i in range(len(quat_frames)):
        for joint in joint_list:
            joint_index = skeleton.nodes[joint].quaternion_frame_index
            start_index = LEN_ROOT_POS + LEN_QUAT * joint_index
            end_index = LEN_ROOT_POS + LEN_QUAT * (joint_index + 1)
            ref_q = ref_pose[start_index:end_index]
            motion_q = quat_frames[i, start_index:end_index]
            new_frames[i, start_index:end_index] = quaternion_slerp(ref_q, motion_q, weights[i])
    return new_frames
def interpolate_constraints(c1, c2):
    p = (c1.position + c2.position) / 2
    o = quaternion_slerp(c1.orientation, c2.orientation, 0.5)
    o = normalize(o)
    return MotionGroundingConstraint(c1.frame_idx, c1.joint_name, p, None, o)
def _step(self, t, dt):
    if t <= self.maxt:
        self.c = (self.e - self.b) * t / self.maxt
        self.qc = tr.quaternion_slerp(self.qb, self.qe, t / self.maxt)