def quaternion_lerp(quat_from, quat_to, f_interpolation):
    """Normalized linear interpolation (nlerp) between two quaternions.

    Blends component-wise from `quat_from` toward `quat_to` by
    `f_interpolation` in [0, 1], taking the shortest arc, and returns
    the re-normalized result as a new RLPy.RQuaternion.
    """
    t = f_interpolation
    # Quaternion dot product: negative means the two rotations sit in
    # opposite hemispheres, i.e. blending directly would take the long
    # way around.
    dot = (quat_from.x * quat_to.x + quat_from.y * quat_to.y
           + quat_from.z * quat_to.z + quat_from.w * quat_to.w)
    # Blend toward quat_to, or toward its negation for the short arc
    # (q and -q represent the same rotation).
    sign = 1.0 if dot >= 0 else -1.0
    q = RLPy.RQuaternion()
    q.x = (1 - t) * quat_from.x + sign * t * quat_to.x
    q.y = (1 - t) * quat_from.y + sign * t * quat_to.y
    q.z = (1 - t) * quat_from.z + sign * t * quat_to.z
    q.w = (1 - t) * quat_from.w + sign * t * quat_to.w
    # Lerp shortens the quaternion; scale back to unit length so it is
    # a valid rotation again.
    norm_sq = q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w
    inv_len = 1 / sqrt(norm_sq)
    q.x *= inv_len
    q.y *= inv_len
    q.z *= inv_len
    q.w *= inv_len
    return q
def e_to_q(e):
    """Convert an XYZ-order Euler rotation, given in degrees, to a quaternion.

    `e` is any indexable triple of angles in degrees; returns an
    RLPy.RQuaternion built from the corresponding rotation matrix.
    """
    to_rad = RLPy.RMath.CONST_DEG_TO_RAD
    rotation = RLPy.RMatrix3()
    # FromEulerAngle returns a tuple whose first element is the matrix.
    built = rotation.FromEulerAngle(
        RLPy.EEulerOrder_XYZ,
        e[0] * to_rad,
        e[1] * to_rad,
        e[2] * to_rad)
    quaternion = RLPy.RQuaternion()
    quaternion.FromRotationMatrix(built[0])
    return quaternion
def q_to_e(q):
    """Convert a quaternion to XYZ-order Euler angles in degrees.

    `q` is anything exposing x/y/z/w components; returns the sequence
    produced by RLPy's ToEulerAngle with each angle scaled to degrees.
    """
    quaternion = RLPy.RQuaternion(RLPy.RVector4(q.x, q.y, q.z, q.w))
    matrix = quaternion.ToRotationMatrix()
    # The trailing zeros are placeholder output arguments required by
    # the RLPy signature; the angles come back in the return value.
    angles = matrix.ToEulerAngle(RLPy.EEulerOrder_XYZ, 0, 0, 0)
    to_deg = RLPy.RMath.CONST_RAD_TO_DEG
    for i in range(3):
        angles[i] = angles[i] * to_deg
    return angles
def set_bone_transform(animation_clip, bone, time, pose, offset, mirror=False, ignoreFilters=False):
    """Key one bone's rotation (and position, if present) on the Layer clip.

    The stored pose is relative to `offset`: the offset rotation is
    divided out of the pose rotation and the offset translation is
    subtracted from the pose translation. Rotation channels are gated
    by the UI axis checkboxes unless `ignoreFilters` is set; Y and Z
    rotations are negated when `mirror` is truthy.
    """
    global pc_ui
    layer_control = animation_clip.GetControl("Layer", bone)
    block = layer_control.GetDataBlock()
    # Remove the offset rotation: relative = pose * offset^-1.
    pose_q = RLPy.RQuaternion(
        RLPy.RVector4(pose["r"][0], pose["r"][1], pose["r"][2], pose["r"][3]))
    offset_q = RLPy.RQuaternion(
        RLPy.RVector4(offset["r"][0], offset["r"][1], offset["r"][2], offset["r"][3]))
    relative_q = pose_q.Multiply(offset_q.Inverse())
    # Placeholder zeros are unused output args; angles come back in the
    # return value (radians, XYZ order).
    euler = relative_q.ToRotationMatrix().ToEulerAngle(RLPy.EEulerOrder_XYZ, 0, 0, 0)
    widget = pc_ui["widget"]
    # Rotation keys, each gated by its axis filter checkbox.
    if widget.rotation_x.isChecked() or ignoreFilters:
        block.GetControl("Rotation/RotationX").SetValue(time, euler[0])
    if widget.rotation_y.isChecked() or ignoreFilters:
        y_value = euler[1] if mirror is False else -euler[1]
        block.GetControl("Rotation/RotationY").SetValue(time, y_value)
    if widget.rotation_z.isChecked() or ignoreFilters:
        z_value = euler[2] if mirror is False else -euler[2]
        block.GetControl("Rotation/RotationZ").SetValue(time, z_value)
    # Only root-style bones expose position channels; skip the rest.
    if block.GetControl("Position/PositionX") is not None:
        for axis, i in (("X", 0), ("Y", 1), ("Z", 2)):
            block.GetControl("Position/Position" + axis).SetValue(
                time, pose["t"][i] - offset["t"][i])
def destination_transform():
    """Transform placing the camera/view at the UI offset, facing the target prop.

    Position is the prop's world position plus the offset spinboxes;
    rotation comes from a right-handed look-at toward the prop with the
    UI elevation applied; scale is identity.

    NOTE: another `destination_transform` is defined later in this file;
    whichever definition runs last wins at module scope.
    """
    global ui
    widget = ui["widget"]
    target = all_props[widget.prop.currentIndex()].WorldTransform().T()
    # Camera position: prop location shifted by the user-supplied offsets.
    eye = target + RLPy.RVector3(widget.offset_x.value(),
                                 widget.offset_y.value(),
                                 widget.offset_z.value())
    # Orientation matrix for a camera at `eye` looking at the prop,
    # world up = +Z, tilted by the elevation control.
    facing = look_at_right_handed(eye, target, RLPy.RVector3(0, 0, 1),
                                  widget.elevation.value())
    # NOTE(review): assumes FromRotationMatrix returns the quaternion
    # (elsewhere in this file the mutated object is used instead) —
    # confirm against the RLPy API.
    rot = RLPy.RQuaternion().FromRotationMatrix(facing)
    return RLPy.RTransform(RLPy.RVector3(1, 1, 1), rot, eye)
def from_to_rotation(from_vector, to_vector):
    """Return a quaternion rotating `from_vector` onto `to_vector`.

    Both input vectors are normalized in place. The rotation axis is
    the (normalized) cross product; when the vectors are (nearly)
    parallel or anti-parallel that cross product degenerates, so the
    world up axis (UNIT_Z) is used instead.
    """
    result = RLPy.RQuaternion()
    from_vector.Normalize()
    to_vector.Normalize()
    # Fix: clamp the cosine into [-1, 1]. Rounding in Normalize/Dot can
    # push it fractionally outside that range, which is outside acos's
    # domain and would yield NaN (or an error) for near-parallel inputs.
    cos_angle = max(-1.0, min(1.0, from_vector.Dot(to_vector)))
    angle = RLPy.RMath_ACos(cos_angle)
    if RLPy.RMath.AlmostZero(angle - RLPy.RMath.CONST_PI) or RLPy.RMath.AlmostZero(angle):
        # Degenerate case: no unique axis from the cross product.
        # NOTE(review): for anti-parallel vectors lying along Z this
        # fallback axis is not perpendicular to them — confirm callers
        # never hit that configuration.
        up_axis = RLPy.RVector3(RLPy.RVector3.UNIT_Z)
        result.FromAxisAngle(up_axis, angle)
    else:
        axis = from_vector.Cross(to_vector)
        axis.Normalize()
        result.FromAxisAngle(axis, angle)
    return result
def destination_transform():
    """Transform where the camera should sit to stay aligned with the followed prop.

    Position is the prop's world position plus the offset control value;
    rotation is a right-handed look-at toward the prop; scale is one.

    NOTE: this redefines `destination_transform` from earlier in the
    file; the later definition is the one left bound at module scope.
    """
    prop = follow_control.value
    camera = camera_control.value  # read kept for parity with the original; unused below
    target = prop.WorldTransform().T()
    # Desired camera position: prop location plus the follow offset.
    eye = target + offset_control.value
    # Orientation matrix for a camera at `eye` looking back at the prop.
    facing = look_at_right_handed(eye, target, Ext.Vector3.up)
    # NOTE(review): assumes FromRotationMatrix returns the quaternion —
    # confirm against the RLPy API.
    rot = RLPy.RQuaternion().FromRotationMatrix(facing)
    return RLPy.RTransform(Ext.Vector3.one, rot, eye)
def ToQuaternion(self):
    """Convert this Euler rotation to an RLPy.RQuaternion.

    Axis convention per the original: yaw about Z, pitch about X, roll
    about Y. Angles are fed straight to math.cos/sin, so they are
    assumed to be radians — TODO confirm against callers.
    """
    # Half-angle sines/cosines for each axis.
    cos_yaw, sin_yaw = cos(self.z * 0.5), sin(self.z * 0.5)
    cos_pitch, sin_pitch = cos(self.x * 0.5), sin(self.x * 0.5)
    cos_roll, sin_roll = cos(self.y * 0.5), sin(self.y * 0.5)
    result = RLPy.RQuaternion()
    result.w = cos_yaw * cos_pitch * cos_roll + sin_yaw * sin_pitch * sin_roll
    result.x = cos_yaw * cos_pitch * sin_roll - sin_yaw * sin_pitch * cos_roll
    result.y = sin_yaw * cos_pitch * sin_roll + cos_yaw * sin_pitch * cos_roll
    result.z = sin_yaw * cos_pitch * cos_roll - cos_yaw * sin_pitch * sin_roll
    return result