Esempio n. 1
0
def collect_parameters(apply):
    """Collect per-bone parameters and initialize the simulation state.

    Args:
        apply (bool): when True, stretch each bone's animation clip to the
            timeline end time before keying.

    Side effects:
        Initializes the globals last_x/last_y/last_z and bone_hierarchy, and
        caches simulation data ("data block", "velocity", tip positions, ...)
        on every bone entry.  Returns early (silently) as soon as a bone has
        no "Layer" control.
    """
    # collect parameters and perform initialization
    global last_x
    global last_y
    global last_z

    global bone_hierarchy
    # Return an ordered bone hierarchy based on what was selected in the UI
    bone_hierarchy = bone_tree_view.value

    for i in bone_hierarchy:
        for bone in bone_hierarchy[i]:
            # Get the datablock for the current bone
            animation_clip = bone["root"].GetSkeletonComponent().GetClip(0)
            if apply:
                animation_clip.SetLength(timeline.end_time)
            control = animation_clip.GetControl("Layer", bone["bone"])
            if not control:
                return
            control.ClearKeys()
            wt = bone["bone"].WorldTransform()

            # Remember the root's current world position
            # NOTE(review): presumably read back by a later simulation step to
            # track root movement between updates — confirm against the caller.
            last_x = bone["root"].WorldTransform().T().x
            last_y = bone["root"].WorldTransform().T().y
            last_z = bone["root"].WorldTransform().T().z

            lt = bone["bone"].LocalTransform()
            bone["data block"] = control.GetDataBlock()
            bone["velocity"] = RLPy.RVector3(0, 0, 0)  # Start off with zero velocity
            bone["current tip pos local"] = RLPy.RVector3(lt.T().x, lt.T().y, lt.T().z)
            bone["current tip pos world"] = transform_point(wt, bone["current tip pos local"])
            # World-space offset between the bone tip and the bone origin
            # NOTE(review): the "stiffness" key suggests this is a rest-pose
            # offset for a spring simulation — confirm where it is consumed.
            bone["stiffness"] = transform_point(wt, bone["current tip pos local"]) - wt.T()
            bone["stiff force"] = RLPy.RVector3(0, 0, 0)
            bone["original world transform"] = wt.R().ToRotationMatrix()
Esempio n. 2
0
def e_to_q(e):
    """Convert an Euler angle triplet (degrees, XYZ order) into an RQuaternion."""
    deg_to_rad = RLPy.RMath.CONST_DEG_TO_RAD
    rx = e[0] * deg_to_rad
    ry = e[1] * deg_to_rad
    rz = e[2] * deg_to_rad

    rotation = RLPy.RMatrix3()
    euler_result = rotation.FromEulerAngle(RLPy.EEulerOrder_XYZ, rx, ry, rz)

    quaternion = RLPy.RQuaternion()
    quaternion.FromRotationMatrix(euler_result[0])
    return quaternion
Esempio n. 3
0
    def merge_next_clip(self, clip, cur_time, end_time):
        """Stop mocap capture, break the avatar's animation at cur_time, and
        merge the captured mocap clip back with the remaining clips.

        Args:
            clip: the clip being recorded over (only validated for None here).
            cur_time (RTime): scene time where the capture stopped.
            end_time (RTime): scene time used to re-find the trailing clip
                after an intermediate merge.

        Returns:
            True when the merge path completed; None (implicit) when the
            avatar, clip, or skeleton component is missing.
        """
        global avatar
        global mocap_manager
        global start_time
        global hand_device
        global device_data
        if avatar is None or clip is None:
            return
        skel_comp = avatar.GetSkeletonComponent()
        if skel_comp is None:
            return

        # break time
        fps = RLPy.RGlobal.GetFps()
        buffer_time = RLPy.RTime.IndexedFrameTime(2, fps)
        break_time = cur_time + RLPy.RTime.IndexedFrameTime(1, fps)

        # break & merge clips
        # NOTE(review): this result is never checked and is overwritten by the
        # MergeClips result below — confirm whether a BreakClip failure should
        # abort the merge.
        result = skel_comp.BreakClip(break_time)

        # stop and generate mocap clip
        hand_device.ProcessData(0, device_data, -1)
        mocap_manager.Stop()
        mocap_clip, mocap_time = self.get_clip_and_end_time(start_time)
        remaining_clip, remaining_time = self.get_clip_and_end_time(
            break_time + RLPy.RTime.IndexedFrameTime(1, fps))

        # merge clips
        if remaining_clip is not None:
            transition_time = RLPy.RTime.IndexedFrameTime(
                self.clip_transition_frames, fps)
            remaining_clip.SetTransitionRange(transition_time)
            result = skel_comp.MergeClips(mocap_clip, remaining_clip)
            # merge middle clip
            # Direct merge failed: a clip sits between the mocap clip and the
            # remaining clip, so merge through it first and retry.
            if result.IsError():
                clip_length = mocap_clip.GetClipLength()
                mocap_end_time = mocap_clip.ClipTimeToSceneTime(clip_length)
                remaining_start_time = remaining_clip.ClipTimeToSceneTime(
                    RLPy.RTime(0))
                middle_start_time = RLPy.RTime(0)
                middle_clip, middle_clip_end_time = self.get_clip_and_end_time(
                    remaining_start_time - RLPy.RTime.IndexedFrameTime(1, fps))
                if middle_clip != None:
                    middle_start_time = middle_clip.ClipTimeToSceneTime(
                        RLPy.RTime(0))
                    if middle_start_time != remaining_start_time:
                        skel_comp.MergeClips(mocap_clip, middle_clip)
                        # Re-resolve both clips after the intermediate merge,
                        # then merge the final pair.
                        remaining_clip, remaining_time = self.get_clip_and_end_time(
                            end_time)
                        mocap_clip, mocap_time = self.get_clip_and_end_time(
                            start_time)
                        skel_comp.MergeClips(mocap_clip, remaining_clip)

        RLPy.RGlobal.SetTime(cur_time)
        RLPy.RGlobal.ObjectModified(avatar, RLPy.EObjectModifiedType_Transform)
        return True
Esempio n. 4
0
def automation():
    """Timer-driven auto-focus pass: aim the current camera's DOF at the selection.

    Reads the first selected avatar/prop, refreshes the UI readouts, and —
    when the auto-focus / auto-range group boxes are checked — recomputes the
    DOF focus distance and range, keying the camera only when a value changed.
    """
    camera = RLPy.RScene.GetCurrentCamera()
    dof = camera.GetDOFData()
    items = RLPy.RScene.GetSelectedObjects()
    set_key = False  # Did the parameters change, forcing an update?

    caf_ui["window"].SetWindowTitle("Camera Auto-Focus { %s }" % camera.GetName())
    caf_ui["widget"].targetObject.clear()
    caf_ui["widget"].distance.clear()

    if len(items) > 0:
        object_type = items[0].GetType()

        if object_type == RLPy.EObjectType_Avatar or object_type == RLPy.EObjectType_Prop:  # If the object is valid: avatar or prop
            camera_transform = camera.WorldTransform()
            target_transform = items[0].WorldTransform()
            # Camera-space position of the target; -z is the distance in front of the lens
            local_point = world_to_local_point(camera_transform, target_transform.T())

            # Update the UI
            caf_ui["widget"].targetObject.setText(items[0].GetName())
            caf_ui["widget"].distance.setText(str(round(-local_point.z, 2)))

            if caf_ui["widget"].autoFocus_group.isChecked():  # Automatic calculation of the DOF distance
                # Calculate auto-focus based on parameter settings
                focus_distance = -local_point.z * caf_ui["widget"].autoDistance.value()
                focus_distance = max(min(focus_distance, 5000), 2)  # iClone Focus Distance value is between 2 and 5,000

                if dof.GetFocus() != focus_distance:
                    # Apply a Focus Distance key to the camera
                    dof.SetFocus(focus_distance)
                    set_key = True  # Parameters don't match, we'll need to update

            if caf_ui["widget"].autoRange_group.isChecked():  # Automatic calculation of the DOF range
                # Get the bounding size of the picked object
                max_point = RLPy.RVector3()
                cen_point = RLPy.RVector3()
                min_point = RLPy.RVector3()

                status = items[0].GetBounds(max_point, cen_point, min_point)

                if status == RLPy.RStatus.Success:
                    # Average half-extent of the bounds, scaled by the UI multiplier
                    focus_range = ((max_point.x - cen_point.x) + (max_point.y - cen_point.y) + (max_point.z - cen_point.z)) / 3
                    focus_range *= caf_ui["widget"].autoRange.value()
                    focus_range = max(min(focus_range, 1000), 0)  # iClone Focus Range value is between 0 and 1,000

                    if dof.GetRange() != focus_range:
                        # Apply a Focus Range key to the camera
                        dof.SetRange(focus_range)
                        set_key = True  # Parameters don't match, we'll need to update

            if set_key:
                # Create and set a DOF key
                dof.SetEnable(True)
                key = RLPy.RKey()
                key.SetTime(RLPy.RGlobal.GetTime())
                camera.AddDofKey(key, dof)
Esempio n. 5
0
class Vector3(RLPy.RVector3):
    """Extension of RLPy.RVector3 with scalar math, lerp, and debug helpers."""

    # Static for scale vector: iClone default scale is not [100,100,100] as shown in the UI
    one = RLPy.RVector3(1, 1, 1)

    # Static for up vector: iClone is Z-up application
    up = RLPy.RVector3(0, 0, 1)

    # Static for forward vector
    forward = RLPy.RVector3(0, 1, 0)

    def __str__(self):
        # For debugging: pretty print for the Vector3 values
        x = round(self.x, 3)
        y = round(self.y, 3)
        z = round(self.z, 3)
        return "({0}, {1}, {2})".format(str(x), str(y), str(z))

    def __mul__(self, n):
        # Scalar multiplication: scale every component by n
        v3 = Vector3(self.x * n, self.y * n, self.z * n)
        return v3

    __rmul__ = __mul__

    def ToQuaternion(self):
        """Convert this vector of Euler angles (radians) to a Quaternion.

        Axis convention: yaw (Z), pitch (X), roll (Y).
        """
        cy = cos(self.z * 0.5)
        sy = sin(self.z * 0.5)
        cp = cos(self.x * 0.5)
        sp = sin(self.x * 0.5)
        cr = cos(self.y * 0.5)
        sr = sin(self.y * 0.5)

        q = Quaternion()
        q.w = cy * cp * cr + sy * sp * sr
        q.x = cy * cp * sr - sy * sp * cr
        q.y = sy * cp * sr + cy * sp * cr
        q.z = sy * cp * cr - cy * sp * sr

        return q

    def Lerp(self, v3_to, f_interpolation):
        """Linearly interpolate each component toward v3_to by f_interpolation."""
        return Vector3(lerp(self.x, v3_to.x, f_interpolation),
                       lerp(self.y, v3_to.y, f_interpolation),
                       lerp(self.z, v3_to.z, f_interpolation))

    def Scale(self, v3):
        """Component-wise multiplication with another vector."""
        return Vector3(self.x * v3.x, self.y * v3.y, self.z * v3.z)

    @property
    def normalized(self):
        """Return a unit-length COPY of this vector.

        Bug fix: the previous implementation did ``norm = self``, which
        aliases rather than copies, so Normalize() mutated the vector in
        place — a property named ``normalized`` must not modify its owner.
        """
        norm = Vector3(self.x, self.y, self.z)
        norm.Normalize()
        return norm
Esempio n. 6
0
def q_to_e(q):
    """Convert a quaternion to XYZ-order Euler angles in degrees."""
    quaternion = RLPy.RQuaternion(RLPy.RVector4(q.x, q.y, q.z, q.w))
    rotation = quaternion.ToRotationMatrix()

    # ToEulerAngle returns the decomposed angles (radians) as a sequence
    angles = rotation.ToEulerAngle(RLPy.EEulerOrder_XYZ, 0, 0, 0)
    for axis in range(3):
        angles[axis] = angles[axis] * RLPy.RMath.CONST_RAD_TO_DEG
    return angles
Esempio n. 7
0
def update_focal_length_slider(x):
    """Sync the focal-length slider with the spin-box value and key the camera."""
    global cdz_ui

    active_camera = RLPy.RScene.GetCurrentCamera()

    # Slider works in integer hundredths of the spin-box value
    cdz_ui["widget"].focalLengthSlider.setValue(x * 100)
    active_camera.SetFocalLength(RLPy.RGlobal.GetTime(), x)

    # Nudge the playhead forward then back to force iClone's native UI to refresh
    one_tick = RLPy.RTime(1)
    RLPy.RGlobal.SetTime(RLPy.RGlobal.GetTime() + one_tick)
    RLPy.RGlobal.SetTime(RLPy.RGlobal.GetTime() - one_tick)
Esempio n. 8
0
def register_events():
    """Register the auto-focus timers, event callbacks, and widget signal wiring.

    Side effects: populates caf_callbacks with the timer/event objects (kept
    alive by the dict so they are not garbage collected), connects the Qt
    slider/spin-box pairs, and performs an initial update_ui() refresh.
    """
    global caf_callbacks

    # Register timer callback for auto-focus updates
    caf_callbacks["timer"] = RLPy.RPyTimer()
    # Every frame of iClone is 16.66667 ms, which is an interval of 16
    # We can capture a key every 20 frames to prevent heavy scenes from freezing
    caf_callbacks["timer"].SetInterval(320)
    caf_callbacks["timer_callback"] = AutoFocusTimerCallback()
    caf_callbacks["timer"].RegisterPyTimerCallback(
        caf_callbacks["timer_callback"])
    caf_callbacks["timer"].Start()

    # Register timer callback for reminding the user that auto-key is on
    caf_callbacks["flash_message"] = RLPy.RPyTimer()
    caf_callbacks["flash_message"].SetInterval(
        500)  # Flash auto-key message every 500 ms -> 0.5 sec
    caf_callbacks["message_callback"] = MessageCallback()
    caf_callbacks["flash_message"].RegisterPyTimerCallback(
        caf_callbacks["message_callback"])
    caf_callbacks["flash_message"].Start()

    # Create Event
    caf_callbacks["events"] = EventCallback()
    caf_callbacks["events_id"] = RLPy.REventHandler.RegisterCallback(
        caf_callbacks["events"])

    # Double spinbox and integer slider connection
    # (sliders are integer-valued, spin boxes are floats: scale by 1000)
    caf_ui["widget"].autoDistance_slider.valueChanged.connect(
        lambda x: caf_ui["widget"].autoDistance.setValue(x * 0.001))
    caf_ui["widget"].autoRange_slider.valueChanged.connect(
        lambda x: caf_ui["widget"].autoRange.setValue(x * 0.001))
    caf_ui["widget"].nearBlurStrength_slider.valueChanged.connect(
        lambda x: caf_ui["widget"].nearBlurStrength.setValue(x * 0.001))
    caf_ui["widget"].farBlurStrength_slider.valueChanged.connect(
        lambda x: caf_ui["widget"].farBlurStrength.setValue(x * 0.001))
    caf_ui["widget"].autoDistance.valueChanged.connect(
        lambda x: caf_ui["widget"].autoDistance_slider.setValue(x * 1000))
    caf_ui["widget"].autoRange.valueChanged.connect(
        lambda x: caf_ui["widget"].autoRange_slider.setValue(x * 1000))

    # Call to update camera based on changes in custom parameters
    caf_ui["widget"].focusDistance.valueChanged.connect(set_camera_dof)
    caf_ui["widget"].perfectFocusRange.valueChanged.connect(set_camera_dof)
    caf_ui["widget"].nearTransitionRegion.valueChanged.connect(set_camera_dof)
    caf_ui["widget"].farTransitionRegion.valueChanged.connect(set_camera_dof)
    caf_ui["widget"].nearBlurStrength.valueChanged.connect(set_camera_dof)
    caf_ui["widget"].farBlurStrength.valueChanged.connect(set_camera_dof)

    # Start and stop the auto-focus timer depending on the automatic settings
    caf_ui["widget"].autoFocus_group.toggled.connect(toggle_autofocus_timer)
    caf_ui["widget"].autoRange_group.toggled.connect(toggle_autofocus_timer)

    update_ui()
Esempio n. 9
0
def look_at_right_handed(view_position, view_target, view_up_vector, lookup_offset=0):
    """Build a right-handed look-at rotation matrix.

    Args:
        view_position: eye position.
        view_target: point being looked at.
        view_up_vector: approximate up direction used to derive the basis.
        lookup_offset: lowers the aim direction along world Z (default 0).

    Returns:
        RMatrix3 whose rows are the right, up, and forward basis vectors.
    """
    fwd = (view_position - view_target) - RLPy.RVector3(0, 0, lookup_offset)
    fwd.Normalize()

    side = view_up_vector.Cross(fwd)
    side.Normalize()
    top = fwd.Cross(side)

    return RLPy.RMatrix3(side.x, side.y, side.z,
                         top.x, top.y, top.z,
                         fwd.x, fwd.y, fwd.z)
Esempio n. 10
0
def destination_transform():
    """Destination transform: where the camera/view should be when aligned with the target prop."""
    global ui

    widget = ui["widget"]
    prop_transform = all_props[widget.prop.currentIndex()].WorldTransform()

    # Goal position: the prop's location plus the user offset values
    goal_pos = prop_transform.T() + RLPy.RVector3(widget.offset_x.value(),
                                                 widget.offset_y.value(),
                                                 widget.offset_z.value())
    # Rotation for the camera at goal_pos facing the prop, raised by the elevation value
    aim = look_at_right_handed(goal_pos, prop_transform.T(),
                               RLPy.RVector3(0, 0, 1), widget.elevation.value())
    # Extract the quaternion rotation from the orientation matrix
    quat = RLPy.RQuaternion().FromRotationMatrix(aim)

    # Transform with unit scale, the facing rotation, and the goal position
    return RLPy.RTransform(RLPy.RVector3(1, 1, 1), quat, goal_pos)
Esempio n. 11
0
def apply_setting():
    """Apply emit/delay/loop settings to the selected PopcornFX emitters, then play back."""
    global timeline
    timeline = Ext.TimeLine()

    scene_time = timeline.IndexedFrameTime(timeline.current_frame)
    RLPy.RGlobal.SetTime(scene_time)

    accumulated_delay = 0
    for emitter in popcorn_list_tree_view.value:
        name = emitter.GetName()
        emitter.RemoveEmitKeys()

        # Emit defaults to on unless the per-emitter dictionary disables it
        if particle_emit_dic.get(name, True):
            accumulated_delay += particle_delay_dic.get(name, 0)
            emitter.SetEmit(RLPy.RTime(accumulated_delay * 1000) + scene_time, True)

            # Loop interval of -1 (or no entry) means no looping
            looping = False
            interval = particle_loop_dic.get(name)
            if interval is None or interval == -1:
                emitter.SetLoop(False)
            else:
                looping = True
                emitter.SetLoop(True)
                emitter.SetLoopInterval(interval)

            # Non-looping emitters replay their recorded on/off key sequence:
            # even-indexed keys switch emission off, odd-indexed keys back on
            if not looping:
                elapsed = 0
                for toggle_index, key_time in enumerate(
                        particle_emit_key_dic.get(name, [])):
                    elapsed += key_time
                    state = toggle_index % 2 != 0
                    emitter.SetEmit(
                        RLPy.RTime((accumulated_delay + elapsed) * 1000) + scene_time,
                        state)

        emitter.Update()

    RLPy.RGlobal.Play(timeline.start_time, timeline.end_time)
Esempio n. 12
0
def from_to_rotation(from_vector, to_vector):
    """Return the quaternion that rotates from_vector onto to_vector.

    Note: both argument vectors are normalized in place.
    """
    rotation = RLPy.RQuaternion()
    from_vector.Normalize()
    to_vector.Normalize()

    axis_fallback = RLPy.RVector3(RLPy.RVector3.UNIT_Z)
    angle = RLPy.RMath_ACos(from_vector.Dot(to_vector))

    # Parallel or anti-parallel vectors have a degenerate cross product,
    # so fall back to the world Z axis in those cases
    degenerate = (RLPy.RMath.AlmostZero(angle - RLPy.RMath.CONST_PI)
                  or RLPy.RMath.AlmostZero(angle))
    if degenerate:
        rotation.FromAxisAngle(axis_fallback, angle)
    else:
        axis = from_vector.Cross(to_vector)
        axis.Normalize()
        rotation.FromAxisAngle(axis, angle)
    return rotation
Esempio n. 13
0
def transform_point(world_transform, local_position):
    """Convert a local-space point into world space using the given transform."""
    # Express the local point as a translation-only matrix4
    point_matrix = RLPy.RMatrix4()
    point_matrix.MakeIdentity()
    point_matrix.SetTranslate(local_position)

    # Multiply into the object's world matrix and read back the translation
    combined = point_matrix * world_transform.Matrix()
    return RLPy.RVector3(combined.GetTranslate())
Esempio n. 14
0
def destination_transform():
    """Destination transform: where the camera/view should be when aligned with the target prop."""
    # Retrieve the RIObjects from the UI controls
    prop = follow_control.value
    camera = camera_control.value  # read to mirror the original control access

    # Goal position: the prop's location plus the user-defined offset
    goal_pos = prop.WorldTransform().T() + offset_control.value

    # Rotation matrix for the camera at goal_pos facing the prop
    aim = look_at_right_handed(goal_pos,
                               prop.WorldTransform().T(),
                               Ext.Vector3.up)
    # Extract the quaternion rotation from the orientation matrix
    quat = RLPy.RQuaternion().FromRotationMatrix(aim)

    # Transform with unit scale, the facing rotation, and the goal position
    return RLPy.RTransform(Ext.Vector3.one, quat, goal_pos)
Esempio n. 15
0
def local_move(obj, translation):
    '''
    Calculate the world-space position for an object that is locally translated

    Args:
        obj (RIObject): the iClone object that serves as the transform space
        translation (RVector3): point in local-space

    Returns:
        RVector3: the world-space position for the object after local translation is applied
    '''
    transform = obj.WorldTransform()
    matrix = transform.Matrix()
    matrix.SetTranslate(
        RLPy.RVector3.ZERO)  # Zero out transform matrix translation

    # New matrix for the local translation
    local_matrix = RLPy.RMatrix4()
    local_matrix.MakeIdentity()
    local_matrix.SetTranslate(translation)

    # Get the world-space offset position for the local movement
    offset_matrix = local_matrix * matrix

    # Return the final position for the object
    # NOTE(review): the rotated offset is SUBTRACTED from the current
    # position, so a positive local translation moves the object opposite to
    # the rotated direction — compare local_to_world_translate, which returns
    # the un-negated offset.  Confirm the inversion is intentional.
    return transform.T() - offset_matrix.GetTranslate()
Esempio n. 16
0
def relational_position(parent_obj, child_obj):
    '''
    Convert the child object's world position into the parent's local space.

    Args:
        parent_obj (RIObject): Object whose transform defines the local space.
        child_obj (RIObject): Object whose world position is being converted.

    Returns:
        RVector3: The child's position expressed in the parent's local space.
    '''
    parent_transform = parent_obj.WorldTransform()
    child_transform = child_obj.WorldTransform()

    parent_matrix = parent_transform.Matrix()
    child_matrix = child_transform.Matrix()

    # World-space offset from parent to child (same as the transform.T() delta)
    offset = child_matrix.GetTranslate() - parent_matrix.GetTranslate()
    parent_matrix.SetTranslate(RLPy.RVector3.ZERO)  # keep rotation/scale only

    # Express the offset as a translation-only matrix4
    offset_matrix = RLPy.RMatrix4()
    offset_matrix.MakeIdentity()
    offset_matrix.SetTranslate(offset)

    # Bring the offset into the parent's space via the inverse matrix
    local_matrix = offset_matrix * parent_matrix.Inverse()

    return local_matrix.GetTranslate()
Esempio n. 17
0
def run_script():
    """Entry point: start the skeleton-polling timer and show the JCM dialog.

    Side effects: keeps the timer and dialog alive via module-level globals
    so they are not garbage collected while the dialog is open.
    """
    global rl_py_timer
    global timer_callback
    #init timer event
    rl_py_timer = RLPy.RPyTimer()
    rl_py_timer.SetInterval(100)
    timer_callback = RLPyTimerCallback()
    rl_py_timer.RegisterPyTimerCallback(timer_callback)
    # update_skeleton will run on every timer tick
    timer_callback.register_time_out(update_skeleton)

    global jcm_manager_dlg
    jcm_manager_widget = JcmWidget()

    label = QtWidgets.QLabel()
    label.setText("The morph is defined by \nThe Script and Morph Creator.")

    #create RDialog
    jcm_manager_dlg = RLPy.RUi.CreateRDialog()
    jcm_manager_dlg.SetWindowTitle("Joint Driven Morph")
    #wrap RDialog to Pyside Dialog
    main_pyside_dlg = wrapInstance(int(jcm_manager_dlg.GetWindow()),
                                   QtWidgets.QDialog)
    main_pyside_layout = main_pyside_dlg.layout()

    main_pyside_layout.addWidget(label)
    main_pyside_layout.addWidget(jcm_manager_widget)
    main_pyside_dlg.setFixedWidth(200)
    #show dialog
    jcm_manager_dlg.Show()
Esempio n. 18
0
def world_to_local_point(transform, world_point):
    '''
    Converts world-space point to local-space position relative to the transform-space.

    Args:
        transform (RTransform): Transform space for world to local point conversion.
        world_point (RVector3): The point in world-space that needs to be converted to local-space.

    Returns:
        RVector3: The world-space point converted to local-space.
    '''
    matrix = transform.Matrix()

    # Offset from the transform origin to the point, in world space
    offset = world_point - matrix.GetTranslate()
    matrix.SetTranslate(RLPy.RVector3.ZERO)  # keep rotation/scale only

    # Express the offset as a translation-only matrix4
    offset_matrix = RLPy.RMatrix4()
    offset_matrix.MakeIdentity()
    offset_matrix.SetTranslate(offset)

    # Multiply by the inverse matrix to land in the transform's local space
    local_matrix = offset_matrix * matrix.Inverse()

    return local_matrix.GetTranslate()
Esempio n. 19
0
    def __init__(self,
                 label=None,
                 maxRange=1000,
                 singleStep=0.025,
                 parent=None):
        """Group-box widget exposing X/Y/Z double spin boxes as one vector value.

        Args:
            label: title text shown on the group box.
            maxRange (float): each spin box accepts values in [-maxRange, maxRange].
            singleStep (float): spin box increment per click/arrow press.
            parent: optional Qt parent widget.
        """
        super(Vector3Control, self).__init__(parent)
        # Create a Horizontal Box Layout
        parent_layout = QtWidgets.QHBoxLayout()

        # Create and setup individual float spin box widgets
        self._x = QtWidgets.QDoubleSpinBox()
        self._y = QtWidgets.QDoubleSpinBox()
        self._z = QtWidgets.QDoubleSpinBox()
        for k, v in {"X": self._x, "Y": self._y, "Z": self._z}.items():
            v.setRange(-maxRange, maxRange)
            v.setSingleStep(singleStep)
            # Any spin box edit re-syncs the composite vector value
            v.valueChanged.connect(self.__changeValue)
            parent_layout.addWidget(QtWidgets.QLabel(k))
            parent_layout.addWidget(v)

        # Configure the Group Box parent widget
        self.setTitle("{0} :".format(label))
        self.setLayout(parent_layout)
        self.setStyleSheet("QGroupBox  {color: #a2ec13}")

        # Private attribute(s): the composite vector backing the spin boxes
        self.__value = RLPy.RVector3(0, 0, 0)
Esempio n. 20
0
def quaternion_lerp(quat_from, quat_to, f_interpolation):
    """Normalized linear interpolation (nlerp) between two quaternions.

    When the quaternions sit on opposite hemispheres (negative dot product)
    the target's contribution is negated so the blend takes the short path.
    """
    t = f_interpolation
    s = 1 - t

    # 4D dot product decides which hemisphere quat_to lies on
    dot = (((quat_from.x * quat_to.x) + (quat_from.y * quat_to.y)) +
           (quat_from.z * quat_to.z)) + (quat_from.w * quat_to.w)
    sign = 1 if dot >= 0 else -1

    q = RLPy.RQuaternion()
    q.x = (s * quat_from.x) + (sign * t * quat_to.x)
    q.y = (s * quat_from.y) + (sign * t * quat_to.y)
    q.z = (s * quat_from.z) + (sign * t * quat_to.z)
    q.w = (s * quat_from.w) + (sign * t * quat_to.w)

    # Renormalize the blended quaternion back to unit length
    length_sq = (((q.x * q.x) + (q.y * q.y)) + (q.z * q.z)) + (q.w * q.w)
    inv_length = 1 / sqrt(length_sq)
    q.x *= inv_length
    q.y *= inv_length
    q.z *= inv_length
    q.w *= inv_length
    return q
Esempio n. 21
0
def set_transform_key(clone, time, transform):
    """Key the object's Transform track at *time* with an ease-out transition."""
    transform_key = RLPy.RTransformKey()
    transform_key.SetTime(time)
    transform_key.SetTransform(transform)

    transform_control = clone.GetControl("Transform")
    transform_control.AddKey(transform_key, RLPy.RGlobal.GetFps())
    transform_control.SetKeyTransition(time, RLPy.ETransitionType_Ease_Out, 1.0)
Esempio n. 22
0
def key_camera():
    """Key the selected camera along the timeline so it eases toward the target prop.

    Walks the frame range set in the UI, lerping the camera toward the
    destination transform every `delay` frames, then optionally thins the
    produced keys by the reduction setting and plays back the result.
    """
    global ui

    # Get the frame and time duration of the target prop only
    fps = RLPy.RGlobal.GetFps()
    start_time = RLPy.RTime.IndexedFrameTime(ui["widget"].start_frame.value(),
                                             fps)
    end_time = RLPy.RTime.IndexedFrameTime(ui["widget"].end_frame.value(), fps)

    # How many keys are needed to create the whole animation from start to finish?
    key_interval = ui["widget"].delay.value()
    total_keys = int(
        RLPy.RMath.Ceil(
            (ui["widget"].end_frame.value() - ui["widget"].start_frame.value())
            / key_interval))

    # Show the progress bar
    ui["widget"].progress.setRange(0, total_keys)
    ui["widget"].progress.setHidden(False)
    ui["widget"].message.setHidden(True)

    camera = all_cameras[ui["widget"].camera.currentIndex()]

    # Iterate over every keyframe
    for key in range(0, total_keys):
        current_frame = ui["widget"].start_frame.value() + key * key_interval
        current_time = RLPy.RTime.IndexedFrameTime(int(current_frame), fps)
        view_transform = camera.WorldTransform()
        target_transform = destination_transform()
        # Step forward in the timeline
        RLPy.RGlobal.SetTime(current_time)
        ui["widget"].progress.setValue(key)
        # Lerp between the current camera position and its destination using tautness as the interpolate
        new_transform = transform_lerp(view_transform, target_transform,
                                       ui["widget"].tautness.value())
        # Key the camera's transform for animation
        camera.GetControl("Transform").SetValue(current_time, new_transform)

    # Post-process frame reduction: drop every key whose index is not a
    # multiple of (reduction + 1)
    if ui["widget"].reduction.value() > 0:
        control = camera.GetControl("Transform")
        key_count = control.GetKeyCount()
        interval = ui["widget"].reduction.value() + 1
        key_times = []

        for index in range(0, key_count):
            if index % interval != 0:
                # GetKeyTimeAt fills the RTime out-parameter just appended
                key_times.append(RLPy.RTime())
                control.GetKeyTimeAt(index, key_times[len(key_times) - 1])

        for time in range(0, len(key_times)):
            control.RemoveKey(key_times[time])

    # Hide the progress bar
    ui["widget"].progress.setHidden(True)
    ui["widget"].message.setHidden(False)
    ui["widget"].progress.reset()
    # Playback to see the final result
    RLPy.RGlobal.Play(start_time, end_time)
Esempio n. 23
0
 def get_clip_and_end_time(self, cur_time):
     """Find the animation clip on the avatar's skeleton containing *cur_time*.

     Args:
         cur_time (RTime): scene time to search for.

     Returns:
         tuple: (clip, end_time) — clip is the matching clip or None, and
         end_time is the scene end time of the LAST clip scanned.
         NOTE(review): end_time is not necessarily the matching clip's end
         time when later clips exist — confirm callers rely on this.
     """
     global avatar
     skel_comp = avatar.GetSkeletonComponent()
     if skel_comp is None:
         # Return a 2-tuple so callers can unpack safely (a bare `return`
         # previously made `a, b = ...` raise TypeError on this path).
         return None, RLPy.RTime(0)
     find_clip = None
     clip_end_time = RLPy.RTime(0)
     for i in range(skel_comp.GetClipCount()):
         clip = skel_comp.GetClip(i)
         clip_start_time = clip.ClipTimeToSceneTime(RLPy.RTime(0))
         clip_end_time = clip.ClipTimeToSceneTime(clip.GetClipLength())
         if cur_time >= clip_start_time and cur_time <= clip_end_time:
             find_clip = clip
     return find_clip, clip_end_time
Esempio n. 24
0
 def find_next_clip_transition_by_time(self, cur_time):
     """Find the clip whose transition window contains *cur_time*.

     Args:
         cur_time (RTime): scene time to test against each clip's
             pre-start transition range.

     Returns:
         tuple: (clip, transition_time) — clip is None when no transition
         window contains cur_time; transition_time is the range of the
         last clip scanned.
     """
     global avatar
     skel_comp = avatar.GetSkeletonComponent()
     if skel_comp is None:
         # Return a 2-tuple so callers can unpack safely (a bare `return`
         # previously made `a, b = ...` raise TypeError on this path).
         return None, RLPy.RTime(0)
     find_clip = None
     transition_time = RLPy.RTime(0)
     for i in range(skel_comp.GetClipCount()):
         clip = skel_comp.GetClip(i)
         clip_start_time = clip.ClipTimeToSceneTime(RLPy.RTime(0))
         transition_time = clip.GetTransitionRange()
         # Transition window runs from (start - range) up to the clip start
         transition_begin_time = clip_start_time - transition_time
         if transition_begin_time < RLPy.RTime(0):
             continue
         if cur_time >= transition_begin_time and cur_time < clip_start_time:
             find_clip = clip
     return find_clip, transition_time
Esempio n. 25
0
def undo_last_operation():
    """Revert the last dolly-zoom keying using the snapshot stored in cdz_undo.

    Removes the transform, DOF, and per-frame focal-length keys created by
    the dolly-zoom operation, then restores the original focal length and
    DOF settings at the start time.
    """
    # Disable the undo button after the undo operation
    cdz_ui["widget"].clearDollyZoom.setEnabled(False)

    if cdz_undo["camera"].IsValid() is False:
        RLPy.RUi.ShowMessageBox(
            "Camera Dolly Zoom - Error",
            "Clear operation cancelled - the camera can not be found.",
            RLPy.EMsgButton_Ok)
        return

    start_frame = RLPy.RTime.GetFrameIndex(cdz_undo["start_time"],
                                           cdz_undo["fps"])
    end_frame = start_frame + cdz_undo["frame_range"]
    end_time = RLPy.RTime().IndexedFrameTime(end_frame, cdz_undo["fps"])

    # Remove the Camera transform key
    t_control = cdz_undo["camera"].GetControl("Transform")
    t_control.RemoveKey(end_time)

    # Remove all DOF keys
    key = RLPy.RKey()
    key.SetTime(cdz_undo["start_time"])
    cdz_undo["camera"].RemoveDofKey(key)
    key.SetTime(end_time)
    cdz_undo["camera"].RemoveDofKey(key)

    # Capture the starting focal length BEFORE deleting its keys below
    start_focal_length = cdz_undo["camera"].GetFocalLength(
        cdz_undo["start_time"])

    # Remove the focal-length key on every frame of the dolly-zoom range
    for i in range(cdz_undo["frame_range"] + 1):
        time = RLPy.RTime().IndexedFrameTime(start_frame + i, cdz_undo["fps"])
        cdz_undo["camera"].RemoveFocalLengthKey(time)

    # Revert to original Focal Length settings
    cdz_undo["camera"].SetFocalLength(cdz_undo["start_time"],
                                      start_focal_length)

    # Revert to original DOF settings
    dof_key = RLPy.RKey()
    dof_key.SetTime(cdz_undo["start_time"])
    cdz_undo["camera"].AddDofKey(dof_key, cdz_undo["start_dof"])
Esempio n. 26
0
def transform_direction(world_transform, local_position):
    """Rotate a local-space offset into world space, ignoring the translation."""
    # Rotation-only matrix4 built from the transform's 3x3 rotation part
    rotation_only = RLPy.RMatrix4()
    rotation_only.MakeIdentity()
    rotation_only.SetSR(world_transform.Rotate())

    # Local offset expressed as a translation-only matrix4
    offset_matrix = RLPy.RMatrix4()
    offset_matrix.MakeIdentity()
    offset_matrix.SetTranslate(local_position)

    # Rotate the offset and read back its translation component
    rotated = offset_matrix * rotation_only
    return RLPy.RVector3(rotated.GetTranslate())
Esempio n. 27
0
def show_window():
    """Create (or re-show) the Camera Dolly Zoom dialog and hook up its callbacks.

    If a dialog already exists it is re-shown (or the user is warned when it
    is still visible); otherwise the .ui file is loaded, buttons and sliders
    are wired, and the dialog/timer callbacks are registered.
    """
    global cdz_ui

    if "dialog_window" in cdz_ui:  # If the window already exists...
        if cdz_ui["dialog_window"].IsVisible():
            RLPy.RUi.ShowMessageBox(
                "Camera Dolly Zoom - Operation Error",
                "The current Camera Dolly Zoom session is still running.  You must first close the window to start another session.",
                RLPy.EMsgButton_Ok)
        else:
            cdz_ui["dialog_window"].Show()
        return

    cdz_ui["dialog_window"] = RLPy.RUi.CreateRDialog()
    cdz_ui["dialog_window"].SetWindowTitle("Camera Dolly Zoom")

    # Create Pyside layout for RDialog
    dialog = wrapInstance(int(cdz_ui["dialog_window"].GetWindow()),
                          QtWidgets.QDialog)
    dialog.setFixedWidth(350)

    # Read and set the QT ui file from the script location
    qt_ui_file = QtCore.QFile(
        os.path.dirname(__file__) + "/Camera_Dolly_Zoom.ui")
    qt_ui_file.open(QtCore.QFile.ReadOnly)
    cdz_ui["widget"] = QtUiTools.QUiLoader().load(qt_ui_file)
    qt_ui_file.close()
    dialog.layout().addWidget(cdz_ui["widget"])

    # Connect button commands
    cdz_ui["widget"].help.clicked.connect(show_help_dialog)
    cdz_ui["widget"].keyDollyZoom.clicked.connect(key_camera_distance)
    cdz_ui["widget"].clearDollyZoom.clicked.connect(undo_last_operation)
    cdz_ui["widget"].focalLength.valueChanged.connect(
        update_focal_length_slider)
    cdz_ui["widget"].focalLengthSlider.valueChanged.connect(
        update_focal_length)

    cdz_ui["dialog_window"].Show()

    # Register callbacks (stored in cdz_callbacks so they stay alive)
    cdz_callbacks["dialog_events"] = DialogEventCallback()
    cdz_ui["dialog_window"].RegisterEventCallback(
        cdz_callbacks["dialog_events"])
    cdz_callbacks["timer"] = RLPy.RPyTimer()
    cdz_callbacks["timer"].SetInterval(
        17
    )  # Every frame of iClone is 16.66667 ms, which is an interval of 16 - 17
    cdz_callbacks["timer_callback"] = TimerCallback()
    cdz_callbacks["timer"].RegisterPyTimerCallback(
        cdz_callbacks["timer_callback"])
    cdz_callbacks["timer"].Start()

    update_ui()
Esempio n. 28
0
def BoneWorldTransform(bone):
    """Return the bone's world translation and its XYZ Euler rotation in degrees."""
    world = bone.WorldTransform()
    position = world.T()

    # Decompose the rotation matrix into Euler angles (radians)
    euler_result = world.Rotate().ToEulerAngle(RLPy.EEulerOrder_XYZ, 0, 0, 0)

    rad_to_deg = RLPy.RMath.CONST_RAD_TO_DEG
    degrees = RLPy.RVector3(euler_result[0] * rad_to_deg,
                            euler_result[1] * rad_to_deg,
                            euler_result[2] * rad_to_deg)
    return position, degrees  # angle (degree)
Esempio n. 29
0
def local_to_world_translate(obj, local_pos):
    """Rotate/scale a local-space offset into world space (object translation zeroed)."""
    space_matrix = obj.WorldTransform().Matrix()
    space_matrix.SetTranslate(RLPy.RVector3.ZERO)  # drop the object's translation

    # Local offset expressed as a translation-only matrix4
    offset_matrix = RLPy.RMatrix4()
    offset_matrix.MakeIdentity()
    offset_matrix.SetTranslate(local_pos)

    # World-space offset = local offset multiplied into the object's space
    world_matrix = offset_matrix * space_matrix
    return world_matrix.GetTranslate()
Esempio n. 30
0
    def stop(self):
        """Key the prop's local transform at the current time with rotation zeroed.

        NOTE(review): R() may return a copy of the rotation quaternion, in
        which case the Set* calls below would not modify prop_transform at
        all — confirm against the RLPy API.  Also (0, 0, 0, 0) is not a
        valid unit quaternion ((0, 0, 0, 1) is identity); verify the zeroed
        W is intentional.
        """
        control = self.key_prop.GetControl("Transform")
        prop_transform = self.key_prop.LocalTransform()

        prop_transform.R().SetX(0)
        prop_transform.R().SetY(0)
        prop_transform.R().SetZ(0)
        prop_transform.R().SetW(0)

        # Bake the modified transform as a key at the current scene time
        key = RLPy.RTransformKey()
        key.SetTime(RLPy.RGlobal.GetTime())
        key.SetTransform(prop_transform)
        control.AddKey(key, RLPy.RGlobal.GetFps())