Example #1
def test_calc_orientation_forces(arm, orientation_algorithm):
    robot_config = arm.Config(use_cython=False)
    ctrlr = OSC(robot_config, orientation_algorithm=orientation_algorithm)

    for ii in range(100):
        q = np.random.random(robot_config.N_JOINTS) * 2 * np.pi

        # create a target orientation
        target_abg = np.random.random(3) * np.pi - np.pi / 2.0

        # calculate current position quaternion
        R = robot_config.R('EE', q=q)
        quat_1 = transformations.unit_vector(
            transformations.quaternion_from_matrix(R))

        # calculate current position quaternion with u_task added
        u_task = ctrlr._calc_orientation_forces(target_abg, q=q)

        dq = np.dot(
            np.linalg.pinv(robot_config.J('EE', q)),
            np.hstack([np.zeros(3), u_task]))
        q_2 = q - dq * 0.001  # where 0.001 represents the time step
        R_2 = robot_config.R('EE', q=q_2)
        quat_2 = transformations.unit_vector(
            transformations.quaternion_from_matrix(R_2))

        # calculate the quaternion of the target
        quat_target = transformations.unit_vector(
            transformations.quaternion_from_euler(
                target_abg[0], target_abg[1], target_abg[2], axes='rxyz'))

        dist1 = calc_distance(quat_1, np.copy(quat_target))
        dist2 = calc_distance(quat_2, np.copy(quat_target))

        assert np.abs(dist2) < np.abs(dist1)
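calc_distance is imported from the test module and is not shown in these examples. A minimal sketch of what such a quaternion-distance helper might look like (an assumption for illustration, not the library's implementation), using the [w, x, y, z] ordering the other examples rely on:

import numpy as np

def calc_distance(quat_1, quat_2):
    # hypothetical helper: angular distance (radians) between two unit quaternions;
    # the absolute value handles the double cover (q and -q are the same rotation)
    dot = np.abs(np.dot(quat_1, quat_2))
    return 2 * np.arccos(np.clip(dot, -1.0, 1.0))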
Example #2
    def _calc_orientation_forces(self, target_abg, q):
        """Calculate the desired Euler angle forces to apply to the arm to
        move the end-effector to the target orientation

        Parameters
        ----------
        target_abg: np.array
            the target Euler angles orientation for the end-effector:
            alpha, beta, gamma
        q: np.array
            the joint angles of the arm
        """
        u_task_orientation = np.zeros(3)
        if self.orientation_algorithm == 0:
            # transform the orientation target into a quaternion
            q_d = transformations.unit_vector(
                transformations.quaternion_from_euler(
                    target_abg[0], target_abg[1], target_abg[2], axes="rxyz"
                )
            )
            # get the quaternion for the end effector
            q_e = self.robot_config.quaternion("EE", q=q)
            q_r = transformations.quaternion_multiply(
                q_d, transformations.quaternion_conjugate(q_e)
            )
            u_task_orientation = -q_r[1:] * np.sign(q_r[0])

        elif self.orientation_algorithm == 1:
            # From (Caccavale et al, 1997) Section IV Quaternion feedback
            # get rotation matrix for the end effector orientation
            R_e = self.robot_config.R("EE", q)
            # get rotation matrix for the target orientation
            R_d = transformations.euler_matrix(
                target_abg[0], target_abg[1], target_abg[2], axes="rxyz"
            )[:3, :3]
            R_ed = np.dot(R_e.T, R_d)  # eq 24
            q_ed = transformations.unit_vector(
                transformations.quaternion_from_matrix(R_ed)
            )
            u_task_orientation = -1 * np.dot(R_e, q_ed[1:])  # eq 34

        else:
            raise Exception(
                "Invalid algorithm number %i for calculating "
                % self.orientation_algorithm
                + "orientation error"
            )

        return u_task_orientation
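A self-contained numpy check of the orientation_algorithm == 0 branch above (a sketch for illustration only): build the current and target orientations from axis-angle rotations, form q_r = q_d ⊗ q_e*, and confirm that its vector part lies along the rotation still needed to reach the target.

import numpy as np

def quat_from_axis_angle(axis, angle):
    # unit quaternion [w, x, y, z] for a rotation of `angle` radians about `axis`
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    return np.hstack([np.cos(angle / 2), np.sin(angle / 2) * axis])

def quat_multiply(q1, q2):
    # Hamilton product in [w, x, y, z] ordering
    w1, v1 = q1[0], q1[1:]
    w2, v2 = q2[0], q2[1:]
    return np.hstack([w1 * w2 - np.dot(v1, v2),
                      w1 * v2 + w2 * v1 + np.cross(v1, v2)])

def quat_conjugate(q):
    return np.hstack([q[0], -q[1:]])

q_e = quat_from_axis_angle([0, 0, 1], np.pi / 6)  # current: 30 degrees about z
q_d = quat_from_axis_angle([0, 0, 1], np.pi / 3)  # target: 60 degrees about z

# rotation from the current to the target orientation, as in algorithm 0
q_r = quat_multiply(q_d, quat_conjugate(q_e))

# the vector part points along z with magnitude sin(15 degrees),
# i.e. half of the remaining 30 degree rotation
assert np.allclose(q_r[1:], [0, 0, np.sin(np.pi / 12)])
print(-q_r[1:] * np.sign(q_r[0]))  # the u_task_orientation term used above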
Example #3
    def quaternion(self, name, q):
        """Gets orientation matrix and converts to a quaternion

        Parameters
        ----------
        name : string
            name of the joint, link, or end-effector
        q : numpy.array
            joint angles [radians]
        """
        R = self.R(name=name, q=q)
        quat = transformations.unit_vector(
            transformations.quaternion_from_matrix(matrix=R))
        return quat
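A brief usage sketch (assumptions: the ur5 model that ships with abr_control and the same use_cython flag the tests above pass); the returned quaternion is normalized by unit_vector and follows the [w, x, y, z] ordering used throughout these examples.

import numpy as np
from abr_control.arms import ur5  # assumed arm model; any config exposing R() works

robot_config = ur5.Config(use_cython=False)
quat = robot_config.quaternion("EE", q=np.zeros(robot_config.N_JOINTS))

print(quat)  # unit quaternion [w, x, y, z] for the end-effector orientation
assert np.isclose(np.linalg.norm(quat), 1.0)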
Example #4
def test_calc_orientation_forces(arm, orientation_algorithm):
    robot_config = arm.Config(use_cython=False)
    ctrlr = OSC(robot_config, orientation_algorithm=orientation_algorithm)

    for ii in range(100):
        q = np.random.random(robot_config.N_JOINTS) * 2 * np.pi
        quat = robot_config.quaternion("EE", q=q)

        theta = np.pi / 2
        axis = np.array([0, 0, 1])
        quat_rot = transformations.unit_vector(
            np.hstack([np.cos(theta / 2),
                       np.sin(theta / 2) * axis]))
        quat_target = transformations.quaternion_multiply(quat, quat_rot)
        target_abg = transformations.euler_from_quaternion(quat_target,
                                                           axes="rxyz")

        # calculate current position quaternion
        R = robot_config.R("EE", q=q)
        quat_1 = transformations.unit_vector(
            transformations.quaternion_from_matrix(R))
        dist1 = calc_distance(quat_1, np.copy(quat_target))

        # calculate current position quaternion with u_task added
        u_task = ctrlr._calc_orientation_forces(target_abg, q=q)

        dq = np.dot(np.linalg.pinv(robot_config.J("EE", q)),
                    np.hstack([np.zeros(3), u_task]))
        q_2 = q - dq * 0.001  # where 0.001 represents the time step
        R_2 = robot_config.R("EE", q=q_2)
        quat_2 = transformations.unit_vector(
            transformations.quaternion_from_matrix(R_2))

        dist2 = calc_distance(quat_2, np.copy(quat_target))

        assert np.abs(dist2) < np.abs(dist1)
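The target construction above goes quaternion → Euler angles ('rxyz') and is converted back to a quaternion inside _calc_orientation_forces; a quick round-trip check with the same transformations calls used in these examples (the import path is an assumption based on abr_control's layout):

import numpy as np
from abr_control.utils import transformations  # assumed location of the module used above

np.random.seed(0)
quat = transformations.unit_vector(transformations.random_quaternion())
abg = transformations.euler_from_quaternion(quat, axes="rxyz")
quat_back = transformations.unit_vector(
    transformations.quaternion_from_euler(abg[0], abg[1], abg[2], axes="rxyz"))

# q and -q describe the same rotation, so compare up to sign
assert np.allclose(quat, quat_back) or np.allclose(quat, -quat_back)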
Example #5
    def generate_path(
        self,
        position,
        target_position,
        n_timesteps=200,
        dt=0.001,
        plot=False,
        method=3,
        axes="rxyz",
    ):
        """

        Parameters
        ----------
        position: numpy.array
            the current position of the system
        target_position: numpy.array
            the task space target position and orientation
        n_timesteps: int, optional (Default: 200)
            the number of time steps to reach the target_position
        dt: float, optional (Default: 0.001)
            the time step for calculating desired velocities [seconds]
        plot: boolean, optional (Default: False)
            plot the path after generating if True
        method: int, optional (Default: 3)
            Different ways to compute inverse resolved motion
            1. Standard resolved motion
            2. Damped least squares method
            3. Nullspace with priority for position, orientation in null space
        axes: string, optional (Default: 'rxyz')
            the format of the Euler angles passed in target_position[3:6];
            the first letter, 'r' or 's', selects rotating or static axes
        """

        path = np.zeros((n_timesteps, position.shape[0] * 2))
        ee_track = []
        ee_err = []
        ea_err = []

        # set the largest allowable step in joint position
        max_dq = self.max_dq * dt
        # set the largest allowable step in hand (x,y,z)
        max_dx = self.max_dx * dt
        # set the largest allowable step in hand (alpha, beta, gamma)
        max_dr = self.max_dr * dt

        Qd = np.array(
            transformations.unit_vector(
                transformations.quaternion_from_euler(
                    target_position[3],
                    target_position[4],
                    target_position[5],
                    axes="sxyz",
                )
            )
        )

        q = np.copy(position)
        for ii in range(n_timesteps):
            J = self.robot_config.J("EE", q=q)
            Tx = self.robot_config.Tx("EE", q=q)
            ee_track.append(Tx)

            dx = target_position[:3] - Tx

            Qe = self.robot_config.quaternion("EE", q=q)
            # quaternion orientation error from current (Qe) to target (Qd)
            dr = Qe[0] * Qd[1:] - Qd[0] * Qe[1:] - np.cross(Qd[1:], Qe[1:])

            norm_dx = np.linalg.norm(dx, 2)
            norm_dr = np.linalg.norm(dr, 2)
            ee_err.append(norm_dx)
            ea_err.append(norm_dr)

            # limit max step size in operational space
            if norm_dx > max_dx:
                dx = dx / norm_dx * max_dx
            if norm_dr > max_dr:
                dr = dr / norm_dr * max_dr

            Jx = J[:3]
            pinv_Jx = np.linalg.pinv(Jx)

            # Different ways to compute inverse resolved motion
            if method == 1:
                # Standard resolved motion
                dq = np.dot(np.linalg.pinv(J), np.hstack([dx, dr]))
            elif method == 2:
                # Damped least squares method
                dq = np.dot(
                    J.T,
                    np.linalg.solve(
                        np.dot(J, J.T) + np.eye(6) * 0.001, np.hstack([dx, dr * 0.3])
                    ),
                )
            elif method == 3:
                # Primary position IK, control orientation in null space
                dq = np.dot(pinv_Jx, dx) + np.dot(
                    np.eye(self.robot_config.N_JOINTS) - np.dot(pinv_Jx, Jx),
                    np.dot(np.linalg.pinv(J[3:]), dr),
                )
            else:
                raise ValueError("Invalid method number %i for resolved motion" % method)

            # limit max step size in joint space
            if max(abs(dq)) > max_dq:
                dq = dq / max(abs(dq)) * max_dq

            path[ii] = np.hstack([q, dq])
            q = q + dq

        if plot:
            ee_track = np.array(ee_track)

            plt.subplot(2, 1, 1)
            plt.plot(ee_track)
            plt.gca().set_prop_cycle(None)
            plt.plot(np.ones((n_timesteps, 3)) * target_position[:3], "--")
            plt.legend(
                ["%i" % ii for ii in range(3)] + ["%i_target" % ii for ii in range(3)]
            )
            plt.title("Trajectory positions")

            plt.subplot(2, 1, 2)
            plt.plot(ee_err)
            plt.plot(ea_err)
            plt.legend(["Position error", "Orientation error"])
            plt.title("Trajectory error")

            plt.tight_layout()

            # save before show(); once the figure window is closed, savefig
            # would write out a new, empty figure
            plt.savefig("IK_plot.png")
            plt.show()

        # reset position_path index
        self.n_timesteps = n_timesteps
        self.n = 0
        self.position_path = path[:, : self.robot_config.N_JOINTS]
        self.velocity_path = path[:, self.robot_config.N_JOINTS :]

        return self.position_path, self.velocity_path
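A standalone numpy sketch of the method 3 update above (position as the primary task, orientation resolved in the null space of the position Jacobian), using a random Jacobian purely for illustration; the final check confirms the null-space term contributes no hand translation.

import numpy as np

np.random.seed(0)
n_joints = 6
J = np.random.random((6, n_joints))  # stand-in task Jacobian, rows ordered [dx; dr]
dx = np.array([0.01, 0.0, 0.0])      # desired step in hand position
dr = np.array([0.0, 0.0, 0.02])      # desired step in hand orientation

Jx, Jr = J[:3], J[3:]
pinv_Jx = np.linalg.pinv(Jx)

# project the orientation update into the null space of the position Jacobian
# so the secondary task cannot disturb the primary (position) step
N = np.eye(n_joints) - np.dot(pinv_Jx, Jx)
dq_secondary = np.dot(N, np.dot(np.linalg.pinv(Jr), dr))
dq = np.dot(pinv_Jx, dx) + dq_secondary

assert np.allclose(np.dot(Jx, dq_secondary), 0)  # no end-effector (x, y, z) motion
print(dq)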
Example #7
)

# set up lists for tracking data
q_track = []
target_track = []
error_track = []

target_geom_id = interface.sim.model.geom_name2id("target")
green = [0, 0.9, 0, 0.5]
red = [0.9, 0, 0, 0.5]
threshold = 0.002  # threshold distance for being within target before moving on

# generate a set of random target orientations
np.random.seed(0)
target_quaternions = [
    transformations.unit_vector(transformations.random_quaternion())
    for ii in range(4)
]

target_index = 0
target = target_quaternions[target_index]
print(target)
target_xyz = robot_config.Tx("EE", q=target)
interface.set_mocap_xyz(name="target", xyz=target_xyz)

try:
    # get the end-effector's initial position
    feedback = interface.get_feedback()
    start = robot_config.Tx("EE", feedback["q"])

    count = 0.0
Example #8
            Mx = np.linalg.inv(Mx_inv)
        else:
            # using the rcond to set singular values < thresh to 0
            # singular values < (rcond * max(singular_values)) set to 0
            Mx = np.linalg.pinv(Mx_inv, rcond=.005)

        u_task = np.zeros(6)  # [x, y, z, alpha, beta, gamma]

        # calculate position error
        u_task[:3] = -kp * (hand_xyz - target[:3])

        # Method 1 ------------------------------------------------------------
        #
        # transform the orientation target into a quaternion
        q_d = transformations.unit_vector(
            transformations.quaternion_from_euler(
                target[3], target[4], target[5], axes='rxyz'))

        # get the quaternion for the end effector
        q_e = transformations.unit_vector(
            transformations.quaternion_from_matrix(
                robot_config.R('EE', feedback['q'])))
        # calculate the rotation from the end-effector to target orientation
        q_r = transformations.quaternion_multiply(
            q_d, transformations.quaternion_conjugate(q_e))
        u_task[3:] = q_r[1:] * np.sign(q_r[0])


        # Method 2 ------------------------------------------------------------
        # From (Caccavale et al, 1997) Section IV - Quaternion feedback -------
        #
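Example #8 is cut off before its Method 2 block (Example #2's orientation_algorithm == 1 branch covers the same Caccavale-style quaternion feedback). Independent of that, the rcond comment near the top of this example describes how small singular values are truncated when inverting the task-space inertia matrix; a minimal numpy illustration of that thresholding:

import numpy as np

# toy task-space inertia inverse with singular values 1.0 and 0.001
Mx_inv = np.diag([1.0, 0.001])

print(np.linalg.inv(Mx_inv))                # plain inverse amplifies the small
                                            # singular value: 1 / 0.001 = 1000
print(np.linalg.pinv(Mx_inv, rcond=0.005))  # singular values below
                                            # 0.005 * max(singular_values) = 0.005
                                            # are treated as 0, giving ~diag(1, 0)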