Example #1
def test_qinverse():
    # Takes sequence
    iq = tq.qinverse((1, 0, 0, 0))
    # Returns float type
    assert iq.dtype.kind == 'f'
    for M, q in eg_pairs:
        iq = tq.qinverse(q)
        iqM = tq.quat2mat(iq)
        iM = np.linalg.inv(M)
        assert np.allclose(iM, iqM)
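As a hedged standalone sketch (it assumes only numpy and transforms3d; the eg_pairs fixture above belongs to the test module itself), the inverse can also be checked directly against the identity quaternion:

import numpy as np
import transforms3d.quaternions as tq

q = tq.axangle2quat([0, 0, 1], 0.3)  # unit quaternion: 0.3 rad about the z axis
# q * q^-1 should be the identity quaternion (1, 0, 0, 0)
assert np.allclose(tq.qmult(q, tq.qinverse(q)), [1, 0, 0, 0])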
Example #2
def _distance_from_fixed_angle(angle, fixed_angle):
    """Designed to be mapped, this function finds the smallest rotation between
    two rotations. It assumes a system with no symmetry.

    The algorithm involves converting angles to quaternions, then finding the
    appropriate misorientation. It is tested against the slower, more complete
    version that finds the joining rotation.

    Parameters
    ----------
    angle : np.array()
        Euler angles representing rotation of interest.
    fixed_angle : np.array()
        Euler angles representing fixed reference rotation.

    Returns
    -------
    theta : np.array()
        Rotation angle between angle and fixed_angle.

    """
    angle = angle[0]
    q_data = euler2quat(*np.deg2rad(angle), axes="rzxz")
    q_fixed = euler2quat(*np.deg2rad(fixed_angle), axes="rzxz")
    if np.abs(2 * (np.dot(q_data, q_fixed))**2 - 1) < 1:  # arccos will work
        # https://math.stackexchange.com/questions/90081/quaternion-distance
        theta = np.arccos(2 * (np.dot(q_data, q_fixed))**2 - 1)
    else:  # slower, but also good
        q_from_mode = qmult(qinverse(q_fixed), q_data)
        axis, theta = quat2axangle(q_from_mode)
        theta = np.abs(theta)

    return np.rad2deg(theta)
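The docstring claims the arccos shortcut matches the slower joining-rotation route; a minimal sanity check of that claim, assuming transforms3d is available and symmetry is ignored as above, might look like this:

import numpy as np
from transforms3d.euler import euler2quat
from transforms3d.quaternions import qmult, qinverse, quat2axangle

a, b = np.array([10.0, 20.0, 30.0]), np.array([15.0, 25.0, 35.0])  # degrees
qa = euler2quat(*np.deg2rad(a), axes="rzxz")
qb = euler2quat(*np.deg2rad(b), axes="rzxz")

theta_fast = np.arccos(2 * np.dot(qa, qb) ** 2 - 1)         # shortcut used above
_, theta_slow = quat2axangle(qmult(qinverse(qb), qa))        # joining rotation
theta_slow = min(abs(theta_slow), 2 * np.pi - abs(theta_slow))  # fold into [0, pi]
assert np.isclose(theta_fast, theta_slow)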
Example #3
    def diff_drive2(self, robot, index, target_pose, js1, joint_target, js2):
        """
        This is a hackier version of diff_drive.
        It uses the specified joints to achieve the target pose of the end effector,
        while asking some other specified joints to match a global pose.
        """
        pf = robot.get_compute_functions()['passive_force'](True, True, False)
        max_v = 0.1
        max_w = np.pi
        qpos, qvel, poses = robot.get_observation()
        current_pose: Pose = poses[index]
        delta_p = target_pose.p - current_pose.p
        delta_q = qmult(target_pose.q, qinverse(current_pose.q))

        axis, theta = quat2axangle(delta_q)
        if (theta > np.pi):
            theta -= np.pi * 2

        t1 = np.linalg.norm(delta_p) / max_v
        t2 = theta / max_w
        t = max(np.abs(t1), np.abs(t2), 0.001)
        thres = 0.1
        if t < thres:
            k = (np.exp(thres) - 1) / thres
            t = np.log(k * t + 1)
        v = delta_p / t
        w = theta / t * axis
        target_qvel = robot.get_compute_functions()['cartesian_diff_ik'](
            np.concatenate((v, w)), 9)
        for j, target in zip(js2, joint_target):
            qpos[j] = target
        robot.set_action(qpos, target_qvel, pf)
Example #4
    def diff_drive(self, robot, index, target_pose):
        """
        This diff drive is very hacky.
        It tries to drive the end effector toward the target pose by computing the
        pose difference between the current pose and the target pose, then
        estimating a Cartesian velocity for the end effector to follow.
        It uses differential IK to compute the required joint velocity and sets
        that joint velocity as the current step's target velocity.
        This technique makes the trajectory very unstable, but it still works sometimes.
        """
        pf = robot.get_compute_functions()['passive_force'](True, True, False)
        max_v = 0.1
        max_w = np.pi
        qpos, qvel, poses = robot.get_observation()
        current_pose: Pose = poses[index]
        delta_p = target_pose.p - current_pose.p
        delta_q = qmult(target_pose.q, qinverse(current_pose.q))

        axis, theta = quat2axangle(delta_q)
        if (theta > np.pi):
            theta -= np.pi * 2

        t1 = np.linalg.norm(delta_p) / max_v
        t2 = theta / max_w
        t = max(np.abs(t1), np.abs(t2), 0.001)
        thres = 0.1
        if t < thres:
            k = (np.exp(thres) - 1) / thres
            t = np.log(k * t + 1)
        v = delta_p / t
        w = theta / t * axis
        target_qvel = robot.get_compute_functions()['cartesian_diff_ik'](
            np.concatenate((v, w)), 9)
        robot.set_action(qpos, target_qvel, pf)
Example #5
def getRelativeQuaternionAtoB(quat_a, quat_b):
    """
	Given two quaternions A and B, determines the quaternion that rotates A to B
	(Multiplying A by q_rel will give B)
	"""
    q_rel = qmult(qinverse(quat_a), quat_b)
    return q_rel
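A quick usage sketch (assuming transforms3d supplies the qmult/qinverse used above) confirms that right-multiplying A by q_rel recovers B:

import numpy as np
from transforms3d.euler import euler2quat
from transforms3d.quaternions import qmult, qinverse

quat_a = euler2quat(0.1, 0.2, 0.3)
quat_b = euler2quat(0.4, 0.5, 0.6)
q_rel = qmult(qinverse(quat_a), quat_b)   # same expression as getRelativeQuaternionAtoB
assert np.allclose(qmult(quat_a, q_rel), quat_b)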
Example #6
def obtain_v_dqs(ndat, delta, q):
    out = np.zeros((ndat, 3))
    for i in range(ndat):
        j = i + delta
        # Since everything is squared, does quat_reduce matter? ...no, but do so anyway.
        dq = qs.quat_reduce(qops.qmult(qops.qinverse(q[i]), q[j]))
        out[i] = dq[1:4]
    return out
Example #7
def compute_imu_orientation_from_world(robot_quat_in_world):
    # imu orientation has roll and pitch relative to gravity vector. yaw in world frame
    # get global yaw
    yrp_world_frame = quat2euler(robot_quat_in_world, axes='szxy')
    # remove global yaw rotation from roll and pitch
    yaw_quat = euler2quat(yrp_world_frame[0], 0, 0, axes='szxy')
    rp = rotate_vector((yrp_world_frame[1], yrp_world_frame[2], 0), qinverse(yaw_quat))
    # save in correct order
    return [rp[0], rp[1], 0], yaw_quat
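A hedged usage sketch (it assumes the quat2euler/euler2quat/rotate_vector/qinverse names above come from transforms3d, with quaternions in w-x-y-z order):

from transforms3d.euler import euler2quat

# yaw = 0.5, roll = 0.1, pitch = 0.2 in the 'szxy' convention used above
robot_quat = euler2quat(0.5, 0.1, 0.2, axes='szxy')
rp, yaw_quat = compute_imu_orientation_from_world(robot_quat)
# rp carries roll/pitch with the global yaw removed; yaw_quat is the yaw-only quaternion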
Example #8
    def residuals(self, poses, vos, L_ax, L_aq, L_rx, L_rq):
        """
        computes the residuals
        :param poses: N x 7
        :param vos: (N-1) x 7
        :param L_ax: 3 x 3
        :param L_aq: 4 x 4
        :param L_rx: 3 x 3
        :param L_rq: 4 x 4
        :return:
        """
        r = np.zeros((0, 1))

        # unary residuals
        L = np.zeros((7, 7))
        L[:3, :3] = L_ax
        L[3:, 3:] = L_aq
        for i in range(self.N):
            rr = self.z[7 * i:7 * (i + 1)] - np.reshape(poses[i], (-1, 1))
            r = np.vstack((r, np.dot(L, rr)))

        # pairwise residuals
        k = 0
        for i in range(self.N):
            for j in range(i + 1, self.N):
                # translation residual
                v = self.z[7 * j:7 * j + 3, 0] - self.z[7 * i:7 * i + 3, 0]
                q = txq.qinverse(self.z[7 * i + 3:7 * i + 7, 0])
                rt = txq.rotate_vector(v, q)
                rt = rt[:, np.newaxis] - vos[k, :3].reshape((-1, 1))
                # rt = self.z[7*(i+1) : 7*(i+1)+3] - self.z[7*i : 7*i+3] - \
                #     vos[i, :3].reshape((-1, 1))
                r = np.vstack((r, np.dot(L_rx, rt)))

                # rotation residual
                q0 = self.z[7 * i + 3:7 * i + 7].squeeze()
                q1 = self.z[7 * j + 3:7 * j + 7].squeeze()
                qvo = txq.qmult(txq.qinverse(q0), q1).reshape((-1, 1))
                rq = qvo - vos[k, 3:].reshape((-1, 1))
                r = np.vstack((r, np.dot(L_rq, rq)))
                k += 1

        return r
Example #9
    def get_allocentric_poses(self):
        poses = self.get_poses()
        poses_allocentric = []
        for pose in poses:
            dx = np.arctan2(pose[0], -pose[2])
            dy = np.arctan2(pose[1], -pose[2])
            quat = euler2quat(-dy, -dx, 0, axes='sxyz')
            quat = qmult(qinverse(quat), pose[3:])
            poses_allocentric.append(np.concatenate([pose[:3], quat]))
            #print(quat, pose[3:], pose[:3])
        return poses_allocentric
Example #10
def get_distance_between_two_angles_longform(angle_1, angle_2):
    """
    Using the long form to find the distance between two angles in euler form
    """
    q1 = euler2quat(*np.deg2rad(angle_1), axes='rzxz')
    q2 = euler2quat(*np.deg2rad(angle_2), axes='rzxz')
    # now assume transform of the form MODAL then Something = TOTAL
    # so we want to calculate MODAL^{-1} TOTAL

    q_from_mode = qmult(qinverse(q2), q1)
    axis, angle = quat2axangle(q_from_mode)
    return np.rad2deg(angle)
Example #11
    def rot_diff(self, other):
        '''Compute rotation between frames

        Arguments:
            other {Frame} -- target frame

        Returns:
            np.array, float -- axis and angle
        '''

        dq = quaternions.qmult(other.q, quaternions.qinverse(self.q))
        axis, angle = quaternions.quat2axangle(dq)
        return axis, angle
Example #12
    def get_imu_quaternion(self):
        # imu orientation has roll and pitch relative to gravity vector, yaw in world frame
        _, robot_quat_in_world = self.get_robot_pose()
        # change order to transforms3d standard (w, x, y, z)
        robot_quat_in_world = (robot_quat_in_world[3], robot_quat_in_world[0],
                               robot_quat_in_world[1], robot_quat_in_world[2])
        # get global yaw
        yrp_world_frame = quat2euler(robot_quat_in_world, axes='szxy')
        # remove global yaw rotation from roll and pitch
        yaw_quat = euler2quat(yrp_world_frame[0], 0, 0, axes='szxy')
        rp = rotate_vector((yrp_world_frame[1], yrp_world_frame[2], 0),
                           qinverse(yaw_quat))
        # save in correct order
        rpy = [rp[0], rp[1], 0]
        # convert to quaternion
        quat_wxyz = euler2quat(*rpy)
        # change order to ROS standard (x, y, z, w)
        return quat_wxyz[1], quat_wxyz[2], quat_wxyz[3], quat_wxyz[0]
Example #13
def rotmatrix_to_quaternion(time, matrix, bInvert=False):
    """
    Converts a matrix to quaternion, with a reverse if necessary
    """

    nPts = len(time)
    if nPts != len(matrix):
        print("= = = ERROR in rotmatrix_to_quaternion: lengths are not the same!", file=sys.stderr)
        return

    out = np.zeros((5, nPts))
    for i in range(nPts):
        out[0, i] = time[i]
        if bInvert:
            out[1:5, i] = qops.qinverse(qops.mat2quat(matrix[i]))
        else:
            out[1:5, i] = qops.mat2quat(matrix[i])
    return out
Example #14
    def diff_drive(self,
                   robot: PandaArm,
                   index,
                   target_pose,
                   max_v=0.1,
                   max_w=np.pi,
                   active_joints_ids=[],
                   override=None):
        obs = robot.observation
        qpos, qvel, poses = obs['qpos'], obs['qvel'], obs['poses']
        current_pose: Pose = poses[index]
        delta_p = target_pose.p - current_pose.p
        delta_q = quat.qmult(target_pose.q, quat.qinverse(current_pose.q))

        axis, theta = quat.quat2axangle(delta_q)
        if theta > np.pi:
            theta -= np.pi * 2

        t1 = np.linalg.norm(delta_p) / max_v
        t2 = theta / max_w
        t = max(np.abs(t1), np.abs(t2), 0.01)
        thres = 0.1
        if t < thres:
            k = (np.exp(thres) - 1) / thres
            t = np.log(k * t + 1)
        v = delta_p / t
        w = theta / t * axis
        index = index if index >= 0 else len(robot._robot.get_joints()) + index
        cal_qvel = robot.get_compute_functions()['cartesian_diff_ik'](
            np.concatenate((v, w)), index, active_joints_ids)
        target_qvel = np.zeros_like(qvel)
        indx = np.arange(
            len(qvel)) if active_joints_ids == [] else active_joints_ids
        target_qvel[indx] = cal_qvel

        pf = robot.get_compute_functions()['passive_force']()

        if override is not None:
            override_indx, override_qpos, override_qvel = override
            qpos[override_indx] = override_qpos
            target_qvel[override_indx] = override_qvel

        robot.set_action(qpos, target_qvel, pf)
Example #15
def optimize_poses(pred_poses,
                   vos=None,
                   fc_vos=False,
                   target_poses=None,
                   sax=1,
                   saq=1,
                   srx=1,
                   srq=1):
    """
    optimizes poses using either the VOs or the target poses (calculates VOs
    from them)
    :param pred_poses: N x 7
    :param vos: (N-1) x 7
    :param fc_vos: whether to use relative transforms between all frames in a fully
    connected manner, not just consecutive frames
    :param target_poses: N x 7
    :param: sax: covariance of pose translation (1 number)
    :param: saq: covariance of pose rotation (1 number)
    :param: srx: covariance of VO translation (1 number)
    :param: srq: covariance of VO rotation (1 number)
    :return:
    """
    pgo = PoseGraphFC() if fc_vos else PoseGraph()
    if vos is None:
        if target_poses is not None:
            # calculate the VOs (in the pred_poses frame)
            vos = np.zeros((len(target_poses) - 1, 7))
            for i in range(len(vos)):
                vos[i, :3] = target_poses[i + 1, :3] - target_poses[i, :3]
                q0 = target_poses[i, 3:]
                q1 = target_poses[i + 1, 3:]
                vos[i, 3:] = txq.qmult(txq.qinverse(q0), q1)
        else:
            print('Specify either VO or target poses')
            return None
    optim_poses = pgo.optimize(poses=pred_poses,
                               vos=vos,
                               sax=sax,
                               saq=saq,
                               srx=srx,
                               srq=srq)
    return optim_poses
Example #16
def orientationEstimaton():
    #import the data
    filename = "imu/imuRaw1.mat"
    viconFileName = "vicon/viconRot1.mat"
    Ax, Ay, Az, Wx, Wy, Wz, ts = importData(filename)

    #initialize the P estimate covariance matrix (3x3)
    Pk_minus = np.zeros((3, 3))
    Pk_minus[0, 0] = 0.277
    Pk_minus[1, 1] = 0.277
    Pk_minus[2, 2] = 0.4

    #initialize a Q matrix (3x3)
    Q = np.diag(np.ones(3) * .1)

    #initialize state
    xk_minus = np.array([1, 0, 0, 0])

    #matrix for final output
    allEstimates = np.empty((len(Ax), 3))

    #run the measurement model for each of the pieces of data we have
    for i in range(0, len(Ax) - 1):
        epsilon = 5
        c = epsilon * np.eye(3)

        #find the S matrix (3x3)
        S = np.linalg.cholesky((2 * len(Q)) * (Pk_minus + Q + c))

        #create the Wi matrix
        Wi = np.hstack((S, -S))  #3x6

        #convert W to quaternion representation
        Xi = np.empty((6, 4))
        for j in range(0, len(Wi)):
            newQuat = axisAngleToQuat(Wi[j, :])
            Xi[j, :] = quat.qnorm(newQuat)

        #add qk-1 to the Xi
        for j in range(0, len(Xi)):
            if i == 0:
                xk_minusAsQuat = np.array([1, 0, 0, 0])
            else:
                xk_minusAsQuat = axisAngleToQuat(xk_minus)
            newQuat = quat.qmult(xk_minusAsQuat, Xi[j, :])
            Xi[j, :] = quat.qnorm(newQuat)

        #get the process model and convert Xi to Yi
        qDelta = processModel(Wx[i], Wy[i], Wz[i], ts[i + 1] - ts[i])
        Yi = np.empty((6, 4))
        for j in range(0, len(Xi)):
            newQuat = quat.qmult(Xi[j, :], qDelta)
            Yi[j, :] = quat.qnorm(newQuat)
        Yi = np.nan_to_num(Yi)

        #get the priori mean and covariance
        [xk_minus_quat, allErrorsX, averageErrorX] = findQuaternionMean(Xi)
        [n, _] = Xi.shape
        xk_minus = quatToAxisAngle(xk_minus_quat)

        #3x3 matrix in terms of axis angle r vectors
        Pk_minus = 1.0 / (2 * n) * np.dot(
            np.array(allErrorsX - averageErrorX).T,
            np.array(allErrorsX - averageErrorX))
        Pk_minus = np.nan_to_num(Pk_minus)

        #this is where the update model starts
        g = measurementModel(Ax[i], Ay[i], Az[i])
        Zi = np.empty((6, 4))
        for j in range(0, len(Yi)):
            vector = np.nan_to_num(Yi[j, :])
            if (np.array_equal(vector, np.array([0, 0, 0, 0]))):
                inverse = vector
            else:
                inverse = np.nan_to_num(quat.qinverse(vector))
            lastTwo = np.nan_to_num(quat.qmult(g, inverse))
            newQuat = np.nan_to_num(quat.qmult(vector, lastTwo))
            Zi[j, :] = np.nan_to_num(quat.qnorm(newQuat))
        #remove all the nans
        Zi = np.nan_to_num(Zi)

        #find the mean and covariance of Zi
        [zk_minus, allErrorsZ, averageErrorZ] = findQuaternionMean(Zi)

        # 6x6 matrix in terms of axis angle r vectors
        Pzz = 1.0 / (2 * n) * np.dot(
            np.array(allErrorsZ - averageErrorZ).T,
            np.array(allErrorsZ - averageErrorZ))

        #find the innovation vk
        zk_plus = np.array([Ax[i], Ay[i], Az[i]])
        zk_minusVector = quatToAxisAngle(zk_minus)
        vk = zk_plus - zk_minusVector

        #find the expected covariance
        R = np.diag(np.ones(3) * .40)
        Pvv = Pzz + R
        Pvv = np.nan_to_num(Pvv)

        #find Pxz, the cross-correlation matrix
        Pxz = 1.0 / (2 * n) * np.dot(
            np.array(allErrorsX - averageErrorX).T,
            np.array(allErrorsZ - averageErrorZ))
        Pxz = np.nan_to_num(Pxz)

        #find the Kalman gain matrix (matrix product, not elementwise)
        Kk = np.dot(Pxz, np.linalg.inv(Pvv))
        Kk = np.nan_to_num(Kk)

        #find the posteriori mean, which is the updated estimate of the state
        xk = xk_minus + np.dot(Kk, vk)
        xk = np.nan_to_num(xk)

        #find the posteriori covariance, which is the updated covariance
        Pk = Pk_minus - np.dot(np.dot(Kk, Pvv), Kk.T)
        Pk = np.nan_to_num(Pk)

        #set the new xk to xk_minus and the new Pk to the Pk_minus
        xk_minus = xk
        Pk_minus = Pk

        #add to a matrix and return
        allEstimates[i] = xk_minus

    #visualize the results
    visualizeResults(allEstimates, viconFileName)

    #create the panorama using my own data
    rotsInMatrix = np.empty((3, 3, len(allEstimates)))
    for est in range(0, len(allEstimates)):
        rot = axisAngletoMat(allEstimates[est])
        rotsInMatrix[:, :, est] = rot
    createPanorama(rotsInMatrix, ts)

    return allEstimates
Example #17
"""
Copyright (C) 2018 NVIDIA Corporation.  All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""

import numpy as np
import transforms3d.quaternions as txq
import transforms3d.euler as txe

# predicted poses
tp1 = np.random.rand(3)
tp2 = np.random.rand(3)
qp1 = txe.euler2quat(*(2 * np.pi * np.random.rand(3)))
qp2 = txe.euler2quat(*(2 * np.pi * np.random.rand(3)))

# relatives
t_rel = txq.rotate_vector(v=tp2 - tp1, q=txq.qinverse(qp1))
q_rel = txq.qmult(txq.qinverse(qp1), qp2)

# vo poses
trand = np.random.rand(3)
qrand = txe.euler2quat(*(2 * np.pi * np.random.rand(3)))
tv1 = txq.rotate_vector(v=tp1, q=qrand)
qv1 = txq.qmult(qrand, qp1)
tv2 = txq.rotate_vector(v=t_rel, q=qv1) + tv1
qv2 = txq.qmult(qv1, q_rel)

# aligned vo
voq = txq.qmult(txq.qinverse(qv1), qv2)
vot = txq.rotate_vector(v=tv2 - tv1, q=txq.qinverse(qv1))
vot = txq.rotate_vector(v=vot, q=qp1)
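As a sketch of what this script sets up, the aligned VO should reproduce the relative motion of the predicted poses; a hedged check appended to the code above:

# voq should equal the predicted relative rotation, and vot the predicted
# relative translation (quaternions may differ by sign).
assert np.allclose(voq, q_rel) or np.allclose(voq, -q_rel)
assert np.allclose(vot, tp2 - tp1)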
Example #18
q_A_C = quat.axangle2quat(r2_A, -theta2)
print("q_A_B: ")
print(q_A_B)
print("q_A_C: ")
print(q_A_C)

# 2. Vector coordinate transformation
xA_A = np.array([1.0, 0.0, 0.0])  # A's x axis in A
xA_B = quat.rotate_vector(xA_A, q_A_B)  # A's x axis in B
xA_C = quat.rotate_vector(xA_A, q_A_C)  # A's x axis in C
print("xA_B: ")
print(xA_B)
print("xA_C: ")
print(xA_C)

q_B_C = quat.qmult(q_A_C, quat.qinverse(q_A_B))
xB_B = np.array([1.0, 0.0, 0.0])  # B's x axis in B
xB_C = quat.rotate_vector(xB_B, q_B_C)  # B's x axis in C
print("q_B_C: ")
print(q_B_C)
print("xB_C: ")
print(xB_C)

# 3. Quaternion and rotation matrix conversion
M_A_B_1 = quat.quat2mat(q_A_B)
M_A_B_2 = np.eye(3)
yA_B = np.array([0.0, -1.0, 0.0])
zA_B = np.array([0.0, 0.0, 1.0])
M_A_B_2[:, 0] = xA_B
M_A_B_2[:, 1] = yA_B
M_A_B_2[:, 2] = zA_B
Example #19
def egocentric2allocentric(qt, T):
    dx = np.arctan2(T[0], -T[2])
    dy = np.arctan2(T[1], -T[2])
    quat = euler2quat(-dy, -dx, 0, axes='sxyz')
    quat = qmult(qinverse(quat), qt)
    return quat
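A round-trip sketch (allocentric2egocentric is a hypothetical inverse written here only for illustration, assuming the same transforms3d imports as the function above):

import numpy as np
from transforms3d.euler import euler2quat
from transforms3d.quaternions import qmult, qinverse

def allocentric2egocentric(quat_allo, T):
    # hypothetical inverse of egocentric2allocentric above
    dx = np.arctan2(T[0], -T[2])
    dy = np.arctan2(T[1], -T[2])
    view_quat = euler2quat(-dy, -dx, 0, axes='sxyz')
    return qmult(view_quat, quat_allo)

qt = euler2quat(0.1, 0.2, 0.3)
T = np.array([0.2, -0.1, -1.0])
assert np.allclose(allocentric2egocentric(egocentric2allocentric(qt, T), T), qt)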
Example #20
def q2q_to_rot(q1, q2):
    q1inverted = qinverse(q1)
    # hopefully this next line is equivalent to q2 * q1' = r, where r is the rotation quaternion
    rotation_quaternion = qmult(q2, q1inverted)
    rotation_matrix = quat2mat(rotation_quaternion)
    return rotation_matrix
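A hedged check (assuming the qinverse/qmult/quat2mat above come from transforms3d): the returned matrix should carry q1's orientation onto q2's.

import numpy as np
from transforms3d.euler import euler2quat
from transforms3d.quaternions import quat2mat

q1 = euler2quat(0.1, 0.2, 0.3)
q2 = euler2quat(0.4, 0.5, 0.6)
R = q2q_to_rot(q1, q2)
assert np.allclose(R @ quat2mat(q1), quat2mat(q2))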