def test_q_inv(self):
    result = quat.q_mult(self.qz, quat.q_inv(self.qz))
    correct = array([1., 0., 0., 0.])
    error = norm(result - correct)
    self.assertTrue(error < self.delta)

    result = quat.q_mult(self.quatMat, quat.q_inv(self.quatMat))
    correct = array([[1., 0., 0., 0.],
                     [1., 0., 0., 0.]])
    error = norm(result - correct)
    self.assertTrue(error < self.delta)

    result = quat.q_mult(self.q3x, quat.q_inv(self.q3x))
    correct = array([0., 0., 0.])
    error = norm(result - correct)
    self.assertTrue(error < self.delta)
def calc_position(q, accel, initialVelocity, initialPosition, timeVector):
    g_v = np.r_[0, 0, constants.g]
    accReSensor = accel - vector.rotate_vector(g_v, quat.q_inv(q))
    accReSpace = vector.rotate_vector(accReSensor, q)

    # Velocity and position through integration, starting from the given
    # initial velocity and position
    vel = np.nan * np.ones_like(accReSpace)
    pos = np.nan * np.ones_like(accReSpace)

    for ii in range(accReSpace.shape[1]):
        vel[:, ii] = cumtrapz(accReSpace[:, ii], x=timeVector,
                              initial=initialVelocity[ii])
        pos[:, ii] = cumtrapz(vel[:, ii], x=timeVector,
                              initial=initialPosition[ii])

    avevel = np.mean(vel, axis=0)
    aveaccel = np.mean(accel, axis=0)
    print("Average Accel: {}, Average Velocity: {}, Time taken: {}".format(
        np.sqrt(aveaccel[0]**2 + aveaccel[1]**2 + aveaccel[2]**2),
        np.sqrt(avevel[0]**2 + avevel[1]**2 + avevel[2]**2),
        timeVector[-1] * 1000))

    return pos, vel
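# --- Hedged usage sketch for calc_position() above -------------------------
# Identity orientation and a purely gravitational acceleration, so the
# integrated velocity and position stay at their initial values.  Assumes
# the imports used by the function (numpy as np, scipy constants,
# skinematics quat/vector, and cumtrapz) are available.
import numpy as np

N = 100
timeVector = np.arange(N) / 100.                  # 1 s sampled at 100 Hz
q = np.tile([1., 0., 0., 0.], (N, 1))             # identity orientation
accel = np.tile([0., 0., 9.80665], (N, 1))        # gravity only

pos, vel = calc_position(q, accel,
                         initialVelocity=np.zeros(3),
                         initialPosition=np.zeros(3),
                         timeVector=timeVector)
# pos and vel remain (numerically) zero for this ideal record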
def calc_position(self):
    '''Calculate the position, assuming that the orientation is already known.'''

    initialPosition = self.pos_init

    # Acceleration, velocity, and position ----------------------------
    # From q and the measured acceleration, get the \frac{d^2x}{dt^2}
    g = constants.g
    g_v = np.r_[0, 0, g]
    accReSensor = self.acc - vector.rotate_vector(g_v, quat.q_inv(self.quat))
    accReSpace = vector.rotate_vector(accReSensor, self.quat)

    # Position and velocity through integration, assuming 0-velocity at t=0
    vel = np.nan * np.ones_like(accReSpace)
    pos = np.nan * np.ones_like(accReSpace)

    for ii in range(accReSpace.shape[1]):
        vel[:, ii] = cumtrapz(accReSpace[:, ii],
                              dx=1. / float(self.rate), initial=0)
        pos[:, ii] = cumtrapz(vel[:, ii],
                              dx=1. / float(self.rate),
                              initial=initialPosition[ii])

    self.pos = pos
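# --- SciPy compatibility note for cumtrapz ---------------------------------
# Both calc_position variants rely on scipy.integrate.cumtrapz, which newer
# SciPy releases rename to cumulative_trapezoid (recent versions drop the
# old name).  A small compatibility import, assuming one of the two names
# exists in the installed SciPy:
try:
    from scipy.integrate import cumulative_trapezoid as cumtrapz  # SciPy >= 1.6
except ImportError:
    from scipy.integrate import cumtrapz                          # older SciPy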
def rotate_vector(vector, q):
    '''
    Rotates a vector, according to the given quaternions.
    Note that a single vector can be rotated into many orientations;
    or a row of vectors can all be rotated by a single quaternion.

    Parameters
    ----------
    vector : array, shape (3,) or (N,3)
        vector(s) to be rotated.
    q : array_like, shape ([3,4],) or (N,[3,4])
        quaternions or quaternion vectors.

    Returns
    -------
    rotated : array, shape (3,) or (N,3)
        rotated vector(s)

    .. image:: ../docs/Images/vector_rotate_vector.png
        :scale: 33%

    Notes
    -----
    .. math::
        q \\circ \\left( {\\vec x \\cdot \\vec I} \\right) \\circ {q^{ - 1}} = \\left( {{\\bf{R}} \\cdot \\vec x} \\right) \\cdot \\vec I

    More info under
    http://en.wikipedia.org/wiki/Quaternion

    Examples
    --------
    >>> mymat = eye(3)
    >>> myVector = r_[1,0,0]
    >>> quats = array([[0, 0, sin(0.1)], [0, sin(0.2), 0]])
    >>> quat.rotate_vector(myVector, quats)
    array([[ 0.98006658,  0.19866933,  0.        ],
           [ 0.92106099,  0.        , -0.38941834]])

    >>> quat.rotate_vector(mymat, [0, 0, sin(0.1)])
    array([[ 0.98006658,  0.19866933,  0.        ],
           [-0.19866933,  0.98006658,  0.        ],
           [ 0.        ,  0.        ,  1.        ]])
    '''
    vector = np.atleast_2d(vector)
    qvector = np.hstack((np.zeros((vector.shape[0], 1)), vector))
    vRotated = quat.q_mult(q, quat.q_mult(qvector, quat.q_inv(q)))
    vRotated = vRotated[:, 1:]

    if min(vRotated.shape) == 1:
        vRotated = vRotated.ravel()

    return vRotated
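# --- Round-trip check with q_inv --------------------------------------------
# Rotating a vector by q and then by q_inv(q) should return the original
# vector.  A minimal sketch, assuming numpy as np and the skinematics
# modules quat and vector (which provides rotate_vector above) are imported:
q = np.r_[np.cos(0.3), 0., 0., np.sin(0.3)]           # rotation about the z-axis
v = np.r_[1., 2., 3.]

v_rot = vector.rotate_vector(v, q)                    # rotate into the new frame
v_back = vector.rotate_vector(v_rot, quat.q_inv(q))   # undo the rotation
print(np.allclose(v, v_back))                         # expected: True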
def kalman(rate, acc, omega, mag,
           D=[0.4, 0.4, 0.4],
           tau=[0.5, 0.5, 0.5],
           Q_k=None,
           R_k=None,
           initialPosition=np.zeros(3),
           initialVelocity=np.zeros(3),
           initialOrientation=np.zeros(3),
           timeVector=np.zeros((5, 1)),
           accMeasured=np.column_stack((np.zeros((5, 2)), 9.81 * np.ones(5))),
           referenceOrientation=np.array([1., 0., 0., 0.])):
    '''
    Calculate the orientation from IMU and magnetometer data.

    Parameters
    ----------
    rate : float
        sample rate [Hz]
    acc : (N,3) ndarray
        linear acceleration [m/sec^2]
    omega : (N,3) ndarray
        angular velocity [rad/sec]
    mag : (N,3) ndarray
        magnetic field orientation
    D : (3,) ndarray
        noise variance, for x/y/z [rad^2/sec^2]
        parameter for tuning the filter; defaults from Yun et al.
        can also be entered as list
    tau : (3,) ndarray
        time constant for the process model, for x/y/z [sec]
        parameter for tuning the filter; defaults from Yun et al.
        can also be entered as list
    Q_k : None, or (7,7) ndarray
        covariance matrix of process noises
        parameter for tuning the filter
        If set to "None", the defaults from Yun et al. are taken!
    R_k : None, or (7,7) ndarray
        covariance matrix of measurement noises
        parameter for tuning the filter; defaults from Yun et al.
        If set to "None", the defaults from Yun et al. are taken!

    Returns
    -------
    qOut : (N,4) ndarray
        unit quaternion, describing the orientation relative to the
        coordinate system spanned by the local magnetic field, and gravity

    Notes
    -----
    Based on "Design, Implementation, and Experimental Results of a
    Quaternion-Based Kalman Filter for Human Body Motion Tracking"
    Yun, X. and Bachmann, E.R., IEEE TRANSACTIONS ON ROBOTICS,
    VOL. 22, 1216-1227 (2006)
    '''

    numData = len(acc)

    # Set parameters for Kalman Filter
    tstep = 1. / rate

    # check input
    assert len(tau) == 3
    tau = np.array(tau)

    # Initializations
    x_k = np.zeros(7)      # state vector
    z_k = np.zeros(7)      # measurement vector
    z_k_pre = np.zeros(7)
    P_k = np.eye(7)        # error covariance matrix P_k

    Phi_k = np.eye(7)      # discrete state transition matrix Phi_k
    for ii in range(3):
        Phi_k[ii, ii] = np.exp(-tstep / tau[ii])

    H_k = np.eye(7)        # measurement matrix (identity)

    D = np.array(D)        # [rad^2/sec^2]; defaults [0.4, 0.4, 0.4] from Yun, 2006

    if Q_k is None:
        # Set the default input, from Yun et al.
        Q_k = np.zeros((7, 7))  # process noise matrix Q_k
        for ii in range(3):
            Q_k[ii, ii] = D[ii] / (2 * tau[ii]) * (1 - np.exp(-2 * tstep / tau[ii]))
    else:
        # Check the shape of the input
        assert Q_k.shape == (7, 7)

    # Evaluate measurement noise covariance matrix R_k
    if R_k is None:
        # Set the default input, from Yun et al.
        r_angvel = 0.01   # [rad**2/sec**2]; from Yun, 2006
        r_quats = 0.0001  # from Yun, 2006

        r_ii = np.zeros(7)
        for ii in range(3):
            r_ii[ii] = r_angvel
        for ii in range(4):
            r_ii[ii + 3] = r_quats

        R_k = np.diag(r_ii)
    else:
        # Check the shape of the input
        assert R_k.shape == (7, 7)

    # Calculation of orientation for every time step
    qOut = np.zeros((numData, 4))

    for ii in range(numData):
        accelVec = acc[ii, :]
        magVec = mag[ii, :]
        angvelVec = omega[ii, :]
        z_k_pre = z_k.copy()  # watch out: by default, Python passes the reference!!

        # Evaluate quaternion based on acceleration and magnetic field data
        accelVec_n = vector.normalize(accelVec)
        magVec_hor = magVec - accelVec_n * (accelVec_n @ magVec)
        magVec_n = vector.normalize(magVec_hor)
        basisVectors = np.column_stack([magVec_n,
                                        np.cross(accelVec_n, magVec_n),
                                        accelVec_n])
        quatRef = quat.q_inv(rotmat.convert(basisVectors, to='quat')).ravel()

        # Calculate Kalman Gain
        # K_k = P_k * H_k.T * inv(H_k*P_k*H_k.T + R_k)
        K_k = P_k @ np.linalg.inv(P_k + R_k)

        # Update measurement vector z_k
        z_k[:3] = angvelVec
        z_k[3:] = quatRef

        # Update state vector x_k
        x_k += np.array(K_k @ (z_k - z_k_pre)).ravel()

        # Evaluate discrete state transition matrix Phi_k
        Delta = np.zeros((7, 7))
        Delta[3, :] = np.r_[-x_k[4], -x_k[5], -x_k[6], 0, -x_k[0], -x_k[1], -x_k[2]]
        Delta[4, :] = np.r_[x_k[3], -x_k[6], x_k[5], x_k[0], 0, x_k[2], -x_k[1]]
        Delta[5, :] = np.r_[x_k[6], x_k[3], -x_k[4], x_k[1], -x_k[2], 0, x_k[0]]
        Delta[6, :] = np.r_[-x_k[5], x_k[4], x_k[3], x_k[2], x_k[1], -x_k[0], 0]
        Delta *= tstep / 2
        Phi_k += Delta

        # Update error covariance matrix
        P_k = (np.eye(7) - K_k) @ P_k

        # Projection of state
        # 1) quaternions
        x_k[3:] += tstep * 0.5 * quat.q_mult(x_k[3:], np.r_[0, x_k[:3]]).ravel()
        x_k[3:] = vector.normalize(x_k[3:])
        # 2) angular velocities
        x_k[:3] -= tstep * tau * x_k[:3]

        qOut[ii, :] = x_k[3:]

        # Projection of error covariance matrix
        P_k = Phi_k @ P_k @ Phi_k.T + Q_k

    # Calculate Position from Orientation
    # pos, vel = calc_position(qOut, acc, initialVelocity, initialPosition, timeVector)

    # Rotate the output into the given reference orientation
    qOut = quat.q_mult(qOut, quat.q_inv(referenceOrientation))

    return qOut
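# --- Hedged usage sketch for the Kalman filter above ------------------------
# Synthetic, stationary IMU data: gravity along z, no rotation, and an
# arbitrary constant magnetic field.  Assumes the kalman() defined above is
# in scope (in scikit-kinematics it lives in skinematics.imus; adjust the
# import to your module layout).
import numpy as np

N = 200
rate = 100.
acc = np.tile([0., 0., 9.81], (N, 1))      # accelerometer sees only gravity
omega = np.zeros((N, 3))                   # gyroscope: no angular velocity
mag = np.tile([0.5, 0., -0.3], (N, 1))     # constant magnetic field vector

q_est = kalman(rate, acc, omega, mag)
print(q_est.shape)                         # (N, 4): one unit quaternion per sample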
def analytical(R_initialOrientation=np.eye(3),
               omega=np.zeros((5, 3)),
               initialPosition=np.zeros(3),
               initialVelocity=np.zeros(3),
               accMeasured=np.column_stack((np.zeros((5, 2)), 9.81 * np.ones(5))),
               rate=100):
    ''' Reconstruct position and orientation with an analytical solution,
    from angular velocity and linear acceleration.
    Assumes a start in a stationary position. No compensation for drift.

    Parameters
    ----------
    R_initialOrientation : ndarray(3,3)
        Rotation matrix describing the initial orientation of the sensor,
        except a mis-orientation with respect to gravity
    omega : ndarray(N,3)
        Angular velocity, in [rad/s]
    initialPosition : ndarray(3,)
        initial Position, in [m]
    initialVelocity : ndarray(3,)
        initial Velocity, in [m/s]
    accMeasured : ndarray(N,3)
        Linear acceleration, in [m/s^2]
    rate : float
        sampling rate, in [Hz]

    Returns
    -------
    q : ndarray(N,4)
        Orientation, expressed as unit quaternions
    pos : ndarray(N,3)
        Position in space [m]
    vel : ndarray(N,3)
        Velocity in space [m/s]

    Example
    -------
    >>> q1, pos1, vel1 = analytical(R_initialOrientation, omega, initialPosition,
    ...                             initialVelocity, acc, rate)
    '''

    if omega.ndim == 1:
        raise ValueError('The input to "analytical" requires matrix inputs.')

    # Transform recordings to angVel/acceleration in space --------------

    # Orientation of \vec{g} with the sensor in the "R_initialOrientation"
    g = constants.g
    g0 = np.linalg.inv(R_initialOrientation).dot(np.r_[0, 0, g])

    # for the remaining deviation, assume the shortest rotation to there
    q0 = vector.q_shortest_rotation(accMeasured[0], g0)

    q_initial = rotmat.convert(R_initialOrientation, to='quat')

    # combine the two, to form a reference orientation. Note that the sequence
    # is very important!
    q_ref = quat.q_mult(q_initial, q0)

    # Calculate orientation q by "integrating" omega -----------------
    q = quat.calc_quat(omega, q_ref, rate, 'bf')

    # Acceleration, velocity, and position ----------------------------
    # From q and the measured acceleration, get the \frac{d^2x}{dt^2}
    g_v = np.r_[0, 0, g]
    accReSensor = accMeasured - vector.rotate_vector(g_v, quat.q_inv(q))
    accReSpace = vector.rotate_vector(accReSensor, q)

    # Make the first orientation the reference orientation
    q = quat.q_mult(q, quat.q_inv(q[0]))

    # compensate for drift
    # drift = np.mean(accReSpace, 0)
    # accReSpace -= drift*0.7

    # Velocity and position through integration, starting from the given
    # initial velocity and position
    vel = np.nan * np.ones_like(accReSpace)
    pos = np.nan * np.ones_like(accReSpace)

    for ii in range(accReSpace.shape[1]):
        vel[:, ii] = cumtrapz(accReSpace[:, ii], dx=1. / rate,
                              initial=initialVelocity[ii])
        pos[:, ii] = cumtrapz(vel[:, ii], dx=1. / rate,
                              initial=initialPosition[ii])

    return (q, pos, vel)
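# --- Hedged usage sketch for analytical() above ------------------------------
# A motionless recording: no angular velocity, and the accelerometer reads
# only gravity.  The reconstructed orientation should stay at identity and
# the integrated position should stay close to the origin.  Assumes the
# analytical() defined above (and its numpy/scipy/skinematics imports) are
# available.
import numpy as np

N = 500
rate = 100.
omega = np.zeros((N, 3))                                      # no rotation
acc = np.column_stack((np.zeros((N, 2)), 9.81 * np.ones(N)))  # gravity only

q, pos, vel = analytical(R_initialOrientation=np.eye(3),
                         omega=omega,
                         initialPosition=np.zeros(3),
                         initialVelocity=np.zeros(3),
                         accMeasured=acc,
                         rate=rate)
print(np.allclose(pos[-1], 0, atol=0.1))   # residual drift stays small here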
def kalman(rate, acc, omega, mag):
    '''
    Calculate the orientation from IMU and magnetometer data.

    Parameters
    ----------
    rate : float
        sample rate [Hz]
    acc : (N,3) ndarray
        linear acceleration [m/sec^2]
    omega : (N,3) ndarray
        angular velocity [rad/sec]
    mag : (N,3) ndarray
        magnetic field orientation

    Returns
    -------
    qOut : (N,4) ndarray
        unit quaternion, describing the orientation relative to the
        coordinate system spanned by the local magnetic field, and gravity

    Notes
    -----
    Based on "Design, Implementation, and Experimental Results of a
    Quaternion-Based Kalman Filter for Human Body Motion Tracking"
    Yun, X. and Bachmann, E.R., IEEE TRANSACTIONS ON ROBOTICS,
    VOL. 22, 1216-1227 (2006)
    '''

    numData = len(acc)

    # Set parameters for Kalman Filter
    tstep = 1. / rate
    tau = [0.5, 0.5, 0.5]  # from Yun, 2006

    # Initializations
    x_k = np.zeros(7)      # state vector
    z_k = np.zeros(7)      # measurement vector
    z_k_pre = np.zeros(7)
    P_k = np.matrix(np.eye(7))           # error covariance matrix P_k

    Phi_k = np.matrix(np.zeros((7, 7)))  # discrete state transition matrix Phi_k
    for ii in range(3):
        Phi_k[ii, ii] = np.exp(-tstep / tau[ii])

    H_k = np.matrix(np.eye(7))           # measurement matrix (identity)

    Q_k = np.matrix(np.zeros((7, 7)))    # process noise matrix Q_k
    D = 0.0001 * np.r_[0.4, 0.4, 0.4]    # [rad^2/sec^2]; from Yun, 2006
                                         # check 0.0001 in Yun
    for ii in range(3):
        Q_k[ii, ii] = D[ii] / (2 * tau[ii]) * (1 - np.exp(-2 * tstep / tau[ii]))

    # Evaluate measurement noise covariance matrix R_k
    R_k = np.matrix(np.zeros((7, 7)))
    r_angvel = 0.01   # [rad**2/sec**2]; from Yun, 2006
    r_quats = 0.0001  # from Yun, 2006
    for ii in range(7):
        if ii < 3:
            R_k[ii, ii] = r_angvel
        else:
            R_k[ii, ii] = r_quats

    # Calculation of orientation for every time step
    qOut = np.zeros((numData, 4))

    for ii in range(numData):
        accelVec = acc[ii, :]
        magVec = mag[ii, :]
        angvelVec = omega[ii, :]
        z_k_pre = z_k.copy()  # watch out: by default, Python passes the reference!!

        # Evaluate quaternion based on acceleration and magnetic field data
        accelVec_n = vector.normalize(accelVec)
        magVec_hor = magVec - accelVec_n * accelVec_n.dot(magVec)
        magVec_n = vector.normalize(magVec_hor)
        basisVectors = np.vstack((magVec_n,
                                  np.cross(accelVec_n, magVec_n),
                                  accelVec_n)).T
        quatRef = quat.q_inv(quat.rotmat2quat(basisVectors)).flatten()

        # Update measurement vector z_k
        z_k[:3] = angvelVec
        z_k[3:] = quatRef

        # Calculate Kalman Gain
        # K_k = P_k * H_k.T * inv(H_k*P_k*H_k.T + R_k)
        # Check: why is H_k used in the original formulas?
        K_k = P_k * np.linalg.inv(P_k + R_k)

        # Update state vector x_k
        x_k += np.array(K_k.dot(z_k - z_k_pre)).ravel()

        # Evaluate discrete state transition matrix Phi_k
        Phi_k[3, :] = np.r_[-x_k[4] * tstep / 2, -x_k[5] * tstep / 2, -x_k[6] * tstep / 2,
                            1, -x_k[0] * tstep / 2, -x_k[1] * tstep / 2, -x_k[2] * tstep / 2]
        Phi_k[4, :] = np.r_[x_k[3] * tstep / 2, -x_k[6] * tstep / 2, x_k[5] * tstep / 2,
                            x_k[0] * tstep / 2, 1, x_k[2] * tstep / 2, -x_k[1] * tstep / 2]
        Phi_k[5, :] = np.r_[x_k[6] * tstep / 2, x_k[3] * tstep / 2, -x_k[4] * tstep / 2,
                            x_k[1] * tstep / 2, -x_k[2] * tstep / 2, 1, x_k[0] * tstep / 2]
        Phi_k[6, :] = np.r_[-x_k[5] * tstep / 2, x_k[4] * tstep / 2, x_k[3] * tstep / 2,
                            x_k[2] * tstep / 2, x_k[1] * tstep / 2, -x_k[0] * tstep / 2, 1]

        # Update error covariance matrix
        # P_k = (eye(7)-K_k*H_k)*P_k
        # Check: why is H_k used in the original formulas?
        P_k = (H_k - K_k) * P_k

        # Projection of state quaternions
        x_k[3:] += quat.q_mult(0.5 * x_k[3:], np.r_[0, x_k[:3]]).flatten()
        x_k[3:] = vector.normalize(x_k[3:])

        x_k[:3] = np.zeros(3)
        x_k[:3] = tstep * (-x_k[:3] + z_k[:3])

        qOut[ii, :] = x_k[3:]

        # Projection of error covariance matrix
        P_k = Phi_k * P_k * Phi_k.T + Q_k

    # Make the first orientation the reference orientation
    qOut = quat.q_mult(qOut, quat.q_inv(qOut[0]))

    return qOut
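# --- Why H_k drops out of the gain formula ----------------------------------
# The "Check: why is H_k used in the original formulas?" comments above can
# be resolved on paper: with H_k equal to the identity, the general Kalman
# gain K = P H^T (H P H^T + R)^-1 reduces to P (P + R)^-1, which is exactly
# what the code computes.  A small numerical check (only needs NumPy):
import numpy as np

rng = np.random.default_rng(0)
P = np.diag(rng.uniform(0.5, 2.0, 7))        # any positive-definite covariance
R = np.diag(rng.uniform(0.001, 0.02, 7))
H = np.eye(7)

K_general = P @ H.T @ np.linalg.inv(H @ P @ H.T + R)
K_simplified = P @ np.linalg.inv(P + R)
print(np.allclose(K_general, K_simplified))  # True: the identity H cancels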
print('Input:')
pprint(q_vec)

# Unit quaternion
q_unit = quat.unit_q(q_vec)
print('\nUnit quaternions:')
pprint(q_unit)

# Also add a non-unit quaternion
q_non_unit = np.r_[1, 0, np.sin(alpha_rad[0] / 2), 0]
q_data = np.vstack((q_unit, q_non_unit))
print('\nGeneral quaternions:')
pprint(q_data)

# Inversion
q_inverted = quat.q_inv(q_data)
print('\nInverted:')
pprint(q_inverted)

# Conjugation
q_conj = quat.q_conj(q_data)
print('\nConjugated:')
pprint(q_conj)

# Multiplication
q_multiplied = quat.q_mult(q_data, q_data)
print('\nMultiplied:')
pprint(q_multiplied)

# Scalar and vector part
q_scalar = quat.q_scalar(q_data)
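# --- What q_inv computes -----------------------------------------------------
# For a unit quaternion the inverse equals the conjugate; for a general
# quaternion it is the conjugate divided by the squared norm.  A minimal
# check, assuming numpy as np and the quat module are imported as in the
# script above:
q = np.r_[1., 0., np.sin(0.1), 0.]              # a non-unit quaternion
manual_inv = quat.q_conj(q) / np.sum(q**2)      # conjugate / |q|^2
print(np.allclose(quat.q_inv(q), manual_inv))   # expected: True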