import numpy as np
from numpy import r_
from scipy.integrate import cumtrapz

# Note: "vector" and "quat" are assumed to be the corresponding helper modules
# from the scikit-kinematics package; adjust the import to your installation.
from skinematics import quat, vector


def kalman_quat(rate, acc, omega, mag):
    '''
    Calculate the orientation from IMU magnetometer data.

    Parameters
    ----------
    rate : float
        sample rate [Hz]
    acc : (N,3) ndarray
        linear acceleration [m/sec^2]
    omega : (N,3) ndarray
        angular velocity [rad/sec]
    mag : (N,3) ndarray
        magnetic field orientation

    Returns
    -------
    qOut : (N,4) ndarray
        unit quaternion, describing the orientation relative to the
        coordinate system spanned by the local magnetic field and gravity

    Notes
    -----
    Based on "Design, Implementation, and Experimental Results of a
    Quaternion-Based Kalman Filter for Human Body Motion Tracking",
    Yun, X. and Bachmann, E.R., IEEE Transactions on Robotics,
    Vol. 22, 1216-1227 (2006)
    '''

    numData = len(acc)

    # Set parameters for the Kalman filter
    tstep = 1. / rate
    tau = [0.5, 0.5, 0.5]   # from Yun, 2006

    # Initializations
    x_k = np.zeros(7)       # state vector
    z_k = np.zeros(7)       # measurement vector
    z_k_pre = np.zeros(7)
    P_k = np.matrix(np.eye(7))              # error covariance matrix P_k

    Phi_k = np.matrix(np.zeros((7, 7)))     # discrete state transition matrix Phi_k
    for ii in range(3):
        Phi_k[ii, ii] = np.exp(-tstep / tau[ii])

    H_k = np.matrix(np.eye(7))              # measurement matrix (here: identity)

    Q_k = np.matrix(np.zeros((7, 7)))       # process noise matrix Q_k
    D = 0.0001 * r_[0.4, 0.4, 0.4]          # [rad^2/sec^2]; from Yun, 2006 (check 0.0001 in Yun)
    for ii in range(3):
        Q_k[ii, ii] = D[ii] / (2 * tau[ii]) * (1 - np.exp(-2 * tstep / tau[ii]))

    # Evaluate measurement noise covariance matrix R_k
    R_k = np.matrix(np.zeros((7, 7)))
    r_angvel = 0.01     # [rad**2/sec**2]; from Yun, 2006
    r_quats = 0.0001    # from Yun, 2006
    for ii in range(7):
        if ii < 3:
            R_k[ii, ii] = r_angvel
        else:
            R_k[ii, ii] = r_quats

    # Calculation of the orientation for every time step
    qOut = np.zeros((numData, 4))

    for ii in range(numData):
        accelVec = acc[ii, :]
        magVec = mag[ii, :]
        angvelVec = omega[ii, :]
        z_k_pre = z_k.copy()    # watch out: by default, Python passes the reference!

        # Evaluate quaternion based on acceleration and magnetic field data
        accelVec_n = vector.normalize(accelVec)
        magVec_hor = magVec - accelVec_n * accelVec_n.dot(magVec)
        magVec_n = vector.normalize(magVec_hor)
        basisVectors = np.vstack((magVec_n,
                                  np.cross(accelVec_n, magVec_n),
                                  accelVec_n)).T
        quatRef = quat.quatinv(quat.rotmat2quat(basisVectors)).flatten()

        # Update measurement vector z_k
        z_k[:3] = angvelVec
        z_k[3:] = quatRef

        # Calculate the Kalman gain. The general form is
        #   K_k = P_k * H_k.T * inv(H_k*P_k*H_k.T + R_k);
        # since H_k is the identity matrix here, this simplifies to:
        K_k = P_k * np.linalg.inv(P_k + R_k)

        # Update state vector x_k
        x_k += np.array(K_k.dot(z_k - z_k_pre)).ravel()

        # Evaluate discrete state transition matrix Phi_k
        Phi_k[3, :] = r_[-x_k[4] * tstep / 2, -x_k[5] * tstep / 2, -x_k[6] * tstep / 2,
                         1, -x_k[0] * tstep / 2, -x_k[1] * tstep / 2, -x_k[2] * tstep / 2]
        Phi_k[4, :] = r_[x_k[3] * tstep / 2, -x_k[6] * tstep / 2, x_k[5] * tstep / 2,
                         x_k[0] * tstep / 2, 1, x_k[2] * tstep / 2, -x_k[1] * tstep / 2]
        Phi_k[5, :] = r_[x_k[6] * tstep / 2, x_k[3] * tstep / 2, -x_k[4] * tstep / 2,
                         x_k[1] * tstep / 2, -x_k[2] * tstep / 2, 1, x_k[0] * tstep / 2]
        Phi_k[6, :] = r_[-x_k[5] * tstep / 2, x_k[4] * tstep / 2, x_k[3] * tstep / 2,
                         x_k[2] * tstep / 2, x_k[1] * tstep / 2, -x_k[0] * tstep / 2, 1]

        # Update the error covariance matrix. The general form is
        #   P_k = (eye(7) - K_k*H_k) * P_k;
        # again, H_k is the identity matrix here:
        P_k = (H_k - K_k) * P_k

        # Projection of the state quaternions
        x_k[3:] += quat.quatmult(0.5 * x_k[3:], r_[0, x_k[:3]]).flatten()
        x_k[3:] = vector.normalize(x_k[3:])
        x_k[:3] = np.zeros(3)
        x_k[:3] = tstep * (-x_k[:3] + z_k[:3])

        qOut[ii, :] = x_k[3:]

        # Projection of the error covariance matrix
        P_k = Phi_k * P_k * Phi_k.T + Q_k

    # Make the first position the reference position
    qOut = quat.quatmult(qOut, quat.quatinv(qOut[0]))

    return qOut
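
# Illustrative usage sketch (an assumption, not part of the original code):
# how kalman_quat might be called for a recording sampled at 100 Hz.  The
# arrays below are synthetic placeholders for a sensor at rest, not real
# measurements, and the name _demo_kalman_quat is hypothetical.
def _demo_kalman_quat():
    N = 1000
    rate = 100.                                 # sample rate [Hz]
    acc = np.tile(r_[0., 0., 9.81], (N, 1))     # gravity only: sensor at rest
    omega = np.zeros((N, 3))                    # no angular velocity
    mag = np.tile(r_[0.4, 0., 0.6], (N, 1))     # constant, arbitrary magnetic field
    q = kalman_quat(rate, acc, omega, mag)
    print(q.shape)                              # expected: (N, 4)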
def calc_QPos(R_initialOrientation, omega, initialPosition, accMeasured, rate):
    '''
    Reconstruct position and orientation from angular velocity and linear
    acceleration. Assumes a start in a stationary position. No compensation
    for drift.

    Parameters
    ----------
    R_initialOrientation : ndarray(3,3)
        Rotation matrix describing the initial orientation of the sensor,
        except for a mis-orientation with respect to gravity
    omega : ndarray(N,3)
        Angular velocity, in [rad/s]
    initialPosition : ndarray(3,)
        Initial position, in [m]
    accMeasured : ndarray(N,3)
        Linear acceleration, in [m/s^2]
    rate : float
        Sampling rate, in [Hz]

    Returns
    -------
    q : ndarray(N,3)
        Orientation, expressed as a quaternion vector
    pos : ndarray(N,3)
        Position in space [m]

    Example
    -------
    >>> q1, pos1 = calc_QPos(R_initialOrientation, omega, initialPosition, acc, rate)
    '''

    # Transform recordings to angVel/acceleration in space ---------------

    # Orientation of \vec{g} with the sensor in the "R_initialOrientation"
    g = 9.81
    g0 = np.linalg.inv(R_initialOrientation).dot(r_[0, 0, g])

    # For the remaining deviation, assume the shortest rotation to there
    q0 = vector.qrotate(accMeasured[0], g0)
    R0 = quat.quat2rotmat(q0)

    # Combine the two to form a reference orientation. Note that the
    # sequence is very important!
    R_ref = R_initialOrientation.dot(R0)
    q_ref = quat.rotmat2quat(R_ref)

    # Calculate orientation q by "integrating" omega ---------------------
    q = quat.vel2quat(omega, q_ref, rate, 'bf')

    # Acceleration, velocity, and position -------------------------------
    # From q and the measured acceleration, get \frac{d^2x}{dt^2}
    g_v = r_[0, 0, g]
    accReSensor = accMeasured - vector.rotate_vector(g_v, quat.quatinv(q))
    accReSpace = vector.rotate_vector(accReSensor, q)

    # Make the first position the reference position
    q = quat.quatmult(q, quat.quatinv(q[0]))

    # compensate for drift
    #drift = np.mean(accReSpace, 0)
    #accReSpace -= drift*0.7

    # Position and velocity through integration, assuming 0-velocity at t=0
    vel = np.nan * np.ones_like(accReSpace)
    pos = np.nan * np.ones_like(accReSpace)

    for ii in range(accReSpace.shape[1]):
        vel[:, ii] = cumtrapz(accReSpace[:, ii], dx=1. / rate, initial=0)
        pos[:, ii] = cumtrapz(vel[:, ii], dx=1. / rate,
                              initial=initialPosition[ii])

    return (q, pos)
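
# Illustrative usage sketch (an assumption, not part of the original code):
# calc_QPos applied to a stationary sensor aligned with the space-fixed axes.
# With gravity as the only measured acceleration and zero angular velocity,
# the reconstructed position should stay at the initial position.  The name
# _demo_calc_QPos is hypothetical.
def _demo_calc_QPos():
    N = 500
    rate = 100.                                  # sample rate [Hz]
    R_init = np.eye(3)                           # sensor aligned with space axes
    omega = np.zeros((N, 3))                     # no rotation
    acc = np.tile(r_[0., 0., 9.81], (N, 1))      # gravity only
    pos0 = np.zeros(3)                           # start at the origin
    q, pos = calc_QPos(R_init, omega, pos0, acc, rate)
    print(pos[-1])                               # expected to remain near [0, 0, 0]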