def toPose(position, quaternion):
    """Build a 4x4 homogeneous transform from a translation and a quaternion.

    Parameters
    ----------
    position : array_like(3,)
        Translation vector, in [m].
    quaternion : array_like(4,)
        Rotation quaternion (w, x, y, z); normalized internally, so it
        need not be unit length on input.

    Returns
    -------
    ndarray(4,4)
        Homogeneous transformation matrix [R | t; 0 0 0 1].
    """
    q = np.asarray(quaternion)
    q = q / np.linalg.norm(q)  # guard against non-unit input
    pose = np.eye(4)
    pose[:3, :3] = quat.quat2rotmat(q)
    pose[:3, 3] = np.asarray(position).transpose()
    return pose
# inputs
# from body frame to world
q_wb = [0.042773, 0.816907, -0.063879, 0.571623]  # w, x, y, z
# from random camera (r) to reference camera (c)
q_cr = [1.0, 0.0, 0.0, 0.0]
g_r = [0.155649, 9.290025, 3.147590]
T_bc = [
    0.0148655429818, -0.999880929698, 0.00414029679422, -0.0216401454975,
    0.999557249008, 0.0149672133247, 0.025715529948, -0.064676986768,
    -0.0257744366974, 0.00375618835797, 0.999660727178, 0.00981073058949,
    0.0, 0.0, 0.0, 1.0
]
g_absolute = np.asarray([0, 0, -9.8])

R_wb = quat.quat2rotmat(q_wb)
R_cr = quat.quat2rotmat(q_cr)
g_r = np.asarray(g_r).transpose()
T_bc = np.asarray(T_bc).reshape(4, 4)

# Chain the rotations: reference-camera frame -> camera frame -> body -> world.
g_world = np.dot(R_wb, np.dot(T_bc[:3, :3], np.dot(R_cr, g_r)))

# FIX: use the Python-3 print() function (the original used Python-2 print
# statements) and np.pi (the original referenced a bare `pi` with no visible
# import, a NameError risk; the value is identical).
print(g_world)
print(np.arccos(
    np.dot(g_world / np.linalg.norm(g_world),
           g_absolute / np.linalg.norm(g_absolute))
) * 180 / np.pi)
def calc_QPos(R_initialOrientation, omega, initialPosition, accMeasured, rate):
    '''
    Reconstruct position and orientation, from angular velocity and linear
    acceleration. Assumes a start in a stationary position.
    No compensation for drift.

    Parameters
    ----------
    omega : ndarray(N,3)
        Angular velocity, in [rad/s]
    accMeasured : ndarray(N,3)
        Linear acceleration, in [m/s^2]
    initialPosition : ndarray(3,)
        initial Position, in [m]
    R_initialOrientation: ndarray(3,3)
        Rotation matrix describing the initial orientation of the sensor,
        except a mis-orienation with respect to gravity
    rate : float
        sampling rate, in [Hz]

    Returns
    -------
    q : ndarray(N,3)
        Orientation, expressed as a quaternion vector
    pos : ndarray(N,3)
        Position in space [m]

    Example
    -------
    >>> q1, pos1 = calc_QPos(R_initialOrientation, omega, initialPosition, acc, rate)
    '''
    # Transform recordings to angVel/acceleration in space --------------

    # Orientation of \vec{g} with the sensor in the "R_initialOrientation"
    g = 9.81
    g0 = np.linalg.inv(R_initialOrientation).dot(r_[0, 0, g])

    # for the remaining deviation, assume the shortest rotation to there
    q0 = vector.qrotate(accMeasured[0], g0)
    R0 = quat.quat2rotmat(q0)

    # combine the two, to form a reference orientation. Note that the sequence
    # is very important!
    R_ref = R_initialOrientation.dot(R0)
    q_ref = quat.rotmat2quat(R_ref)

    # Calculate orientation q by "integrating" omega -----------------
    q = quat.vel2quat(omega, q_ref, rate, 'bf')

    # Acceleration, velocity, and position ----------------------------
    # From q and the measured acceleration, get the \frac{d^2x}{dt^2}
    g_v = r_[0, 0, g]
    accReSensor = accMeasured - vector.rotate_vector(g_v, quat.quatinv(q))
    accReSpace = vector.rotate_vector(accReSensor, q)

    # Make the first position the reference position
    q = quat.quatmult(q, quat.quatinv(q[0]))

    # compensate for drift
    #drift = np.mean(accReSpace, 0)
    #accReSpace -= drift*0.7

    # Position and Velocity through integration, assuming 0-velocity at t=0
    vel = np.nan * np.ones_like(accReSpace)
    pos = np.nan * np.ones_like(accReSpace)

    for ii in range(accReSpace.shape[1]):
        vel[:, ii] = cumtrapz(accReSpace[:, ii], dx=1. / rate, initial=0)
        # BUG FIX: cumtrapz's `initial` argument only *prepends* that value to
        # the result; it does not offset the cumulative integral. The original
        # `initial=initialPosition[ii]` therefore set pos[0] correctly but left
        # every later sample missing the initial-position offset. Integrate
        # from 0 and add the offset to the whole trajectory instead.
        pos[:, ii] = cumtrapz(vel[:, ii], dx=1. / rate, initial=0) + initialPosition[ii]

    return (q, pos)
# NOTE(review): this is a second, near-identical definition of calc_QPos; at
# import time it shadows the earlier one. Consider deleting one of the two.
def calc_QPos(R_initialOrientation, omega, initialPosition, accMeasured, rate):
    '''
    Reconstruct position and orientation, from angular velocity and linear
    acceleration. Assumes a start in a stationary position.
    No compensation for drift.

    Parameters
    ----------
    omega : ndarray(N,3)
        Angular velocity, in [rad/s]
    accMeasured : ndarray(N,3)
        Linear acceleration, in [m/s^2]
    initialPosition : ndarray(3,)
        initial Position, in [m]
    R_initialOrientation: ndarray(3,3)
        Rotation matrix describing the initial orientation of the sensor,
        except a mis-orienation with respect to gravity
    rate : float
        sampling rate, in [Hz]

    Returns
    -------
    q : ndarray(N,3)
        Orientation, expressed as a quaternion vector
    pos : ndarray(N,3)
        Position in space [m]

    Example
    -------
    >>> q1, pos1 = calc_QPos(R_initialOrientation, omega, initialPosition, acc, rate)
    '''
    # Transform recordings to angVel/acceleration in space --------------

    # Orientation of \vec{g} with the sensor in the "R_initialOrientation"
    g = 9.81
    g0 = np.linalg.inv(R_initialOrientation).dot(r_[0, 0, g])

    # for the remaining deviation, assume the shortest rotation to there
    q0 = vector.qrotate(accMeasured[0], g0)
    R0 = quat.quat2rotmat(q0)

    # combine the two, to form a reference orientation. Note that the sequence
    # is very important!
    R_ref = R_initialOrientation.dot(R0)
    q_ref = quat.rotmat2quat(R_ref)

    # Calculate orientation q by "integrating" omega -----------------
    q = quat.vel2quat(omega, q_ref, rate, 'bf')

    # Acceleration, velocity, and position ----------------------------
    # From q and the measured acceleration, get the \frac{d^2x}{dt^2}
    g_v = r_[0, 0, g]
    accReSensor = accMeasured - vector.rotate_vector(g_v, quat.quatinv(q))
    accReSpace = vector.rotate_vector(accReSensor, q)

    # Make the first position the reference position
    q = quat.quatmult(q, quat.quatinv(q[0]))

    # compensate for drift
    #drift = np.mean(accReSpace, 0)
    #accReSpace -= drift*0.7

    # Position and Velocity through integration, assuming 0-velocity at t=0
    vel = np.nan * np.ones_like(accReSpace)
    pos = np.nan * np.ones_like(accReSpace)

    for ii in range(accReSpace.shape[1]):
        vel[:, ii] = cumtrapz(accReSpace[:, ii], dx=1. / rate, initial=0)
        # BUG FIX: cumtrapz's `initial` argument only *prepends* that value to
        # the result; it does not offset the cumulative integral. The original
        # `initial=initialPosition[ii]` therefore set pos[0] correctly but left
        # every later sample missing the initial-position offset. Integrate
        # from 0 and add the offset to the whole trajectory instead.
        pos[:, ii] = cumtrapz(vel[:, ii], dx=1. / rate, initial=0) + initialPosition[ii]

    return (q, pos)