Example #1
    def RediscretizeTrajectory(self, traj, num_between_points):
        or_points = len(traj)
        if (num_between_points < 1):
            return None

        new_traj = []
        for i in range(0, or_points - 1):
            cur_traj_point = traj[i]
            next_traj_point = traj[i + 1]

            old_pos = cur_traj_point[0:3, 3]
            next_pos = next_traj_point[0:3, 3]
            pos_diff = (next_pos - old_pos)

            or_quat = transformations.quaternion_from_matrix(cur_traj_point)
            next_quat = transformations.quaternion_from_matrix(next_traj_point)

            new_traj.append(np.copy(cur_traj_point))
            for j in range(0, num_between_points):
                fraction = float(j + 1) / float(num_between_points + 1)
                interp_quat = transformations.quaternion_slerp(
                    or_quat, next_quat, fraction)
                interp_pos = old_pos + (fraction * pos_diff)

                interp_trans = transformations.quaternion_translation_matrix(
                    interp_quat, interp_pos)
                new_traj.append(np.copy(interp_trans))
        new_traj.append(np.copy(traj[-1]))  # append the last trajectory point

        return new_traj
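Note: quaternion_translation_matrix does not appear in the widely used transformations.py by Christoph Gohlke. A minimal sketch of what it presumably does, composing a 4x4 homogeneous transform from a (w, x, y, z) quaternion and a translation vector:

import numpy as np
import transformations

def quaternion_translation_matrix(quat, trans):
    # hypothetical helper: rotation from the quaternion, translation in the
    # last column of the homogeneous matrix
    M = transformations.quaternion_matrix(quat)  # 4x4, zero translation
    M[0:3, 3] = trans
    return M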
Example #2
    def update_all_transforms(self, all_frames):

        # positions = frames[0]
        # rotations = frames[1]
        positions = []
        rotations = []
        for f in all_frames:
            for i, p in enumerate(f[0]):
                positions.append(f[0][i])
                rotations.append(f[1][i])

        for c in self.collision_objects:
            if c.type == 'robot_link':
                name = c.name
                name_arr = name.split('_')
                arm_id = int(name_arr[1])
                link_id = int(name_arr[2])
                ptA = all_frames[arm_id][0][link_id]
                ptB = all_frames[arm_id][0][link_id + 1]
                midPt = ptA + 0.5 * (ptB - ptA)
                final_pos = midPt

                rot_mat = np.zeros((3, 3))
                z = ptB - ptA
                norm = max(np.linalg.norm(z), 0.000001)
                z = (1.0 / norm) * z
                up = np.array([0, 0, 1])
                # use a tolerance and also catch the antiparallel case, where
                # the cross product below would otherwise degenerate to zero
                if abs(np.dot(z, up)) > 1.0 - 1e-9:
                    up = np.array([1, 0, 0])
                x = np.cross(up, z)
                y = np.cross(z, x)
                rot_mat[:, 0] = x
                rot_mat[:, 1] = y
                rot_mat[:, 2] = z

                final_quat = T.quaternion_from_matrix(rot_mat)
            else:
                coordinate_frame = c.coordinate_frame
                # first, do local transforms
                frame_len = len(positions)
                if coordinate_frame == 0:
                    rot_mat = rotations[0]
                    final_pos = positions[0]
                elif coordinate_frame >= frame_len:
                    rot_mat = rotations[frame_len - 1]
                    final_pos = positions[frame_len - 1]
                else:
                    rot_mat = rotations[coordinate_frame]
                    final_pos = positions[coordinate_frame - 1]

                final_quat = T.quaternion_from_matrix(rot_mat)

                local_translation = np.array(c.translation)
                rotated_local_translation = np.dot(rot_mat, local_translation)
                final_pos = final_pos + rotated_local_translation

                local_rotation = c.quaternion
                final_quat = T.quaternion_multiply(local_rotation, final_quat)

            c.update_transform(final_pos, final_quat)
Example #3
def evaluate_R_t(R_gt, t_gt, R, t, q_gt=None):

    # from Utils.transformations import quaternion_from_matrix

    t = t.flatten()
    t_gt = t_gt.flatten()

    eps = 1e-15

    if q_gt is None:
        q_gt = quaternion_from_matrix(R_gt)
    q = quaternion_from_matrix(R)
    q = q / (np.linalg.norm(q) + eps)
    q_gt = q_gt / (np.linalg.norm(q_gt) + eps)
    loss_q = np.maximum(eps, (1.0 - np.sum(q * q_gt)**2))
    err_q = np.arccos(1 - 2 * loss_q)

    # dR = np.dot(R, R_gt.T)
    # dt = t - np.dot(dR, t_gt)
    # dR = np.dot(R, R_gt.T)
    # dt = t - t_gt
    t = t / (np.linalg.norm(t) + eps)
    t_gt = t_gt / (np.linalg.norm(t_gt) + eps)
    loss_t = np.maximum(eps, (1.0 - np.sum(t * t_gt)**2))
    err_t = np.arccos(np.sqrt(1 - loss_t))

    if np.sum(np.isnan(err_q)) or np.sum(np.isnan(err_t)):
        # This should never happen! Debug here
        import IPython
        IPython.embed()

    return err_q, err_t
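The angular error above relies on the identity <q, q_gt> = cos(theta/2) for unit quaternions, so arccos(1 - 2 * (1 - <q, q_gt>^2)) = arccos(2 * cos^2(theta/2) - 1) = theta, the rotation angle between R and R_gt. A minimal numerical check, assuming Gohlke's transformations module supplies quaternion_from_matrix and rotation_matrix:

import numpy as np
from transformations import rotation_matrix

R_gt = np.eye(3)
R = rotation_matrix(0.3, [0, 0, 1])[:3, :3]  # 0.3 rad about z
t = np.array([1.0, 0.0, 0.0])
err_q, err_t = evaluate_R_t(R_gt, t, R, t)
print(err_q)  # ~0.3 (the rotation angle); err_t ~0 (parallel translations)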
Example #4
    def __predict_nominal_state(self, imu_measurement: np.array):
        p = self.nominal_state[:3].reshape(-1, 1)
        q = self.nominal_state[3:7]
        v = self.nominal_state[7:10].reshape(-1, 1)
        a_b = self.nominal_state[10:13].reshape(-1, 1)
        w_b = self.nominal_state[13:16]
        g = self.nominal_state[16:19].reshape(-1, 1)

        w_m = imu_measurement[1:4].copy()
        a_m = imu_measurement[4:7].reshape(-1, 1).copy()
        dt = imu_measurement[0] - self.last_predict_time
        """
        dp/dt = v
        dv/dt = R(a_m - a_b) + g
        dq/dt = 0.5 * q x(quaternion product) (w_m - w_b)
        
        a_m and w_m are the measurements of IMU.
        a_b and w_b are biases of acc and gyro, respectively.
        R = R{q}, which bring the point from local coordinate to global coordinate.
        """
        w_m -= w_b
        a_m -= a_b

        # use the zero-order integration to integrate the quaternion.
        # q_{n+1} = q_n x q{(w_m - w_b) * dt}
        angle = la.norm(w_m)
        axis = w_m / angle  # NOTE: assumes a nonzero rate; angle == 0 would divide by zero
        R_w = tr.rotation_matrix(0.5 * dt * angle, axis)
        q_w = tr.quaternion_from_matrix(R_w, True)
        q_half_next = tr.quaternion_multiply(q, q_w)

        R_w = tr.rotation_matrix(dt * angle, axis)
        q_w = tr.quaternion_from_matrix(R_w, True)
        q_next = tr.quaternion_multiply(q, q_w)
        if q_next[0] < 0:  # force the quaternion to have a positive real part
            q_next *= -1

        # use the RK4 method to integrate velocity and position.
        # integrate velocity first.
        R = tr.quaternion_matrix(q)[:3, :3]
        R_half_next = tr.quaternion_matrix(q_half_next)[:3, :3]
        R_next = tr.quaternion_matrix(q_next)[:3, :3]
        v_k1 = R @ a_m + g
        v_k2 = R_half_next @ a_m + g
        # v_k3 = R_half_next @ a_m + g  # yes. v_k2 = v_k3.
        v_k3 = v_k2
        v_k4 = R_next @ a_m + g
        v_next = v + dt * (v_k1 + 2 * v_k2 + 2 * v_k3 + v_k4) / 6

        # integrate position
        p_k1 = v
        p_k2 = v + 0.5 * dt * v_k1  # k2 = v_next_half = v + 0.5 * dt * v' = v + 0.5 * dt * v_k1(evaluate at t0)
        p_k3 = v + 0.5 * dt * v_k2  # v_k2 is evaluated at t0 + 0.5*delta
        p_k4 = v + dt * v_k3
        p_next = p + dt * (p_k1 + 2 * p_k2 + 2 * p_k3 + p_k4) / 6

        self.nominal_state[:3] = p_next.reshape(3, )
        self.nominal_state[3:7] = q_next
        self.nominal_state[7:10] = v_next.reshape(3, )
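For reference, a standalone sketch of the zero-order quaternion update used above, q_{n+1} = q_n x q{(w_m - w_b) * dt}, assuming Gohlke's transformations as tr and (w, x, y, z) quaternion order; quaternion_about_axis is a direct shortcut for the rotation_matrix / quaternion_from_matrix round trip in the filter:

import numpy as np
import numpy.linalg as la
import transformations as tr

q = tr.quaternion_about_axis(0.1, [1, 0, 0])  # current attitude
w = np.array([0.0, 0.0, 0.5])                 # bias-corrected body rate [rad/s]
dt = 0.01

angle = la.norm(w)
axis = w / angle
q_w = tr.quaternion_about_axis(angle * dt, axis)  # incremental rotation over dt
q_next = tr.quaternion_multiply(q, q_w)
if q_next[0] < 0:  # keep a positive real part, as in the filter above
    q_next *= -1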
Example #5
    def getBlendedPose(self, poses, weights, additiveBlending=True):
        import transformations as tm

        REST_QUAT = np.asarray([1, 0, 0, 0], dtype=np.float32)

        if isinstance(poses[0], basestring):
            f_idxs = [self.getPoseNames().index(pname) for pname in poses]
        else:
            f_idxs = poses

        if not additiveBlending:
            # normalize weights
            if not isinstance(weights, np.ndarray):
                weights = np.asarray(weights, dtype=np.float32)
            t = sum(weights)
            if t < 1:
                # Fill up rest with neutral pose (neutral pose is assumed to be first frame)
                weights = np.asarray(weights + [1.0 - t], dtype=np.float32)
                f_idxs.append(0)
            weights /= t

        #print zip([self.getPoseNames()[_f] for _f in f_idxs],weights)

        result = emptyPose(self.nBones)
        m = np.identity(4, dtype=np.float32)
        m1 = np.identity(4, dtype=np.float32)
        m2 = np.identity(4, dtype=np.float32)

        if len(f_idxs) == 1:
            for b_idx in xrange(self.nBones):
                m[:3, :4] = self.getAtFramePos(f_idxs[0], True)[b_idx]
                q = tm.quaternion_slerp(REST_QUAT,
                                        tm.quaternion_from_matrix(m, True),
                                        float(weights[0]))
                result[b_idx] = tm.quaternion_matrix(q)[:3, :4]
        else:
            for b_idx in xrange(self.nBones):
                m1[:3, :4] = self.getAtFramePos(f_idxs[0], True)[b_idx]
                m2[:3, :4] = self.getAtFramePos(f_idxs[1], True)[b_idx]
                q1 = tm.quaternion_slerp(REST_QUAT,
                                         tm.quaternion_from_matrix(m1, True),
                                         float(weights[0]))
                q2 = tm.quaternion_slerp(REST_QUAT,
                                         tm.quaternion_from_matrix(m2, True),
                                         float(weights[1]))
                quat = tm.quaternion_multiply(q2, q1)

                for i, f_idx in enumerate(f_idxs[2:]):
                    i += 2
                    m[:3, :4] = self.getAtFramePos(f_idx, True)[b_idx]
                    q = tm.quaternion_slerp(REST_QUAT,
                                            tm.quaternion_from_matrix(m, True),
                                            float(weights[i]))
                    quat = tm.quaternion_multiply(q, quat)

                result[b_idx] = tm.quaternion_matrix(quat)[:3, :4]

        return Pose(self.name + '-blended', result)
Example #6
def save_trajectory_to_file(time,
                            t_world_body,
                            R_world_body,
                            body_w_world_body_measured,
                            body_a_measured,
                            gyro_bias,
                            acc_bia,
                            world_v,
                            output_dir='/tmp'):

    # Trace groundtruth poses.
    file_out = open(os.path.join(output_dir, 'groundtruth.csv'), 'w')
    file_out.write(
        '# timestamp [ns], t_w_b(x) [m], t_w_b(y) [m], t_w_b(z) [m], q_w_b(x) [rad], q_w_b(y) [rad], q_w_b(z) [rad], q_w_b(w) [rad]\n'
    )
    for i in range(len(time)):
        T = np.eye(4)
        T[:3, :3] = np.reshape(R_world_body[i, :], (3, 3))
        q_world_body = transformations.quaternion_from_matrix(T)
        q_world_body = q_world_body / np.linalg.norm(q_world_body)
        #q_world_body = q_world_body / np.linalg.norm(q_world_body)
        file_out.write('%d, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f\n' %
                       (time[i], t_world_body[i, 0], t_world_body[i, 1],
                        t_world_body[i, 2], q_world_body[0], q_world_body[1],
                        q_world_body[2], q_world_body[3]))
    file_out.close()

    # Trace all groundtruth states (bias, velocity, pose)
    file_out = open(os.path.join(output_dir, 'groundtruth_states.csv'), 'w')
    file_out.write(
        '# timestamp [ns], p_RS_R_x [m], p_RS_R_y [m], p_RS_R_z [m], q_RS_w [], q_RS_x [], q_RS_y [], q_RS_z [],  v_RS_R_x [m s^-1], v_RS_R_y [m s^-1], v_RS_R_z [m s^-1], b_w_RS_S_x [rad s^-1], b_w_RS_S_y [rad s^-1], b_w_RS_S_z [rad s^-1], b_a_RS_S_x [m s^-2], b_a_RS_S_y [m s^-2], b_a_RS_S_z [m s^-2]\n'
    )
    for i in range(len(time)):
        T = np.eye(4)
        T[:3, :3] = np.reshape(R_world_body[i, :], (3, 3))
        q_world_body = transformations.quaternion_from_matrix(T)
        q_world_body = q_world_body / np.linalg.norm(q_world_body)
        #q_world_body = q_world_body / np.linalg.norm(q_world_body)
        file_out.write(
            '%d, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f\n'
            % (time[i], t_world_body[i, 0], t_world_body[i, 1],
               t_world_body[i, 2], q_world_body[3], q_world_body[0],
               q_world_body[1], q_world_body[2], world_v[i, 0], world_v[i, 1],
               world_v[i, 2], gyro_bias[i, 0], gyro_bias[i, 1],
               gyro_bias[i, 2], acc_bia[i, 0], acc_bia[i, 1], acc_bia[i, 2]))
    file_out.close()

    # Trace IMU Measurements
    file_out = open(os.path.join(output_dir, 'imu0_data.csv'), 'w')
    file_out.write('# timestamp, gx, gy, gz, ax, ay, az\n')
    for i in range(len(time)):
        file_out.write(
            '%d, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f\n' %
            (time[i], body_w_world_body_measured[i, 0],
             body_w_world_body_measured[i, 1],
             body_w_world_body_measured[i, 2], body_a_measured[i, 0],
             body_a_measured[i, 1], body_a_measured[i, 2]))
    file_out.close()
Example #7
def interp_transforms(T1, T2, alpha):
    assert 0 <= alpha <= 1
    T_avg = alpha * T1 + (1 - alpha) * T2
    q1 = quaternion_from_matrix(T1)
    q2 = quaternion_from_matrix(T2)
    # slerp from q2 to q1 so that alpha weights T1, matching the linear
    # blend of the translations above
    q3 = quaternion_slerp(q2, q1, alpha)
    R = quaternion_matrix(q3)
    T_avg[0:3, 0:3] = R[0:3, 0:3]
    return T_avg
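A usage sketch for interp_transforms: alpha weights T1, so alpha = 0.5 gives the halfway pose (assuming Gohlke's transformations functions are imported as above):

import numpy as np
from transformations import rotation_matrix

T1 = np.eye(4)
T2 = rotation_matrix(np.pi / 2, [0, 0, 1])  # 90 deg about z
T2[0:3, 3] = [1.0, 0.0, 0.0]
T_mid = interp_transforms(T1, T2, 0.5)      # halfway: 45 deg about z, x = 0.5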
Example #8
 def matrix_slerp(matA, matB, alpha=0.6):
     if matA is None:
         return matB
     import transformations
     qA = transformations.quaternion_from_matrix(matA)
     qB = transformations.quaternion_from_matrix(matB)
     # slerp from qB to qA so that alpha weights matA, matching the
     # translation blend below
     qC = transformations.quaternion_slerp(qB, qA, alpha)
     mat = matB.copy()
     mat[:3, 3] = alpha * matA[:3, 3] + (1 - alpha) * matB[:3, 3]
     mat[:3, :3] = transformations.quaternion_matrix(qC)[:3, :3]
     return mat
Example #9
    def RotationDeviationCost(x, n, waypoint, manip):
        tool_pose = manip.GetEndEffector().GetTransform()

        way_point_quat = transformations.quaternion_from_matrix(waypoint[0:3,
                                                                         0:3])
        quat = transformations.quaternion_from_matrix(tool_pose[0:3, 0:3])
        adjustment_quat = self.utils.QuaternionFromTo(way_point_quat, quat)
        axis_diff_mag = np.fabs(
            sum(self.utils.SubVec(way_point_quat[1:4], adjustment_quat[1:4])))

        return (0.25 * axis_diff_mag + 0.75 * adjustment_quat[0]
                )  # 25% of the cost comes from the difference in axis
Example #10
    def getBlendedPose(self, poses, weights, additiveBlending=True):
        import transformations as tm

        REST_QUAT = np.asarray([1,0,0,0], dtype=np.float32)

        if isinstance(poses[0], basestring):
            f_idxs = [self.getPoseNames().index(pname) for pname in poses]
        else:
            f_idxs = poses

        if not additiveBlending:
            # normalize weights
            if not isinstance(weights, np.ndarray):
                weights = np.asarray(weights, dtype=np.float32)
            t = sum(weights)
            if t < 1:
                # Fill up rest with neutral pose (neutral pose is assumed to be first frame)
                weights = np.asarray(weights + [1.0-t], dtype=np.float32)
                f_idxs.append(0)
            weights /= t

        #print zip([self.getPoseNames()[_f] for _f in f_idxs],weights)

        result = emptyPose(self.nBones)
        m = np.identity(4, dtype=np.float32)
        m1 = np.identity(4, dtype=np.float32)
        m2 = np.identity(4, dtype=np.float32)

        if len(f_idxs) == 1:
            for b_idx in xrange(self.nBones):
                m[:3, :4] = self.getAtFramePos(f_idxs[0], True)[b_idx]
                q = tm.quaternion_slerp(REST_QUAT, tm.quaternion_from_matrix(m, True), float(weights[0]))
                result[b_idx] = tm.quaternion_matrix( q )[:3,:4]
        else:
            for b_idx in xrange(self.nBones):
                m1[:3, :4] = self.getAtFramePos(f_idxs[0], True)[b_idx]
                m2[:3, :4] = self.getAtFramePos(f_idxs[1], True)[b_idx]
                q1 = tm.quaternion_slerp(REST_QUAT, tm.quaternion_from_matrix(m1, True), float(weights[0]))
                q2 = tm.quaternion_slerp(REST_QUAT, tm.quaternion_from_matrix(m2, True), float(weights[1]))
                quat = tm.quaternion_multiply(q2, q1)

                for i,f_idx in enumerate(f_idxs[2:]):
                    i += 2
                    m[:3, :4] = self.getAtFramePos(f_idx, True)[b_idx]
                    q = tm.quaternion_slerp(REST_QUAT, tm.quaternion_from_matrix(m, True), float(weights[i]))
                    quat = tm.quaternion_multiply(q, quat)

                result[b_idx] = tm.quaternion_matrix( quat )[:3,:4]

        return Pose(self.name+'-blended', result)
Example #11
def flip_custom_coordinate_system_legs(q):
    conv_m = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0], [0, 0, 0,
                                                                   1]])

    m = quaternion_matrix(q)
    new_m = np.dot(conv_m, np.dot(m, conv_m))
    q = quaternion_from_matrix(new_m)

    conv_m = np.array([[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0,
                                                                   1]])

    m = quaternion_matrix(q)
    new_m = np.dot(conv_m, np.dot(m, conv_m))
    q = quaternion_from_matrix(new_m)
    return q
Example #12
def interpolate(cameras, n_inter):
    '''
  Interpolate camera parameters to create virtual cameras
  Args:
    cams: list of cameras. Each entry is a tuple with camera params RTfckp
    n_inter: number of interpolations to make between each camera pair
  Returns:
    vcams: a list with all the interpolated virtual cameras
  '''
    from transformations import quaternion_from_matrix, quaternion_slerp, quaternion_matrix, interpolate_spherical

    ncams = len(cameras)

    inter_cameras = []
    for i in range(ncams):
        inter_cameras.append([])
        fractions = np.linspace(0, 1, n_inter + 2)[1:-1]

        inter_idx = 1
        for inter in range(n_inter):
            # Interpolate rotation matrices (R) in quaternion space.
            q0 = quaternion_from_matrix(cameras[i][0])
            q1 = quaternion_from_matrix(cameras[(i + 1) % ncams][0])
            q_inter = quaternion_slerp(q0, q1, fractions[inter])
            cam_inter = [quaternion_matrix(q_inter)[:3, :3]]

            # Interpolate translations (T) in spherical coords around the center
            # probably the ideal thing would be to use dual quaternion slerp.
            T_mid = interpolate_spherical(
                cameras[i][1].reshape(1, -1),
                cameras[(i + 1) % ncams][1].reshape(1, -1), fractions[inter]).T
            cam_inter.append(T_mid)

            # Linear interpolation for the rest of numeric params (f, c, k, p)
            for j in range(2, 6):
                cam_inter.append(
                    (cameras[i][j] + cameras[(i + 1) % ncams][j]) / 2.)

            # Give it a dummy name (name) - v for virtual
            cam_inter.append(cameras[i][6] + ".v{0}".format(inter_idx))
            inter_idx = inter_idx + 1
            inter_cameras[-1].append(cam_inter)

    # Put everything into one big list
    #allcams = sum([[cam]+intercam for cam,intercam in zip(cameras, inter_cameras)], [])
    vcams = list(itertools.chain(*inter_cameras))

    return vcams
Example #13
  def extract_x_y_theta(self,
                        object_info,
                        t_worldaug_world=None,
                        preserve_theta=False):
    """Extract in-plane theta."""
    object_position = object_info[0]
    object_quat_xyzw = object_info[1]

    if t_worldaug_world is not None:
      object_quat_wxyz = (object_quat_xyzw[3], object_quat_xyzw[0],
                          object_quat_xyzw[1], object_quat_xyzw[2])
      t_world_object = transformations.quaternion_matrix(object_quat_wxyz)
      t_world_object[0:3, 3] = np.array(object_position)
      t_worldaug_object = t_worldaug_world @ t_world_object

      object_quat_wxyz = transformations.quaternion_from_matrix(
          t_worldaug_object)
      if not preserve_theta:
        object_quat_xyzw = (object_quat_wxyz[1], object_quat_wxyz[2],
                            object_quat_wxyz[3], object_quat_wxyz[0])
      object_position = t_worldaug_object[0:3, 3]

    object_xy = object_position[0:2]
    object_theta = -np.float32(
        utils.get_rot_from_pybullet_quaternion(object_quat_xyzw)
        [2]) / self.theta_scale
    return np.hstack(
        (object_xy,
         object_theta)).astype(np.float32), object_position, object_quat_xyzw
Example #14
def calculatePoseError(tf0, tf1):
    numDimQuat = 7  # 3 translation components + 4 quaternion components

    numData = len(tf0)  # should be the same for tf1
    poseError = np.empty((numData, numDimQuat))

    for i_data in range(numData):
        # relative transform between the two poses
        deltaTau = np.linalg.inv(tf0[i_data]).dot(tf1[i_data])
        diffTranslation = deltaTau[:3, 3]
        diffRotation = np.eye(4, 4)
        diffRotation[:3, :3] = deltaTau[:3, :3]

        diffQuat = quaternion_from_matrix(diffRotation)
        diffEuler = euler_from_matrix(diffRotation)  # alternative rotation representation (unused)

        # flip quaternions on the wrong side of the hypersphere
        # (this check assumes the scalar part is stored last, e.g. [x, y, z, w])
        if diffQuat[3] < 0:
            diffQuat = -diffQuat

        poseError[i_data, :] = np.r_[diffTranslation, diffQuat]
    return poseError
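A usage sketch for calculatePoseError, with lists of 4x4 homogeneous transforms (assuming quaternion_from_matrix and euler_from_matrix are imported from a transformations module):

import numpy as np
from transformations import rotation_matrix

tf0 = [np.eye(4)]
tf1 = [rotation_matrix(0.1, [0, 0, 1])]
err = calculatePoseError(tf0, tf1)
# err[0, :3] is the relative translation, err[0, 3:] the relative quaternion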
Example #15
 def setRotationIndex(self, index, angle, useQuat):
     """
     Set the rotation for one of the three rotation channels, either as
     quaternion or euler matrix. index should be 1,2 or 3 and represents
     x, y and z axis accordingly
     """
     if useQuat:
         quat = tm.quaternion_from_matrix(self.matPose)
         log.debug("%s", str(quat))
         quat[index] = angle/1000
         log.debug("%s", str(quat))
         _normalizeQuaternion(quat)
         log.debug("%s", str(quat))
         self.matPose = tm.quaternion_matrix(quat)
         return quat[0]*1000
     else:
         angle = angle*D
         ax,ay,az = tm.euler_from_matrix(self.matPose, axes='sxyz')
         if index == 1:
             ax = angle
         elif index == 2:
             ay = angle
         elif index == 3:
             az = angle
         mat = tm.euler_matrix(ax, ay, az, axes='sxyz')
         self.matPose[:3,:3] = mat[:3,:3]
         return 1000.0
Example #16
 def getRotation(self):
     """
     Get rotation matrix of rotation of this bone in local space.
     """
     qw,qx,qy,qz = tm.quaternion_from_matrix(self.matPose)
     ax,ay,az = tm.euler_from_matrix(self.matPose, axes='sxyz')
     return (1000*qw,1000*qx,1000*qy,1000*qz, ax/D,ay/D,az/D)
Example #17
 def getRotation(self):
     """
     Get rotation matrix of rotation of this bone in local space.
     """
     qw,qx,qy,qz = tm.quaternion_from_matrix(self.matPose)
     ax,ay,az = tm.euler_from_matrix(self.matPose, axes='sxyz')
     return (1000*qw,1000*qx,1000*qy,1000*qz, ax/D,ay/D,az/D)
Example #18
def back_front_thread():
    global published_transform
    rate = rospy.Rate(10)
    while True:
        time = rospy.Time.now()
        num = 0
        sum_mat = None
        #print("Time diff = %s" %(str(time.to_sec() - front_transform["time"].to_sec()) if front_transform["time"] is not None else "??"))
        if front_transform["transform"] is not None and time.to_sec(
        ) - front_transform["time"].to_sec() < 1:
            sum_mat = front_transform["transform"]
            num = 1.0
        # BRS Let's not use averages - seems to cause normalization errors?
        if back_transform["transform"] is not None and time.to_sec(
        ) - back_transform["time"].to_sec() < 1:
            sum_mat = back_transform[
                "transform"] if sum_mat is None else sum_mat + back_transform[
                    "transform"]
            num = num + 1.0
        if num > 0:
            transform = sum_mat / num
            published_transform = transform
            trans = tr.translation_from_matrix(transform)
            rot = tr.quaternion_from_matrix(transform)
            r, p, y = tr.euler_from_quaternion(rot)
            rot = tr.quaternion_from_euler(r, p, y)
            br.sendTransform(trans, rot, time, "odom", "map")
        # elif published_transform is not None:
        #     transform = published_transform
        #     trans = tr.translation_from_matrix(transform)
        #     rot = tr.quaternion_from_matrix(transform)
        #     br.sendTransform(trans, rot, time, "odom", "map")
        rate.sleep()
Example #19
def get_quaternion(z_axis, world_up):
    """
    z_axis = numpy.ndarray(n_pts, 3)
    world_up = axis representing the y axis
    """
    world_up = np.tile(world_up, len(z_axis)).reshape(len(z_axis), 3)
    side_axis = np.cross(world_up, z_axis)
    side_axis = side_axis / np.linalg.norm(side_axis, axis=1).reshape(-1, 1)
    cam_locs_to_remove = np.where(np.isnan(np.linalg.norm(side_axis, axis=1)))
    cam_locs_to_take = np.ones(len(world_up)).astype(int)  # np.int was removed in NumPy 1.24
    cam_locs_to_take[cam_locs_to_remove] = 0
    world_up = world_up[cam_locs_to_take.astype(bool)]
    side_axis = side_axis[cam_locs_to_take.astype(bool)]
    z_axis = z_axis[cam_locs_to_take.astype(bool)]
    up_axis = np.cross(z_axis, side_axis)

    # TODO: find a better way to do this
    rot_mat = np.zeros((len(z_axis), 4, 4))
    quats = list()
    for i in range(len(rot_mat)):
        rot_mat[i, :3, 0] = side_axis[i]
        rot_mat[i, :3, 1] = up_axis[i]
        rot_mat[i, :3, 2] = z_axis[i]
        rot_mat[i, 3, 3] = 1
        if np.isnan(np.sum(rot_mat)):
            print('in the nan of utils while generating quaternions')
            from IPython import embed
            embed()
        rot_mat[i] = sym(rot_mat[i])

        quats.append(transformations.quaternion_from_matrix(rot_mat[i]))
    return cam_locs_to_take, np.stack(quats)
Example #20
def transform_to_robot_direct(pos, quaternion):
    pkl_file = open('pkl_files/moCap_robot_calib.pkl', 'rb')
    ret_pos = np.zeros((len(pos), 3))
    ret_quat = np.zeros((len(quaternion), 4))
    #    ret_axis_angle = np.zeros((len(pos), 3))
    MVR = np.zeros((3, 3))
    PMR = np.array([])
    MVR_226 = pickle.load(pkl_file)
    pos = np.transpose(pos)
    CC = np.dot(MVR_226, pos)
    CC = np.transpose(CC)
    CC_x = [a[0] for a in CC]
    CC_y = [a[1] for a in CC]
    CC_z = [a[2] for a in CC]
    ret_pos = np.vstack((CC_x, CC_y, CC_z)).T

    for r_idx in range(len(MVR_226) - 1):
        row = MVR_226[r_idx]
        MVR[r_idx] = row[0:3]
        PMR = np.append(PMR, row[3])

    for q_idx in range(len(quaternion)):
        q_for_tran = quaternion[q_idx]
        t_for_tran = transformations.quaternion_matrix_rev(q_for_tran)
        tmp_M_1 = np.dot(MVR, t_for_tran)
        transformed_q = transformations.quaternion_from_matrix(tmp_M_1)
        ret_quat[q_idx] = transformed_q
    return ret_pos, ret_quat
Example #21
def generate_ankle_constraint_from_toe(skeleton,
                                       frames,
                                       frame_idx,
                                       ankle_joint_name,
                                       heel_joint,
                                       toe_joint_name,
                                       target_ground_height,
                                       toe_pos=None):
    """ create a constraint on the ankle position based on the toe constraint position"""
    #print "add toe constraint"
    if toe_pos is None:
        ct = skeleton.nodes[toe_joint_name].get_global_position(
            frames[frame_idx])
        ct[1] = target_ground_height  # set toe constraint on the ground
    else:
        ct = toe_pos

    a = skeleton.nodes[ankle_joint_name].get_global_position(frames[frame_idx])
    t = skeleton.nodes[toe_joint_name].get_global_position(frames[frame_idx])

    target_toe_offset = a - t  # difference between unmodified toe and ankle at the frame
    ca = ct + target_toe_offset  # move ankle so toe is on the ground

    m = skeleton.nodes[heel_joint].get_global_matrix(frames[frame_idx])
    m[:3, 3] = [0, 0, 0]
    oq = quaternion_from_matrix(m)
    oq = normalize(oq)

    return MotionGroundingConstraint(frame_idx, ankle_joint_name, ca, None, oq)
Example #22
 def to_local_cos2(self, joint_name, frame, q):
     # bring into parent coordinate system
     parent_joint = self.skeleton.nodes[joint_name].parent.node_name
     pm = self.skeleton.nodes[parent_joint].get_global_matrix(frame)#[:3, :3]
     inv_p = quaternion_inverse(quaternion_from_matrix(pm))
     normalize(inv_p)
     return quaternion_multiply(inv_p, q)
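The identity used above, q_local = q_parent^-1 x q_global, in a standalone sketch (assuming Gohlke's transformations, (w, x, y, z) order):

from transformations import (quaternion_about_axis, quaternion_inverse,
                             quaternion_multiply)

q_parent = quaternion_about_axis(0.5, [0, 0, 1])  # parent's global rotation
q_global = quaternion_about_axis(0.8, [0, 0, 1])  # joint's global rotation
q_local = quaternion_multiply(quaternion_inverse(q_parent), q_global)
# q_local is a 0.3 rad rotation about z: the joint's rotation expressed in
# the parent's coordinate system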
Example #23
    def get_six_dof_object(self, object_info, t_worldaug_world=None):
        """Calculate the pose of 6DOF object."""
        object_position = object_info[0]
        object_quat_xyzw = object_info[1]

        if t_worldaug_world is not None:
            object_quat_wxyz = (object_quat_xyzw[3], object_quat_xyzw[0],
                                object_quat_xyzw[1], object_quat_xyzw[2])
            t_world_object = transformations.quaternion_matrix(
                object_quat_wxyz)
            t_world_object[0:3, 3] = np.array(object_position)
            t_worldaug_object = t_worldaug_world @ t_world_object

            object_quat_wxyz = transformations.quaternion_from_matrix(
                t_worldaug_object)
            object_quat_xyzw = (object_quat_wxyz[1], object_quat_wxyz[2],
                                object_quat_wxyz[3], object_quat_wxyz[0])
            object_position = t_worldaug_object[0:3, 3]

        euler = utils.quatXYZW_to_eulerXYZ(object_quat_xyzw)
        roll = euler[0]
        pitch = euler[1]
        theta = -euler[2]

        return np.asarray([
            object_position[0], object_position[1], object_position[2], roll,
            pitch, theta
        ])
Example #24
    def _boneToHash(self, boneHierarchy, bone, recursionLevel=1):
        out = {}
        out["name"] = bone.name
        out["headPos"] = bone.headPos
        out["tailPos"] = bone.tailPos

        restMatrix = bone.matRestGlobal
        matrix = np.array(
            (restMatrix[0], -restMatrix[2], restMatrix[1], restMatrix[3]))
        qw, qx, qy, qz = quaternion_from_matrix(matrix)
        if qw < 1e-4:
            roll = 0
        else:
            roll = math.pi - 2 * math.atan2(qy, qw)
        if roll < -math.pi:
            roll += 2 * math.pi
        elif roll > math.pi:
            roll -= 2 * math.pi

        out["matrix"] = [
            list(restMatrix[0, :]),
            list(restMatrix[1, :]),
            list(restMatrix[2, :]),
            list(restMatrix[3, :])
        ]
        out["roll"] = roll

        out["children"] = []
        boneHierarchy.append(out)

        # Just a security measure.
        if recursionLevel < 30:
            for child in bone.children:
                self._boneToHash(out["children"], child, recursionLevel + 1)
Example #25
    def Init(self,
             pose_target,
             constraint_target,
             env,
             robot,
             utils,
             a_manip,
             goggles,
             goggles_robot,
             parent=None,
             child=None):
        self.pose_target = pose_target
        self.current_pose = pose_target
        self.constraint_target = constraint_target
        self.env = env
        self.robot = robot
        self.utils = utils
        self.a_manip = a_manip
        self.parent_node = parent
        self.child_node = child

        if goggles is not None:
            self.goggles = goggles
            self.g_pose = self.goggles.GetTransform()
            self.g_quat = self.utils.ArrayToQuat(
                transformations.quaternion_from_matrix(self.g_pose[0:3, 0:3]))
            self.goggles_robot = goggles_robot
Example #26
def get_roll_to(head, tail, normal):
    """
    Compute the roll angle for a bone to make the bone's local x axis align with
    the specified normal.
    """
    p1 = toZisUp3(head)
    p2 = toZisUp3(tail)
    xvec = normal

    pvec = matrix.normalize(p2-p1)
    xy = np.dot(xvec,pvec)
    yvec = matrix.normalize(pvec-xy*xvec)
    zvec = matrix.normalize(np.cross(xvec, yvec))
    mat = np.asarray((xvec,yvec,zvec), dtype=np.float32)

    try:
        assertOrthogonal(mat)
    except Exception as e:
        log.warning("Calculated matrix is not orthogonal (%s)" % e)
    quat = tm.quaternion_from_matrix(mat)
    if abs(quat[0]) < 1e-4:
        return 0
    else:
        roll = math.pi - 2*math.atan(quat[2]/quat[0])

    if roll < -math.pi:
        roll += 2*math.pi
    elif roll > math.pi:
        roll -= 2*math.pi
    return roll
Example #27
    def inframe(self, pose, frame):
        """ Transform a pose from one frame to another one.

        Uses transformation matrices. Could be refactored to use directly
        quaternions.
        """
        pose = self.get(pose)

        if pose['frame'] == "map":
            orig = numpy.identity(4)
        else:
            orig = self._to_mat4(self.get(pose["frame"]))

        if frame == "map":
            dest = numpy.identity(4)
        else:
            dest = numpy.linalg.inv(self._to_mat4(self.get(frame)))

        poseMatrix = self._to_mat4(pose)

        transf = numpy.dot(dest, orig)
        transformedPose = numpy.dot(transf, poseMatrix)

        qx,qy,qz,qw = transformations.quaternion_from_matrix(transformedPose)
        x,y,z = transformations.translation_from_matrix(transformedPose)

        return {"x":x,
                "y":y,
                "z":z,
                "qx":qx,
                "qy":qy,
                "qz":qz,
                "qw":qw,
                "frame": frame}
Example #28
 def setRotationIndex(self, index, angle, useQuat):
     """
     Set the rotation for one of the three rotation channels, either as
     quaternion or euler matrix. index should be 1,2 or 3 and represents
     x, y and z axis accordingly
     """
     if useQuat:
         quat = tm.quaternion_from_matrix(self.matPose)
         log.debug("%s", str(quat))
         quat[index] = angle/1000
         log.debug("%s", str(quat))
         _normalizeQuaternion(quat)
         log.debug("%s", str(quat))
         self.matPose = tm.quaternion_matrix(quat)
         return quat[0]*1000
     else:
         angle = angle*D
         ax,ay,az = tm.euler_from_matrix(self.matPose, axes='sxyz')
         if index == 1:
             ax = angle
         elif index == 2:
             ay = angle
         elif index == 3:
             az = angle
         mat = tm.euler_matrix(ax, ay, az, axes='sxyz')
         self.matPose[:3,:3] = mat[:3,:3]
         return 1000.0
Example #29
def euler_to_quaternion(euler_angles,
                        rotation_order=DEFAULT_ROTATION_ORDER,
                        filter_values=True):
    """Convert euler angles to quaternion vector [qw, qx, qy, qz]
    Parameters
    ----------
    * euler_angles: list of floats
    \tA list of ordered euler angles in degrees
    * rotation_order: Iterable
    \t a list that specifies the rotation axis corresponding to the values in euler_angles
    * filter_values: Bool
    \t enforce a unique rotation representation

    """
    assert len(euler_angles) == 3, ('The length of euler angles should be 3!')
    euler_angles = np.deg2rad(euler_angles)
    rotmat = euler_matrix(*euler_angles,
                          rotation_order_to_string(rotation_order))
    # convert rotation matrix R into quaternion vector (qw, qx, qy, qz)
    quat = quaternion_from_matrix(rotmat)
    # filter the quaternion see
    # http://physicsforgames.blogspot.de/2010/02/quaternions.html
    if filter_values:
        dot = np.sum(quat)
        if dot < 0:
            quat = -quat
    return [quat[0], quat[1], quat[2], quat[3]]
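The sign filter is harmless because q and -q encode the same rotation; flipping the sign only enforces a unique representation. A quick check, assuming Gohlke's transformations:

import numpy as np
from transformations import quaternion_about_axis, quaternion_matrix

q = quaternion_about_axis(1.0, [0, 1, 0])
print(np.allclose(quaternion_matrix(q), quaternion_matrix(-q)))  # True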
Example #30
def quaternions_from_trajectory(trajectory):
    """
    Returns rotation quaternions from a sequence of transformation. 
    """
    return np.array(
        [transformations.quaternion_from_matrix(tr[:3, :3]) for tr in trajectory]
    )
Example #31
def ur2np(ur_pose):
    # ROS pose
    ros_pose = []

    # ROS position
    ros_pose.append(ur_pose[0])
    ros_pose.append(ur_pose[1])
    ros_pose.append(ur_pose[2])

    # Ros orientation
    angle = sqrt(ur_pose[3]**2 + ur_pose[4]**2 + ur_pose[5]**2)
    direction = [i / angle for i in ur_pose[3:6]]
    np_T = tf.rotation_matrix(angle, direction)
    np_q = tf.quaternion_from_matrix(np_T)
    ros_pose.append(np_q[0])
    ros_pose.append(np_q[1])
    ros_pose.append(np_q[2])
    ros_pose.append(np_q[3])

    # orientation
    np_pose = tf.quaternion_matrix(
        [ros_pose[3], ros_pose[4], ros_pose[5], ros_pose[6]])

    # position
    np_pose[0][3] = ros_pose[0]
    np_pose[1][3] = ros_pose[1]
    np_pose[2][3] = ros_pose[2]

    return np_pose
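A usage sketch: a UR pose is [x, y, z, rx, ry, rz] with the rotation given as an axis-angle vector (note that a zero rotation would divide by zero in the angle normalization above); assumes tf is Gohlke's transformations and sqrt comes from math:

from math import pi

ur_pose = [0.4, -0.2, 0.3, 0.0, 0.0, pi / 2]  # 90 deg about z
T = ur2np(ur_pose)
# T[:3, :3] is the 90-degree z rotation, T[:3, 3] is (0.4, -0.2, 0.3)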
Example #32
def look_at_target(skeleton,
                   root,
                   end_effector,
                   frame,
                   position,
                   local_dir=LOOK_AT_DIR):
    """ find angle between the look direction and direction between end effector and target"""
    #direction of endeffector
    m = skeleton.nodes[end_effector].get_global_matrix(frame)
    #offset = skeleton.nodes[end_effector].offset
    end_effector_dir = np.dot(m[:3, :3], local_dir)
    end_effector_dir = end_effector_dir / np.linalg.norm(end_effector_dir)

    # direction from endeffector to target
    end_effector_pos = m[:3, 3]
    target_delta = position - end_effector_pos
    target_dir = target_delta / np.linalg.norm(target_delta)

    # find rotation to align vectors
    root_delta_q = quaternion_from_vector_to_vector(end_effector_dir,
                                                    target_dir)
    root_delta_q = normalize(root_delta_q)

    #apply global delta to get new global matrix of joint
    global_m = quaternion_matrix(root_delta_q)
    parent_joint = skeleton.nodes[root].parent.node_name
    parent_m = skeleton.nodes[parent_joint].get_global_matrix(frame,
                                                              use_cache=True)
    old_global = np.dot(parent_m, skeleton.nodes[root].get_local_matrix(frame))
    new_global = np.dot(global_m, old_global)
    q = quaternion_from_matrix(new_global)
    return normalize(q)
Example #33
    def compute_grasping_direction(self):

        #gripper_xquat = transformations.quaternion_from_matrix(h_gripper)
        # at 1, 0, gripper is pointing toward -y, gripper right is pointing -x,
        ori_gripper_xmat = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])

        d_rim_pos = self.get_grasp_point_to_center()

        d_xy = d_rim_pos[:2]
        if d_xy[0] == 0 and d_xy[1] == 0:
            d_xy[1] = 0.00000001
        d_xy = d_xy / np.linalg.norm(d_xy)
        cos_theta = d_xy[0]
        sin_theta = d_xy[1]

        elevation = self.get_grasp_point_elevation()
        roll = self.get_grasp_point_roll()

        #ori_gripper_quat =  transformations.quaternion_from_matrix(ori_gripper_xmat)
        # add rotation on the xy plane
        xy_rot_xmat = np.array([[cos_theta, -sin_theta, 0],
                                [sin_theta, cos_theta, 0], [0, 0, 1]])

        # add elevation: elevation higher means camera looking more downwards
        roll_xmat = np.array(
            [[1, 0, 0],
             [0, np.cos(np.deg2rad(-roll)), -np.sin(np.deg2rad(-roll))],
             [0, np.sin(np.deg2rad(-roll)),
              np.cos(np.deg2rad(-roll))]])

        ele_xmat = np.array([[
            np.cos(np.deg2rad(-elevation)), 0,
            np.sin(np.deg2rad(-elevation))
        ], [0, 1, 0],
                             [
                                 -np.sin(np.deg2rad(-elevation)), 0,
                                 np.cos(np.deg2rad(-elevation))
                             ]])

        final_rot = np.matmul(
            np.matmul(xy_rot_xmat, np.matmul(ele_xmat, ori_gripper_xmat)),
            roll_xmat)

        # making the "thumb" of the gripper to point to y+
        # if not: rotate gripper with 180 degree
        if final_rot[1, 1] < 0:
            rot = 180
            xmat = np.array(
                [[1, 0, 0],
                 [0, np.cos(np.deg2rad(-rot)), -np.sin(np.deg2rad(-rot))],
                 [0, np.sin(np.deg2rad(-rot)),
                  np.cos(np.deg2rad(-rot))]])
            final_rot = np.matmul(final_rot, xmat)

        final_rot_4x4 = np.eye(4)
        final_rot_4x4[:3, :3] = final_rot

        gripper_xquat = transformations.quaternion_from_matrix(final_rot_4x4)

        return gripper_xquat
Example #34
def look_at_target_projected(skeleton,
                             root,
                             end_effector,
                             frame,
                             position,
                             local_dir=LOOK_AT_DIR):
    """ find angle between the look direction and direction from end effector to target projected on the xz plane"""
    #direction of endeffector
    m = skeleton.nodes[root].get_global_matrix(frame)
    end_effector_dir = np.dot(m[:3, :3], local_dir)
    end_effector_dir[1] = 0
    end_effector_dir = end_effector_dir / np.linalg.norm(end_effector_dir)

    # direction from endeffector to target
    end_effector_pos = m[:3, 3]
    target_delta = position - end_effector_pos
    target_delta[1] = 0
    target_dir = target_delta / np.linalg.norm(target_delta)

    # find rotation to align vectors
    root_delta_q = quaternion_from_vector_to_vector(end_effector_dir,
                                                    target_dir)
    root_delta_q = normalize(root_delta_q)
    #apply global delta to get new global matrix of joint
    global_m = quaternion_matrix(root_delta_q)
    old_global = skeleton.nodes[root].get_global_matrix(frame)
    new_global = np.dot(global_m, old_global)
    q = quaternion_from_matrix(new_global)
    return normalize(q)
Example #35
def orient_end_effector_to_target(skeleton, root, end_effector, frame,
                                  constraint):
    """ find angle between the vectors end_effector - root and target- root """

    # align vectors
    root_pos = skeleton.nodes[root].get_global_position(frame)
    if constraint.offset is not None:
        m = skeleton.nodes[end_effector].get_global_matrix(frame)
        end_effector_pos = np.dot(m, constraint.offset)[:3]
    else:
        end_effector_pos = skeleton.nodes[end_effector].get_global_position(
            frame)

    src_delta = end_effector_pos - root_pos
    src_dir = src_delta / np.linalg.norm(src_delta)

    target_delta = constraint.position - root_pos
    target_dir = target_delta / np.linalg.norm(target_delta)

    root_delta_q = quaternion_from_vector_to_vector(src_dir, target_dir)
    root_delta_q = normalize(root_delta_q)

    if skeleton.nodes[root].stiffness > 0:
        t = 1 - skeleton.nodes[root].stiffness
        root_delta_q = quaternion_slerp([1, 0, 0, 0], root_delta_q, t)
        root_delta_q = normalize(root_delta_q)

    global_m = quaternion_matrix(root_delta_q)
    parent_joint = skeleton.nodes[root].parent.node_name
    parent_m = skeleton.nodes[parent_joint].get_global_matrix(frame,
                                                              use_cache=True)
    old_global = np.dot(parent_m, skeleton.nodes[root].get_local_matrix(frame))
    new_global = np.dot(global_m, old_global)
    q = quaternion_from_matrix(new_global)
    return normalize(q)
Example #36
def calculatePoseError(tf0, tf1):
    numDimQuat = 7  # 3 translation components + 4 quaternion components

    numData = len(tf0)  # should be the same for tf1
    poseError = np.empty((numData, numDimQuat))

    for i_data in range(numData):
        # relative transform between the two poses
        deltaTau = np.linalg.inv(tf0[i_data]).dot(tf1[i_data])
        diffTranslation = deltaTau[:3, 3]
        diffRotation = np.eye(4, 4)
        diffRotation[:3, :3] = deltaTau[:3, :3]

        diffQuat = quaternion_from_matrix(diffRotation)
        diffEuler = euler_from_matrix(diffRotation)  # alternative rotation representation (unused)

        # flip quaternions on the wrong side of the hypersphere
        # (this check assumes the scalar part is stored last, e.g. [x, y, z, w])
        if diffQuat[3] < 0:
            diffQuat = -diffQuat

        poseError[i_data, :] = np.r_[diffTranslation, diffQuat]
    return poseError
Example #37
	def __modify_quaternion( self , q , d ) :
		glPushMatrix()
		glLoadMatrixf( tr.quaternion_matrix( q ) )
		glRotatef( d[0] , 0 , 1 , 0 )
		glRotatef( d[1] , 1 , 0 , 0 )
		q = tr.quaternion_from_matrix( glGetDoublev(GL_MODELVIEW_MATRIX) )
		glPopMatrix()
		return q
Example #38
 def fromMatrix(self, matrix):
     q0 = tm.quaternion_from_matrix(matrix)
     t = matrix[:3,3]
     self.even = q0
     q1 = self.odd
     q1[0] = -0.5*(t[0]*q0[1] + t[1]*q0[2] + t[2]*q0[3])
     q1[1] = 0.5*( t[0]*q0[0] + t[1]*q0[3] - t[2]*q0[2])
     q1[2] = 0.5*(-t[0]*q0[3] + t[1]*q0[0] + t[2]*q0[1])
     q1[3] = 0.5*( t[0]*q0[2] - t[1]*q0[1] + t[2]*q0[0])
Example #39
def rvec2quat(rvec, cam2body):
    Rned2cam, jac = cv2.Rodrigues(rvec)
    Rned2body = cam2body.dot(Rned2cam)
    Rbody2ned = np.matrix(Rned2body).T
    # make 3x3 rotation matrix into 4x4 homogeneous matrix
    hIR = np.concatenate( (np.concatenate( (Rbody2ned, np.zeros((3,1))),1),
                           np.mat([0,0,0,1])) )
    quat = transformations.quaternion_from_matrix(hIR)
    return quat
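A usage sketch, assuming OpenCV's cv2 is available and the camera and body frames are aligned:

import numpy as np
import cv2

rvec = np.array([0.0, 0.0, 0.3])  # axis-angle rotation vector, 0.3 rad about z
cam2body = np.eye(3)
q = rvec2quat(rvec, cam2body)     # attitude quaternion (body to NED)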
Example #40
def _plot_img(img, K, T, z=0.1):
    obj = mlab.imshow(img.T)
    quat = tf.quaternion_from_matrix(T)
    angle, axis = angle_axis_from_quaternion(quat)
    obj.actor.rotate_wxyz(angle * 180 / np.pi, *axis)
    h, w = img.shape
    xmax_pixel, ymax_pixel = w, h
    point3d = projectionto3d(K, (xmax_pixel, ymax_pixel))[0]
    origin3d = projectionto3d(K, (0, 0))[0]
    point3d = point3d * z / point3d[2]
    origin3d = origin3d * z / origin3d[2]
    center3d = (point3d + origin3d) / 2.
    xmax, ymax, _ = point3d - origin3d
    obj.actor.scale = np.array([xmax / xmax_pixel, ymax / ymax_pixel, 1.0])
    obj.actor.position = utils.apply_transform(T, center3d)
    mlab.view(distance=20 * z)
    return obj
Example #41
def computeRoll(head, tail, normal, bone=None):
    if normal is None:
        return 0

    p1 = m2b(head)
    p2 = m2b(tail)
    xvec = normal
    pvec = getUnitVector(p2-p1)
    xy = np.dot(xvec,pvec)
    yvec = getUnitVector(pvec-xy*xvec)
    zvec = getUnitVector(np.cross(xvec, yvec))
    if zvec is None:
        return 0
    else:
        mat = np.array((xvec,yvec,zvec))

    checkOrthogonal(mat)
    quat = tm.quaternion_from_matrix(mat)
    if abs(quat[0]) < 1e-4:
        return 0
    else:
        roll = math.pi - 2*math.atan(quat[2]/quat[0])

    if roll < -math.pi:
        roll += 2*math.pi
    elif roll > math.pi:
        roll -= 2*math.pi
    return roll

    # NOTE: this debug block and the second return are unreachable because of
    # the return above.
    if bone and bone.name in ["forearm.L", "forearm.R"]:
        log.debug("B  %s" % bone.name)
        log.debug(" H  %.4g %.4g %.4g" % tuple(head))
        log.debug(" T  %.4g %.4g %.4g" % tuple(tail))
        log.debug(" N  %.4g %.4g %.4g" % tuple(normal))
        log.debug(" P  %.4g %.4g %.4g" % tuple(pvec))
        log.debug(" X  %.4g %.4g %.4g" % tuple(xvec))
        log.debug(" Y  %.4g %.4g %.4g" % tuple(yvec))
        log.debug(" Z  %.4g %.4g %.4g" % tuple(zvec))
        log.debug(" Q  %.4g %.4g %.4g %.4g" % tuple(quat))
        log.debug(" R  %.4g" % roll)

    return roll
Example #42
def contacts_to_baxter_hand_pose(contact1, contact2):
    c1 = np.array(contact1)
    c2 = np.array(contact2)

    # compute gripper center and axis
    center = 0.5 * (c1 + c2)
    y_axis = c2 - c1
    y_axis = y_axis / np.linalg.norm(y_axis)
    z_axis = np.array([y_axis[1], -y_axis[0], 0])  # the z axis will always be in the table plane for now
    z_axis = z_axis / np.linalg.norm(z_axis)
    x_axis = np.cross(y_axis, z_axis)

    # convert to hand pose
    R_obj_gripper = np.array([x_axis, y_axis, z_axis]).T
    t_obj_gripper = center
    T_obj_gripper = np.eye(4)
    T_obj_gripper[:3, :3] = R_obj_gripper
    T_obj_gripper[:3, 3] = t_obj_gripper
    q_obj_gripper = transformations.quaternion_from_matrix(T_obj_gripper)

    return t_obj_gripper, q_obj_gripper
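A usage sketch: two contacts 10 cm apart along y yield a gripper frame centered at their midpoint (assuming the transformations module is imported as in the example):

t, q = contacts_to_baxter_hand_pose([0.0, 0.0, 0.0], [0.0, 0.1, 0.0])
# t is (0, 0.05, 0), the grasp center; q is the quaternion of the frame
# whose y axis points from contact1 to contact2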
Example #43
    def prepair_data(self, image_list, matches_list, K):
        # iterate through the image list and build the camera pose dictionary
        # (and a simple list of camera locations for plotting)
        f = open( self.root + '/sba-cams.txt', 'w' )
        for image in image_list:
            body2cam = image.get_body2cam()
            ned2body = image.get_ned2body()
            Rtotal = body2cam.dot( ned2body )
            q = transformations.quaternion_from_matrix(Rtotal)
            rvec, tvec = image.get_proj()
            s = "%.8f %.8f %.8f %.8f %.8f %.8f %.8f\n" % (q[0], q[1], q[2], q[3],
                                                          tvec[0,0], tvec[1,0], tvec[2,0])
            f.write(s)
        f.close()

        # iterate through the matches dictionary to produce a list of matches
        f = open( self.root + '/sba-points.txt', 'w' )
        for match in matches_list:
            ned = np.array(match[0])
            s = "%.4f %.4f %.4f " % (ned[0], ned[1], ned[2])
            f.write(s)
            s = "%d " % (len(match[1:]))
            f.write(s)
            for p in match[1:]:
                image_num = p[0]
                # kp = image_list[image_num].kp_list[p[1]].pt # undistorted
                kp = image_list[image_num].uv_list[p[1]]      # distorted
                s = "%d %.2f %.2f " % (image_num, kp[0], kp[1])
                f.write(s)
            f.write('\n')
        f.close()

        # generate the calibration matrix (K) file
        f = open( self.root + '/sba-calib.txt', 'w' )
        s = "%.4f %.4f %.4f\n" % (K[0,0], K[0,1], K[0,2])
        f.write(s)
        s = "%.4f %.4f %.4f\n" % (K[1,0], K[1,1], K[1,2])
        f.write(s)
        s = "%.4f %.4f %.4f\n" % (K[2,0], K[2,1], K[2,2])
        f.write(s)
Example #44
def get_a2b(a, b, rep_out='rotmat'):
    """Returns an SO3 quantity that will align vector a with vector b.
    The output will be in the representation selected by rep_out
    ('rotmat', 'quaternion', or 'rotvec').

    >>> a = trns.random_vector(3)
    >>> b = trns.random_vector(3)
    >>> R = get_a2b(a, b)
    >>> p1 = R.dot(a)
    >>> print(np.allclose(np.cross(p1, b), [0, 0, 0]))
    True
    >>> p1.dot(b) > 0
    True
    >>> q = get_a2b(a, b, 'quaternion')
    >>> p2 = apply_to_points(q, a)
    >>> print(np.allclose(np.cross(p2, b), [0, 0, 0]))
    True
    >>> r = get_a2b(a, b, 'rotvec')
    >>> p3 = apply_to_points(r, a)
    >>> print(np.allclose(np.cross(p3, b), [0, 0, 0]))
    True

    """
    a = normalize(a)
    b = normalize(b)
    cosine = a.dot(b)
    if np.isclose(abs(cosine), 1):
        R = np.eye(3)
    else:
        axis = np.cross(a, b)
        angle = np.arctan2(npl.norm(axis), cosine)
        R = trns.rotation_matrix(angle, axis)
    if rep_out == 'rotmat':
        return R[:3, :3]
    elif rep_out == 'quaternion':
        return trns.quaternion_from_matrix(R)
    elif rep_out == 'rotvec':
        return get_rotvec(R)
    else:
        raise ValueError("Invalid rep_out. Choose 'rotmat', 'quaternion', or 'rotvec'.")
Example #45
 def setRotationIndex(self, index, angle, useQuat):
     if useQuat:
         quat = tm.quaternion_from_matrix(self.matrixPose)
         log.debug("%s", str(quat))
         quat[index] = angle/1000
         log.debug("%s", str(quat))
         normalizeQuaternion(quat)
         log.debug("%s", str(quat))
         self.matrixPose = tm.quaternion_matrix(quat)
         return quat[0]*1000    
     else:
         angle = angle*D
         ax,ay,az = tm.euler_from_matrix(self.matrixPose, axes='sxyz')
         if index == 1:
             ax = angle
         elif index == 2:
             ay = angle
         elif index == 3:
             az = angle
         mat = tm.euler_matrix(ax, ay, az, axes='sxyz')
         self.matrixPose[:3,:3] = mat[:3,:3]
         return 1000.0
Example #46
def contacts_to_baxter_hand_pose(c1, c2):
    # compute gripper center and axis
    center = 0.5 * (c1 + c2)
    y_axis = c2 - c1
    print y_axis
    y_axis = y_axis / np.linalg.norm(y_axis)
    print y_axis
    x = np.array([y_axis[1], -y_axis[0], 0]) # the x axis will always be in the table plane for now
    x = x / np.linalg.norm(x)
    z = np.cross(x, y_axis)
    if z[2] < 0:
        x = -x
        z = np.cross(x, y_axis)

    # convert to hand pose
    R_obj_gripper = np.array([x, y_axis, z]).T
    t_obj_gripper = center
    T_obj_gripper = np.eye(4)
    T_obj_gripper[:3,:3] = R_obj_gripper
    T_obj_gripper[:3,3] = t_obj_gripper
    q_obj_gripper = transformations.quaternion_from_matrix(T_obj_gripper)

    return t_obj_gripper, q_obj_gripper 
Example #47
 def listPose(self):
     for bone in self.boneList:
         quat = tm.quaternion_from_matrix(bone.matrixPose)
         log.debug("  %s %s", bone.name, quat)
Example #48
def solvePnP2( pts_dict ):
    # build a new cam_dict that is a copy of the current one
    cam_dict = {}
    for i1 in proj.image_list:
        cam_dict[i1.name] = {}
        rvec, tvec = i1.get_proj()
        ned, ypr, quat = i1.get_camera_pose()
        cam_dict[i1.name]['rvec'] = rvec
        cam_dict[i1.name]['tvec'] = tvec
        cam_dict[i1.name]['ned'] = ned

    camw, camh = proj.cam.get_image_params()
    for i, i1 in enumerate(proj.image_list):
        print i1.name
        scale = float(i1.width) / float(camw)
        K = proj.cam.get_K(scale)
        rvec1, tvec1 = i1.get_proj()
        cam2body = i1.get_cam2body()
        body2cam = i1.get_body2cam()
        # include our own position in the average
        quat_start_weight = 100
        ned_start_weight = 2
        count = 0
        sum_quat = np.array(i1.camera_pose['quat']) * quat_start_weight
        sum_ned = np.array(i1.camera_pose['ned']) * ned_start_weight
        for j, i2 in enumerate(proj.image_list):
            matches = i1.match_list[j]
            if len(matches) < 8:
                continue
            count += 1
            rvec2, tvec2 = i2.get_proj()
            # build obj_pts and img_pts to position i1 relative to the
            # matches in i2.
            img1_pts = []
            img2_pts = []
            obj_pts = []
            for pair in matches:
                kp1 = i1.kp_list[ pair[0] ]
                img1_pts.append( kp1.pt )
                kp2 = i2.kp_list[ pair[1] ]
                img2_pts.append( kp2.pt )
                key = "%d-%d" % (i, pair[0])
                if not key in pts_dict:
                    key = "%d-%d" % (j, pair[1])
                # print key, pts_dict[key]
                obj_pts.append(pts_dict[key])

            # compute the centroid of obj_pts
            sum = np.zeros(3)
            for p in obj_pts:
                sum += p
            obj_center = sum / len(obj_pts)
            print "obj_pts center =", obj_center
            
            # given the previously computed triangulations (and
            # averages of point 3d locations if they are involved in
            # multiple triangulations), then compute an estimate for
            # both matching camera poses.  The relative positioning of
            # these camera poses should be pretty accurate.
            (result, rvec1, tvec1) = cv2.solvePnP(np.float32(obj_pts),
                                                  np.float32(img1_pts),
                                                  K, None, rvec1, tvec1,
                                                  useExtrinsicGuess=True)
            (result, rvec2, tvec2) = cv2.solvePnP(np.float32(obj_pts),
                                                  np.float32(img2_pts),
                                                  K, None, rvec2, tvec2,
                                                  useExtrinsicGuess=True)
            
            Rned2cam1, jac = cv2.Rodrigues(rvec1)
            Rned2cam2, jac = cv2.Rodrigues(rvec2)
            ned1 = -np.matrix(Rned2cam1[:3,:3]).T * np.matrix(tvec1)
            ned2 = -np.matrix(Rned2cam2[:3,:3]).T * np.matrix(tvec2)
            print "cam1 orig=%s new=%s" % (i1.camera_pose['ned'], ned1)
            print "cam2 orig=%s new=%s" % (i2.camera_pose['ned'], ned2)

            # compute a rigid transform (rotation + translation) to
            # align the estimated camera locations (projected from the
            # triangulation) back with the original camera points, and
            # roughly rotated around the centroid of the object
            # points.
            src = np.zeros( (3,3) ) # current camera locations
            dst = np.zeros( (3,3) ) # original camera locations
            src[0,:] = np.squeeze(ned1)
            src[1,:] = np.squeeze(ned2)
            src[2,:] = obj_center
            dst[0,:] = i1.camera_pose['ned']
            dst[1,:] = i2.camera_pose['ned']
            dst[2,:] = obj_center
            print "src:\n", src
            print "dst:\n", dst
            M = transformations.superimposition_matrix(src, dst)
            print "M:\n", M

            # Our (i1) Rned2cam matrix is body2cam * Rned2body, so
            # solvePnP returns this combination.  Premultiplying by
            # cam2body, i.e. inv(body2cam), recovers Rned2body; its
            # transpose is Rbody2ned.
            Rned2body = cam2body.dot(Rned2cam1)
            # print "Rned2body:\n", Rned2body
            Rbody2ned = np.matrix(Rned2body).T # IR
            # print "Rbody2ned:\n", Rbody2ned
            # print "R (after M * R):\n", R

            # now transform by the earlier "M" affine transform to
            # rotate the work space closer to the actual camera points
            Rot = M[:3,:3].dot(Rbody2ned)
            
            # make 3x3 rotation matrix into 4x4 homogeneous matrix
            hRot = np.concatenate( (np.concatenate( (Rot, np.zeros((3,1))),1),
                                    np.mat([0,0,0,1])) )
            # print "hRbody2ned:\n", hRbody2ned

            quat = transformations.quaternion_from_matrix(hRot)
            sum_quat += quat
            
            sum_ned += np.squeeze(np.asarray(ned1))

        print "count = ", count
        newned = sum_ned / (ned_start_weight + count)
        print "orig ned =", i1.camera_pose['ned']
        print "new ned =", newned
        newquat = sum_quat / (quat_start_weight + count)
        newIR = transformations.quaternion_matrix(newquat)
        print "orig ypr = ", i1.camera_pose['ypr']
        (yaw, pitch, roll) = transformations.euler_from_quaternion(newquat, 'rzyx')
        print "new ypr =", [yaw/d2r, pitch/d2r, roll/d2r]

        newR = np.transpose(newIR[:3,:3]) # equivalent to inverting
        newRned2cam = body2cam.dot(newR)
        rvec, jac = cv2.Rodrigues(newRned2cam)
        tvec = -np.matrix(newRned2cam) * np.matrix(newned).T

        #print "orig pose:\n", cam_dict[i1.name]
        cam_dict[i1.name]['rvec'] = rvec
        cam_dict[i1.name]['tvec'] = tvec
        cam_dict[i1.name]['ned'] = newned
        #print "new pose:\n", cam_dict[i1.name]
    return cam_dict
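A note on the averaging above: a quaternion q and its negation -q encode the same rotation, so summing raw components (sum_quat += quat) can partially cancel when the solvePnP-derived orientations land on opposite hemispheres. A minimal sign-consistent variant (a sketch, not the code used above; it assumes only numpy):

import numpy as np

def average_quaternions(quats, weights=None):
    # Flip each quaternion onto the first one's hemisphere (q and -q are
    # the same rotation), then take a weighted component-wise mean and
    # renormalize.  Adequate for tightly clustered orientations.
    quats = np.asarray(quats, dtype=float)
    if weights is None:
        weights = np.ones(len(quats))
    ref = quats[0]
    acc = np.zeros(4)
    for q, w in zip(quats, weights):
        if np.dot(q, ref) < 0.0:
            q = -q
        acc += w * q
    return acc / np.linalg.norm(acc)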
Example #49
    def getPoseQuaternion(self):
        return tm.quaternion_from_matrix(self.matrixPose)
Example #50
    def set_matrix(self, mb, me):
        self.qb = tr.quaternion_from_matrix(mb)
        self.qe = tr.quaternion_from_matrix(me)
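set_matrix only caches the endpoint orientations; presumably a slerp-based interpolator consumes qb and qe elsewhere. A hypothetical companion method (a sketch; tr is assumed to be a Gohlke-style transformations module):

    def interpolate(self, fraction):
        # Spherical linear interpolation between the orientations captured
        # by set_matrix; fraction runs from 0.0 (qb) to 1.0 (qe).
        return tr.quaternion_slerp(self.qb, self.qe, fraction)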
Example #51
for trial in range(0, 5):
    H_file = "/Users/isa/Dropbox/MultiModalRegPaper/ground_truth/rand_t/H_" + str(trial) + ".txt"
    max_angle = (np.pi/18.0) #max angular error 10 degrees
    max_t = 5 #max translation 5 meters
    # H = np.identity(4)
    R_rand = np.identity(3)
    scale = 1.0

    #generate a random rotation angle smaller than max_angle
    rand_angle = rotations.random_rot_angle(max_angle)
    print "Random Angle: "
    print rand_angle

    #generate a random rotation matrix of fixed angle
    rotations.R_random_axis(R_rand, rand_angle)
    Q_rand = tf.quaternion_from_matrix(R_rand)
    Q_vxl = np.array([float(Q_rand[1]),
                      float(Q_rand[2]),
                      float(Q_rand[3]),
                      float(Q_rand[0])])

    # H[:3, :3]= R_rand
    print "Rotation Matrix: "
    print R_rand
    print Q_vxl


    #generate random translation
    T = (np.random.random(3) - 0.5)* 2.0 * max_t
    print "Translation : "
    print T
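The Q_vxl shuffle above exists because tf.quaternion_from_matrix returns components in (w, x, y, z) order while the consumer expects (x, y, z, w). Two helpers for the reorder (hypothetical names, plain numpy):

import numpy as np

def wxyz_to_xyzw(q):
    # (w, x, y, z) -> (x, y, z, w)
    return np.array([q[1], q[2], q[3], q[0]])

def xyzw_to_wxyz(q):
    # (x, y, z, w) -> (w, x, y, z)
    return np.array([q[3], q[0], q[1], q[2]])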
Example #52
def solvePnP1( pts_dict ):
    # start with a clean slate
    for i1 in proj.image_list:
        i1.img_pts = []
        i1.obj_pts = []

    # build a new cam_dict that is a copy of the current one
    cam_dict = {}
    for i1 in proj.image_list:
        cam_dict[i1.name] = {}
        rvec, tvec = i1.get_proj()
        ned, ypr, quat = i1.get_camera_pose()
        cam_dict[i1.name]['rvec'] = rvec
        cam_dict[i1.name]['tvec'] = tvec
        cam_dict[i1.name]['ned'] = ned

    camw, camh = proj.cam.get_image_params()
    for i, i1 in enumerate(proj.image_list):
        print i1.name
        scale = float(i1.width) / float(camw)
        K = proj.cam.get_K(scale)
        rvec, tvec = i1.get_proj()
        cam2body = i1.get_cam2body()
        body2cam = i1.get_body2cam()
        # include our own position in the average
        count = 1
        sum_quat = np.array(i1.camera_pose['quat'])
        sum_ned = np.array(i1.camera_pose['ned'])
        for j, i2 in enumerate(proj.image_list):
            matches = i1.match_list[j]
            if len(matches) < 8:
                continue
            count += 1
            # build obj_pts and img_pts to position i1 relative to the
            # matches in i2
            obj_pts = []
            img_pts = []
            for pair in matches:
                kp = i1.kp_list[ pair[0] ]
                img_pts.append( kp.pt )
                key = "%d-%d" % (i, pair[0])
                if not key in pts_dict:
                    key = "%d-%d" % (j, pair[1])
                # print key, pts_dict[key]
                obj_pts.append(pts_dict[key])
                #if key in pts_dict:
                #    sum_dict[key] += p
                #else:
                #    sum_dict[key] = p

            (result, rvec, tvec) = cv2.solvePnP(np.float32(obj_pts),
                                                np.float32(img_pts),
                                                K, None, rvec, tvec,
                                                useExtrinsicGuess=True)
            #print "rvec=%s tvec=%s" % (rvec, tvec)
            Rned2cam, jac = cv2.Rodrigues(rvec)
            #print "Rned2cam (from SolvePNP):\n", Rned2cam

            # Our Rned2cam matrix (in our ned coordinate system) is
            # body2cam * Rned2body, so solvePnP returns this combination.
            # Premultiplying by cam2body, i.e. inv(body2cam), recovers
            # Rned2body; its transpose is Rbody2ned.
            Rned2body = cam2body.dot(Rned2cam)
            # print "Rned2body:\n", Rned2body
            Rbody2ned = np.matrix(Rned2body).T # IR
            # print "Rbody2ned:\n", Rbody2ned
            # print "R (after M * R):\n", R

            # make 3x3 rotation matrix into 4x4 homogeneous matrix
            hRbody2ned = np.concatenate( (np.concatenate( (Rbody2ned, np.zeros((3,1))),1), np.mat([0,0,0,1])) )
            # print "hRbody2ned:\n", hRbody2ned

            quat = transformations.quaternion_from_matrix(hRbody2ned)
            sum_quat += quat
            
            pos = -np.matrix(Rned2cam[:3,:3]).T * np.matrix(tvec)
            sum_ned += np.squeeze(np.asarray(pos))
            print "ned =", np.squeeze(np.asarray(pos))

        print "count = ", count
        newned = sum_ned / float(count)
        print "orig ned =", i1.camera_pose['ned']
        print "new ned =", newned
        newquat = sum_quat / float(count)
        newIR = transformations.quaternion_matrix(newquat)
        print "orig ypr = ", i1.camera_pose['ypr']
        (yaw, pitch, roll) = transformations.euler_from_quaternion(newquat, 'rzyx')
        print "new ypr =", [yaw/d2r, pitch/d2r, roll/d2r]

        newR = np.transpose(newIR[:3,:3]) # equivalent to inverting
        newRned2cam = body2cam.dot(newR)
        rvec, jac = cv2.Rodrigues(newRned2cam)
        tvec = -np.matrix(newRned2cam) * np.matrix(newned).T

        #print "orig pose:\n", cam_dict[i1.name]
        cam_dict[i1.name]['rvec'] = rvec
        cam_dict[i1.name]['tvec'] = tvec
        cam_dict[i1.name]['ned'] = newned
        #print "new pose:\n", cam_dict[i1.name]
    return cam_dict
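Both solvePnP passes above lean on the same identity: with x_cam = R * x_ned + t, the camera center satisfies ned = -R^T * t, and conversely t = -R * ned (exactly the two conversions at the `pos = ...` and `tvec = ...` lines). A quick self-check (a sketch using plain numpy arrays instead of np.matrix; transformations is Gohlke's module):

import numpy as np
import transformations

R = transformations.rotation_matrix(0.3, [0, 0, 1])[:3, :3]
t = np.array([1.0, 2.0, 3.0])
ned = -R.T.dot(t)                    # camera center from (R, t)
assert np.allclose(-R.dot(ned), t)   # ...and back to the translation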
Example #53
    def getRotation(self):
        qw, qx, qy, qz = tm.quaternion_from_matrix(self.matrixPose)
        ax, ay, az = tm.euler_from_matrix(self.matrixPose, axes='sxyz')
        return (1000*qw, 1000*qx, 1000*qy, 1000*qz, ax/D, ay/D, az/D)
Example #54
def compute_error(trial, descriptor_type, niter, percentile=99):

    src_scene_root = "/Users/isa/Experiments/reg3d_eval/downtown_dan/trial_" +str(trial);
    src_features_dir = "/Users/isa/Experiments/reg3d_eval/downtown_dan/trial_" +str(trial)+ "/" + descriptor_type + "_30"

    #read "geo-transformatiom"
    #************GEO**************"
    Tfile = "/data/lidar_providence/downtown_offset-1-financial-Hs.txt"
    Tfis = open(Tfile, 'r')
    lines = Tfis.readlines()
    scale_geo = float(lines[0])
    Ss_geo = tf.scale_matrix(scale_geo)
    quat_line = lines[1].split(" ")
    quat_geo = np.array([float(quat_line[3]), float(quat_line[0]),
                         float(quat_line[1]), float(quat_line[2])])
    Rs_geo = tf.quaternion_matrix(quat_geo)
    trans_line = lines[2].split(" ")
    trans_geo = np.array([float(trans_line[0]), float(trans_line[1]),
                          float(trans_line[2])])
    Tfis.close()
    Hs_geo = Rs_geo.copy()
    Hs_geo[:3, 3] = trans_geo[:3]
    Hs_geo = Ss_geo.dot(Hs_geo)
  
    LOG.debug( "\n************Geo************** \n Scale: \n%s \nR:\n%s \nT:\n%s \nH:\n%s", Ss_geo, Rs_geo, trans_geo, Hs_geo)
    
      
    #************Hs**************
    # read source-to-target "ground truth" transformation
    Tfile = src_scene_root + "/Hs.txt"
    Tfis = open(Tfile, 'r')
    lines = Tfis.readlines()
    scale = float(lines[0])
    Ss = tf.scale_matrix(scale)
    quat_line = lines[1].split(" ")
    quat = tf.unit_vector(np.array([float(quat_line[3]), float(quat_line[0]),
                                    float(quat_line[1]), float(quat_line[2])]))
    Hs = tf.quaternion_matrix(quat)
    trans_line = lines[2].split(" ")
    Ts = np.array([float(trans_line[0]), float(trans_line[1]),
                   float(trans_line[2])])
    Tfis.close()
    Rs = Hs.copy()[:3, :3]
    Hs[:3, 3] = Ts[:3]
    Hs = Ss.dot(Hs)
  
    LOG.debug( "\n************Hs************** \n R:\n%s \nT:\n%s \nH:\n%s", Rs, Ts, Hs)
  
    #************Hs IA**************
    # read source-to-target "initial alignment" transformation
    Tfile = src_features_dir + "/ia_transformation_" + str(percentile) + "_" + str(niter) + ".txt"
    Tfis = open(Tfile, 'r')
    Hs_ia = np.genfromtxt(Tfis, skip_header=1, usecols=(0, 1, 2, 3))
    Tfis.close()
    Tfis = open(Tfile, 'r')
    Ss_ia = np.genfromtxt(Tfis, skip_footer=4, usecols=(0,))
    Tfis.close()
    Rs_ia = Hs_ia[:3, :3] * (1.0 / Ss_ia)
    Ts_ia = Hs_ia[:3, 3] * (1.0 / Ss_ia)
    
    LOG.debug( "\n************Hs IA************** \n R:\n%s \nT:\n%s \nH:\n%s", Rs_ia, Ts_ia, Hs_ia)
    
    # Initial alignment errors
    # Rotation error - half angle between the normalized quaternions
    quat_ia = tf.unit_vector(tf.quaternion_from_matrix(Rs_ia))
    Rs_error_ia_norm = math.acos(abs(np.dot(quat_ia, quat)))
    
    #Translation error
    # x = Rs_ia * x_ia + Ts_ia = Rs_ia * (x_ia + Rs_ia.T.dot(Ts_ia))
    # so Rs_ia.T.dot(Ts_ia) is the translation expressed in the ia frame
    Ts_error_ia = (Rs_ia.T).dot(Ts_ia) - (Rs.T).dot(Ts)
    Ts_error_ia_norm = scale_geo*scale*LA.norm(Ts_error_ia)
    
    LOG.debug(  "Error (R,T) %s , %s ", Rs_error_ia_norm , Ts_error_ia_norm )
        
        
    #************Hs ICP**************
    # read source-to-target ICP transformation
    Tfile = src_features_dir + "/icp_transformation_" + str(percentile) + "_" + str(niter) + ".txt"
    Tfis = open(Tfile, 'r')
    Hs_icp = np.genfromtxt(Tfis, usecols=(0, 1, 2, 3))
    Tfis.close()
    
    Hs_icp = Hs_icp.dot(Hs_ia)
    Rs_icp = Hs_icp[:3,:3]*(1.0/Ss_ia)
    Ts_icp = Hs_icp[:3,3]*(1.0/Ss_ia)
    
    LOG.debug( "\n************Hs ICP************** \n R:\n%s \nT:\n%s \nH:\n%s", Rs_icp, Ts_icp, Hs_icp)

    # ICP errors
    # Rotation error - half angle between the normalized quaternions
    quat_icp = tf.unit_vector(tf.quaternion_from_matrix(Rs_icp))
    Rs_error_icp_norm = math.acos(abs(np.dot(quat_icp, quat)))
    
    #Translation error
    # x = Rs_icp * x_icp + Ts_icp = Rs_icp * (x_icp + Rs_icp.T.dot(Ts_icp))
    # so Rs_icp.T.dot(Ts_icp) is the translation expressed in the icp frame
    Ts_error_icp = (Rs_icp.T).dot(Ts_icp) - (Rs.T).dot(Ts)
    Ts_error_icp_norm = scale_geo*scale*LA.norm(Ts_error_icp)
    
    LOG.debug(  "Error (R,T) %s , %s ", Rs_error_icp_norm , Ts_error_icp_norm )
    
    #************Hs ICP Normals**************
    # read source-to-target ICP-with-normals transformation
    Tfile = src_features_dir + "/icp_transformation_" + str(percentile) + "_" + str(niter) + "_n.txt"
    Tfis = open(Tfile, 'r')
    Hs_icn = np.genfromtxt(Tfis, usecols=(0, 1, 2, 3))
    Tfis.close()
    
    Hs_icn = Hs_icn.dot(Hs_ia)
    Rs_icn = Hs_icn[:3,:3]*(1.0/Ss_ia)
    Ts_icn = Hs_icn[:3,3]*(1.0/Ss_ia)
    
    LOG.debug( "\n************Hs ICP Normals************** \n R:\n%s \nT:\n%s \nH:\n%s", Rs_icn, Ts_icn, Hs_icn)

    # ICP-with-normals errors
    # Rotation error - half angle between the normalized quaternions
    quat_icn = tf.unit_vector(tf.quaternion_from_matrix(Rs_icn))
    Rs_error_icn_norm = math.acos(abs(np.dot(quat_icn, quat)))
    
    #Translation error
    # x = Rs_icn * x_icn + Ts_icn = Rs_icn * (x_icn + Rs_icn.T.dot(Ts_icn))
    # so Rs_icn.T.dot(Ts_icn) is the translation expressed in the icn frame
    Ts_error_icn = (Rs_icn.T).dot(Ts_icn) - (Rs.T).dot(Ts)
    Ts_error_icn_norm = scale_geo*scale*LA.norm(Ts_error_icn)
    
    LOG.debug(  "Error (R,T) %s , %s ", Rs_error_icn_norm , Ts_error_icn_norm )
    
    ICP_error = np.array([Rs_error_icp_norm, Ts_error_icp_norm])
    ICN_error = np.array([Rs_error_icn_norm, Ts_error_icn_norm])
    
    # import code; code.interact(local=locals())
    
    return ICP_error, ICN_error
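One robustness note: the math.acos(abs(np.dot(...))) calls above can raise a domain error when floating-point noise pushes the dot product a hair past 1.0. A clipped variant (a sketch; the function name is hypothetical):

import math
import numpy as np

def quat_angle_error(q1, q2):
    # Half the relative rotation angle between two unit quaternions,
    # matching the metric used above; abs() removes the q/-q sign
    # ambiguity and min() keeps the argument inside acos's domain.
    return math.acos(min(1.0, abs(np.dot(q1, q2))))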
Example #55
def writeAnimation(filepath, human, humanBBox, config, animTrack):
    log.message("Exporting animation %s.", animTrack.name)
    numJoints = len(human.getSkeleton().getBones()) + 1

    animfilename = os.path.splitext(os.path.basename(filepath))[0]
    animfilename = animfilename + "_%s.md5anim" % (animTrack.name)
    foldername = os.path.dirname(filepath)
    animfilepath = os.path.join(foldername, animfilename)
    f = codecs.open(animfilepath, 'w', encoding="utf-8")
    f.write('MD5Version 10\n')
    f.write('commandline ""\n\n')
    f.write('numFrames %d\n' % animTrack.nFrames)
    f.write('numJoints %d\n' % numJoints)
    f.write('frameRate %d\n' % int(animTrack.frameRate))
    f.write('numAnimatedComponents %d\n\n' % (numJoints * 6))

    skel = human.getSkeleton()
    flags = 63
    f.write('hierarchy {\n')
    f.write('\t"origin" -1 %d 0\n' % flags)
    arrayIdx = 6
    for bIdx, bone in enumerate(skel.getBones()):
        #<string:jointName> <int:parentIndex> <int:flags> <int:startIndex>
        if bone.parent:
            f.write('\t"%s" %d %d %d\n' % (bone.name, bone.parent.index+1, flags, arrayIdx))
        else:
            f.write('\t"%s" %d %d %d\n' % (bone.name, 0, flags, arrayIdx))
        arrayIdx = arrayIdx + 6
    f.write('}\n\n')

    f.write('bounds {\n')
    bounds = humanBBox
    if config.feetOnGround:
        bounds[:,1] += getFeetOnGroundOffset(human)
    if config.zUp:
        bounds[0] = bounds[0][[0,2,1]] * [1,-1,1]
        bounds[1] = bounds[1][[0,2,1]] * [1,-1,1]
    bounds = bounds * scale
    # TODO use bounds calculated for every frame
    for frameIdx in xrange(animTrack.nFrames):
        #( vec3:boundMin ) ( vec3:boundMax )
        f.write('\t( %f %f %f ) ( %f %f %f )\n' % (bounds[0][0], bounds[0][1], bounds[0][2], bounds[1][0], bounds[1][1], bounds[1][2]))
    f.write('}\n\n')

    f.write('baseframe {\n')
    f.write('\t( %f %f %f ) ( %f %f %f )\n' % (0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
    bases = []
    for bone in skel.getBones():
        pos = bone.getRestOffset() * scale
        if config.feetOnGround and not bone.parent:
            pos[1] += (getFeetOnGroundOffset(human) * scale)

        transformationMat = bone.matRestRelative.copy()
        if config.zUp:
            transformationMat = np.dot(ZYRotation, np.dot(transformationMat,la.inv(ZYRotation)))
            pos = pos[[0,2,1]] * [1,-1,1]
        orientationQuat = tm.quaternion_from_matrix(transformationMat)
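        # NOTE: the indexing below assumes tm.quaternion_from_matrix returns
        # (x, y, z, w); Gohlke's canonical transformations module returns
        # (w, x, y, z) instead.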

        qx = orientationQuat[0]
        qy = orientationQuat[1]
        qz = orientationQuat[2]
        w = orientationQuat[3]

        #( vec3:position ) ( vec3:orientation )
        f.write('\t( %f %f %f ) ( %f %f %f )\n' % (pos[0], pos[1], pos[2], qx, qy, qz))
        bases.append((pos, [qx, qy, qz, w]))
    f.write('}\n\n')

    for frameIdx in xrange(animTrack.nFrames):
        frame = animTrack.getAtFramePos(frameIdx)
        f.write('frame %d {\n' % frameIdx)
        f.write('\t%f %f %f %f %f %f\n' % (0.0, 0.0, 0.0, 0.0, 0.0, 0.0))  # Transformation for origin joint
        for bIdx in xrange(numJoints-1):
            transformationMat = frame[bIdx].copy()
            pos = transformationMat[:3,3] * scale
            transformationMat[:3,3] = [0.0, 0.0, 0.0]

            if config.zUp:
                transformationMat = np.dot(ZYRotation, np.dot(transformationMat,la.inv(ZYRotation)))
                pos = pos[[0,2,1]] * [1,-1,1]

            #baseRot = aljabr.quaternion2Matrix(bases[bIdx][1])
            #transformationMat = np.dot(transformationMat[:3,:3], baseRot)

            pos += bases[bIdx][0]
            orientationQuat = tm.quaternion_from_matrix(transformationMat)
            qx = orientationQuat[0]
            qy = orientationQuat[1]
            qz = orientationQuat[2]
            w = orientationQuat[3]

            if w > 0:
                qx = -qx
                qy = -qy
                qz = -qz

            # vec3:position vec3:orientation
            f.write('\t%f %f %f %f %f %f\n' % (pos[0], pos[1], pos[2], qx, qy, qz))
        f.write('}\n\n')

    f.close()
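The sign flip on (qx, qy, qz) when w > 0 mirrors the MD5 convention: the file stores only the quaternion's vector part, and loaders reconstruct w as a non-positive root. A loader-side sketch of that reconstruction:

import math

def md5_restore_w(qx, qy, qz):
    # MD5 files omit w; it is recovered as a non-positive square root,
    # which is why the exporter above negates (qx, qy, qz) whenever its
    # computed w came out positive.
    t = 1.0 - (qx * qx + qy * qy + qz * qz)
    return -math.sqrt(t) if t > 0.0 else 0.0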
Example #56
    def createAnimationTrack(self, skel=None, name=None):
        """
        Create an animation track from the motion stored in this BVH file.
        """
        def _bvhJointName(boneName):
            # Remove the tail from duplicate bone names (added by the BVH parser)
            import re
            if not boneName:
                return boneName
            r = re.search(r"(.*)_\d+$", boneName)
            if r:
                return r.group(1)
            return boneName

        def _createAnimation(jointsData, name, frameTime, nFrames):
            nJoints = len(jointsData)
            #nFrames = len(jointsData[0])

            # Interweave joints animation data, per frame with joints in breadth-first order
            animData = np.hstack(jointsData).reshape(nJoints*nFrames,3,4)
            framerate = 1.0/frameTime
            return animation.AnimationTrack(name, animData, nFrames, framerate)

        if name is None:
            name = self.name

        if skel is None:
            jointsData = [joint.matrixPoses for joint in self.getJoints() if not joint.isEndConnector()]
            # We leave out end effectors as they should not have animation data

            return _createAnimation(jointsData, name, self.frameTime, self.frameCount)
        elif isinstance(skel, list):
            # skel parameter is a list of joint or bone names
            jointsOrder = skel

            jointsData = []
            for jointName in jointsOrder:
                jointName = _bvhJointName(jointName)
                if jointName and self.getJointByCanonicalName(jointName) is not None:
                    poseMats = self.getJointByCanonicalName(jointName).matrixPoses.copy()
                    jointsData.append(poseMats)
                else:
                    jointsData.append(animation.emptyTrack(self.frameCount))

            return _createAnimation(jointsData, name, self.frameTime, self.frameCount)
        else:
            # skel parameter is a Skeleton
            jointsData = []
            for bone in skel.getBones():
                if len(bone.reference_bones) > 0:
                    # Combine the rotations of reference bones to influence this bone
                    bvhJoints = []
                    for bonename in bone.reference_bones:
                        jointname = _bvhJointName(bonename)
                        joint = self.getJointByCanonicalName(jointname)
                        if joint:
                            bvhJoints.append(joint)

                    if len(bvhJoints) == 0:
                        poseMats = animation.emptyTrack(self.frameCount)
                    elif len(bvhJoints) == 1:
                        poseMats = bvhJoints[0].matrixPoses.copy()
                    else:  # len(bvhJoints) >= 2:
                        # Combine the rotations using quaternions to simplify math and normalizing (rotations only)
                        poseMats = animation.emptyTrack(self.frameCount)
                        m = np.identity(4, dtype=np.float32)
                        for f_idx in xrange(self.frameCount):
                            m[:3,:4] = bvhJoints[0].matrixPoses[f_idx]
                            q1 = tm.quaternion_from_matrix(m, True)
                            m[:3,:4] = bvhJoints[1].matrixPoses[f_idx]
                            q2 = tm.quaternion_from_matrix(m, True)

                            quat = tm.quaternion_multiply(q2, q1)

                            for bvhJoint in bvhJoints[2:]:
                                m[:3,:4] = bvhJoint.matrixPoses[f_idx]
                                q = tm.quaternion_from_matrix(m, True)
                                quat = tm.quaternion_multiply(q, quat)

                            poseMats[f_idx] = tm.quaternion_matrix( quat )[:3,:4]

                    jointsData.append(poseMats)
                else:
                    # Map bone to joint by bone name
                    jointName = _bvhJointName(bone.name)
                    joint = self.getJointByCanonicalName(jointName)
                    if joint:
                        jointsData.append( joint.matrixPoses.copy() )
                    else:
                        jointsData.append(animation.emptyTrack(self.frameCount))

            return _createAnimation(jointsData, name, self.frameTime, self.frameCount)
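The reference-bone blend above relies on quaternion_multiply(q2, q1) composing rotations the same way as the matrix product R2 * R1 (q1 applied first, then q2). A small consistency check (a sketch; tm is assumed to be Gohlke's transformations module):

import numpy as np
import transformations as tm

q1 = tm.random_quaternion()
q2 = tm.random_quaternion()
q21 = tm.quaternion_multiply(q2, q1)   # q1 first, then q2
assert np.allclose(tm.quaternion_matrix(q21),
                   tm.quaternion_matrix(q2).dot(tm.quaternion_matrix(q1)))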
Example #57
def quatswap(quat):
    mat = transformations.quaternion_matrix(quat)
    mat_new = np.c_[mat[:,2],mat[:,1],-mat[:,0],mat[:,3]]
    return transformations.quaternion_from_matrix(mat_new)
def mat2quat(mat33):
    mat44 = np.eye(4)
    mat44[:3,:3] = mat33
    return transformations.quaternion_from_matrix(mat44)
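mat2quat pads the 3x3 block into a 4x4 identity so that it is safe for variants of quaternion_from_matrix that index the full homogeneous matrix. A roundtrip usage sketch (assuming Gohlke's transformations module):

import numpy as np
import transformations

R = transformations.rotation_matrix(0.5, [1, 0, 0])[:3, :3]
q = mat2quat(R)
assert np.allclose(transformations.quaternion_matrix(q)[:3, :3], R)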