Example 1
def pose_transform(frame_pose, relative_pose):
    """
    pose is pos + quat: (x, y, z, w)
    Args:
        frame_pose:
        relative_pose:

    Returns:

    """
    frame_pose = np.array(frame_pose)
    relative_pose = np.array(relative_pose)

    frame_matrix = np.eye(4)
    frame_matrix[:3, :3] = quat2mat(frame_pose[[6, 3, 4, 5]])
    frame_matrix[:3, -1] = frame_pose[:3]

    relative_matrix = np.eye(4)
    relative_matrix[:3, :3] = quat2mat(relative_pose[[6, 3, 4, 5]])
    relative_matrix[:3, -1] = relative_pose[:3]

    new_pos_matrix = frame_matrix.dot(relative_matrix)

    pose = np.zeros(7)
    pose[:3] = new_pos_matrix[:3, -1]
    pose[3:] = mat2quat(new_pos_matrix[:3, :3])[[1, 2, 3, 0]]
    return pose
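
A minimal usage sketch (assuming numpy as np and the quat2mat/mat2quat imports the snippet itself relies on): a frame rotated 90 degrees about z maps a step along its local x-axis onto the world y-axis.

# Usage sketch for pose_transform; "frame" and "relative" are made-up inputs.
frame = [0.0, 0.0, 0.0, 0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4)]  # 90 deg about z
relative = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]  # 1 m along local x, identity rotation
world = pose_transform(frame, relative)
# world[:3] is approximately (0, 1, 0); world[3:] equals the frame quaternion.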
Example 2
    def test_entrywise_product(self, q1, q2):
        # TODO use real matrices
        m1 = quat2mat(q1)
        m2 = quat2mat(q2)
        r1 = w.compile_and_execute(w.entrywise_product, [m1, m2])
        r2 = m1 * m2
        np.testing.assert_array_almost_equal(r1, r2)
Example 3
def ori_transform(frame_ori, relative_ori):
    frame_matrix = quat2mat(frame_ori[[3, 0, 1, 2]])
    relative_matrix = quat2mat(relative_ori[[3, 0, 1, 2]])

    new_ori_matrix = frame_matrix.dot(relative_matrix)

    return mat2quat(new_ori_matrix)[[1, 2, 3, 0]]
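
Matrix composition here mirrors quaternion multiplication, so the result can be cross-checked against tq.qmult (a sketch; the transforms3d.quaternions-as-tq import is an assumption). Signs may differ, since q and -q encode the same rotation.

# Equivalence sketch; assumes: import transforms3d.quaternions as tq
q_frame = np.array([0.0, 0.0, np.sin(np.pi / 8), np.cos(np.pi / 8)])  # xyzw
q_rel = np.array([np.sin(np.pi / 12), 0.0, 0.0, np.cos(np.pi / 12)])  # xyzw
via_mats = ori_transform(q_frame, q_rel)
via_quats = tq.qmult(q_frame[[3, 0, 1, 2]], q_rel[[3, 0, 1, 2]])[[1, 2, 3, 0]]
assert np.allclose(via_mats, via_quats) or np.allclose(via_mats, -via_quats)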
Example 4
def allocentric_to_egocentric(allo_pose, src_type="mat", dst_type="mat"):
    """Given an allocentric (object-centric) pose, compute new camera-centric
    pose Since we do detection on the image plane and our kernels are
    2D-translationally invariant, we need to ensure that rendered objects
    always look identical, independent of where we render them.

    Since objects further away from the optical center undergo skewing,
    we try to visually correct by rotating back the amount between
    optical center ray and object centroid ray. Another way to solve
    that might be translational variance
    (https://arxiv.org/abs/1807.03247)
    """
    # Compute rotation between ray to object centroid and optical center ray
    cam_ray = np.asarray([0, 0, 1.0])
    if src_type == "mat":
        trans = allo_pose[:3, 3]
    elif src_type == "quat":
        trans = allo_pose[4:7]
    else:
        raise ValueError(
            "src_type should be mat or quat, got: {}".format(src_type))
    obj_ray = trans.copy() / np.linalg.norm(trans)
    angle = math.acos(cam_ray.dot(obj_ray))

    # Rotate back by that amount
    if angle > 0:
        if dst_type == "mat":
            ego_pose = np.zeros((3, 4), dtype=allo_pose.dtype)
            ego_pose[:3, 3] = trans
            rot_mat = axangle2mat(axis=np.cross(cam_ray, obj_ray), angle=angle)
            if src_type == "mat":
                ego_pose[:3, :3] = np.dot(rot_mat, allo_pose[:3, :3])
            elif src_type == "quat":
                ego_pose[:3, :3] = np.dot(rot_mat, quat2mat(allo_pose[:4]))
        elif dst_type == "quat":
            ego_pose = np.zeros((7, ), dtype=allo_pose.dtype)
            ego_pose[4:7] = trans
            rot_q = axangle2quat(np.cross(cam_ray, obj_ray), angle)
            if src_type == "quat":
                ego_pose[:4] = qmult(rot_q, allo_pose[:4])
            elif src_type == "mat":
                ego_pose[:4] = qmult(rot_q, mat2quat(allo_pose[:3, :3]))
        else:
            raise ValueError(
                "dst_type should be mat or quat, got: {}".format(dst_type))
    else:  # object lies on the optical axis: nothing to rotate, just convert formats
        if src_type == "mat" and dst_type == "quat":
            ego_pose = np.zeros((7, ), dtype=allo_pose.dtype)
            ego_pose[:4] = mat2quat(allo_pose[:3, :3])
            ego_pose[4:7] = allo_pose[:3, 3]
        elif src_type == "quat" and dst_type == "mat":
            ego_pose = np.zeros((3, 4), dtype=allo_pose.dtype)
            ego_pose[:3, :3] = quat2mat(allo_pose[:4])
            ego_pose[:3, 3] = allo_pose[4:7]
        else:
            ego_pose = allo_pose.copy()
    return ego_pose
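
A quick illustration of the correction described in the docstring (a sketch with a made-up 3x4 [R|t] pose): an identity allocentric rotation off the optical axis picks up a compensating rotation in the egocentric pose, while the translation is left untouched.

# Illustration sketch: identity allocentric rotation, object off the optical axis.
allo = np.zeros((3, 4))
allo[:3, :3] = np.eye(3)
allo[:3, 3] = [0.3, 0.0, 1.0]
ego = allocentric_to_egocentric(allo)
# ego[:3, :3] is now a rotation of roughly 16.7 deg about the y-axis.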
Example 5
    def _evaluate(self, ind, pose_est, pose_tgt, intrinsic_matrix):

        if cfg.TEST.VISUALIZE:
            print('iteration %d' % (ind))

        # for each object
        for i in range(pose_est.shape[0]):
            cls = int(pose_est[i, 1])

            RT_est = np.zeros((3, 4), dtype=np.float32)
            RT_est[:3, :3] = quat2mat(pose_est[i, 2:6])
            RT_est[:, 3] = pose_est[i, 6:]

            RT_tgt = np.zeros((3, 4), dtype=np.float32)
            RT_tgt[:3, :3] = quat2mat(pose_tgt[i, 2:6])
            RT_tgt[:, 3] = pose_tgt[i, 6:]

            # rotation and translation error
            error_rot = re(RT_est[:3, :3], RT_tgt[:3, :3])
            error_tran = te(RT_est[:, 3], RT_tgt[:, 3])

            if self._classes[cls] == 'eggbox' and error_rot > 90:
                RT_z = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0]])
                RT_est = se3_mul(RT_est, RT_z)
                error_rot = re(RT_est[:3, :3], RT_tgt[:3, :3])
                error_tran = te(RT_est[:, 3], RT_tgt[:, 3])

            if error_rot < 5.0 and error_tran < 0.05:
                self._correct_poses[ind, 0] += 1
            if cfg.TEST.VISUALIZE:
                print('obj %d, rot %.2f, tran %.4f' %
                      (i + 1, error_rot, error_tran))

            # compute 6D pose error
            if self._classes[cls] == 'eggbox' or self._classes[cls] == 'glue':
                error = adi(RT_est[:3, :3], RT_est[:, 3], RT_tgt[:3, :3],
                            RT_tgt[:, 3], self._points[cls])
            else:
                error = add(RT_est[:3, :3], RT_est[:, 3], RT_tgt[:3, :3],
                            RT_tgt[:, 3], self._points[cls])

            threshold = 0.1 * np.linalg.norm(self._extents[cls, :])
            if error < threshold:
                self._correct_poses[ind, 1] += 1
            if cfg.TEST.VISUALIZE:
                print('average distance error: {}'.format(error))

            # reprojection error
            error_reprojection = reproj(intrinsic_matrix, RT_est[:3, :3],
                                        RT_est[:, 3], RT_tgt[:3, :3],
                                        RT_tgt[:, 3], self._points[cls])
            if error_reprojection < 5.0:
                self._correct_poses[ind, 2] += 1
            if cfg.TEST.VISUALIZE:
                print('reprojection error: {}'.format(error_reprojection))
Example 6
def RT_transform_batch_cpu(quaternion_delta, translation, poses_src):
    poses_tgt = poses_src.copy()
    for i in range(poses_src.shape[0]):
        cls = int(poses_src[i, 1]) if quaternion_delta.shape[1] > 4 else 0
        if all(poses_src[i, 2:] == 0):
            poses_tgt[i, 2:] = 0
        else:
            poses_tgt[i, 2:6] = mat2quat(
                np.dot(quat2mat(quaternion_delta[i, 4 * cls:4 * cls + 4]), quat2mat(poses_src[i, 2:6])))
            poses_tgt[i, 6:] = translation[i, 3 * cls:3 * cls + 3]
    return poses_tgt
Example 7
    def get_six_dof_act(self, transform_params, heightmap, pose0, pose1):
        """Adjust SE(3) poses via the in-plane SE(2) augmentation transform."""
        p1_position, p1_rotation = pose1[0], pose1[1]
        p0_position, p0_rotation = pose0[0], pose0[1]

        if transform_params is not None:
            t_world_center, t_world_centernew = utils.get_se3_from_image_transform(
                transform_params[0], transform_params[1], transform_params[2],
                heightmap, self.bounds, self.pixel_size)
            t_worldnew_world = t_world_centernew @ np.linalg.inv(t_world_center)
        else:
            t_worldnew_world = np.eye(4)

        p1_quat_wxyz = (p1_rotation[3], p1_rotation[0], p1_rotation[1],
                        p1_rotation[2])
        # quat2mat returns a 3x3 rotation; embed it in a 4x4 transform
        t_world_p1 = np.eye(4)
        t_world_p1[0:3, 0:3] = quaternions.quat2mat(p1_quat_wxyz)
        t_world_p1[0:3, 3] = np.array(p1_position)

        t_worldnew_p1 = t_worldnew_world @ t_world_p1

        p0_quat_wxyz = (p0_rotation[3], p0_rotation[0], p0_rotation[1],
                        p0_rotation[2])
        t_world_p0 = np.eye(4)
        t_world_p0[0:3, 0:3] = quaternions.quat2mat(p0_quat_wxyz)
        t_world_p0[0:3, 3] = np.array(p0_position)
        t_worldnew_p0 = t_worldnew_world @ t_world_p0

        t_worldnew_p0theta0 = t_worldnew_p0 * 1.0
        t_worldnew_p0theta0[0:3, 0:3] = np.eye(3)

        # PLACE FRAME, adjusted for this 0 rotation on pick
        t_p0_p0theta0 = np.linalg.inv(t_worldnew_p0) @ t_worldnew_p0theta0
        t_worldnew_p1theta0 = t_worldnew_p1 @ t_p0_p0theta0

        # convert the rotation above to a quaternion, then to Euler angles
        quatwxyz_worldnew_p1theta0 = quaternions.mat2quat(
            t_worldnew_p1theta0[0:3, 0:3])
        q = quatwxyz_worldnew_p1theta0
        quatxyzw_worldnew_p1theta0 = (q[1], q[2], q[3], q[0])
        p1_rotation = quatxyzw_worldnew_p1theta0
        p1_euler = utils.quatXYZW_to_eulerXYZ(p1_rotation)

        roll_scaled = p1_euler[0] / self.theta_scale
        pitch_scaled = p1_euler[1] / self.theta_scale
        p1_theta_scaled = -p1_euler[2] / self.theta_scale

        x = p1_position[0]
        y = p1_position[1]
        z = p1_position[2]

        return np.array([x, y, p1_theta_scaled, roll_scaled, pitch_scaled, z])
Example 8
def egocentric_to_allocentric(ego_pose,
                              src_type="mat",
                              dst_type="mat",
                              cam_ray=(0, 0, 1.0)):
    # Compute rotation between ray to object centroid and optical center ray
    cam_ray = np.asarray(cam_ray)
    if src_type == "mat":
        trans = ego_pose[:3, 3]
    elif src_type == "quat":
        trans = ego_pose[4:7]
    else:
        raise ValueError(
            "src_type should be mat or quat, got: {}".format(src_type))
    obj_ray = trans.copy() / np.linalg.norm(trans)
    angle = math.acos(cam_ray.dot(obj_ray))

    # Rotate back by that amount
    if angle > 0:
        if dst_type == "mat":
            allo_pose = np.zeros((3, 4), dtype=ego_pose.dtype)
            allo_pose[:3, 3] = trans
            rot_mat = axangle2mat(axis=np.cross(cam_ray, obj_ray),
                                  angle=-angle)
            if src_type == "mat":
                allo_pose[:3, :3] = np.dot(rot_mat, ego_pose[:3, :3])
            elif src_type == "quat":
                allo_pose[:3, :3] = np.dot(rot_mat, quat2mat(ego_pose[:4]))
        elif dst_type == "quat":
            allo_pose = np.zeros((7, ), dtype=ego_pose.dtype)
            allo_pose[4:7] = trans
            rot_q = axangle2quat(np.cross(cam_ray, obj_ray), -angle)
            if src_type == "quat":
                allo_pose[:4] = qmult(rot_q, ego_pose[:4])
            elif src_type == "mat":
                allo_pose[:4] = qmult(rot_q, mat2quat(ego_pose[:3, :3]))
        else:
            raise ValueError(
                "dst_type should be mat or quat, got: {}".format(dst_type))
    else:
        if src_type == "mat" and dst_type == "quat":
            allo_pose = np.zeros((7, ), dtype=ego_pose.dtype)
            allo_pose[:4] = mat2quat(ego_pose[:3, :3])
            allo_pose[4:7] = ego_pose[:3, 3]
        elif src_type == "quat" and dst_type == "mat":
            allo_pose = np.zeros((3, 4), dtype=ego_pose.dtype)
            allo_pose[:3, :3] = quat2mat(ego_pose[:4])
            allo_pose[:3, 3] = ego_pose[4:7]
        else:
            allo_pose = ego_pose.copy()
    return allo_pose
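
Because this applies the opposite-signed rotation about the same axis as allocentric_to_egocentric above, a round trip should recover the input up to floating-point error (a sanity-check sketch using the default mat/mat types):

# Round-trip sanity check sketch for the two conversions.
pose = np.zeros((3, 4))
pose[:3, :3] = np.eye(3)
pose[:3, 3] = [0.2, -0.1, 1.0]  # off the optical axis
recovered = allocentric_to_egocentric(egocentric_to_allocentric(pose))
assert np.allclose(recovered, pose, atol=1e-6)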
Example 9
def test_quat2mat():
    # also tested in roundtrip case below
    M = tq.quat2mat([1, 0, 0, 0])
    assert_array_almost_equal(M, np.eye(3))
    # Non-unit quaternion
    M = tq.quat2mat([3, 0, 0, 0])
    assert_array_almost_equal(M, np.eye(3))
    M = tq.quat2mat([0, 1, 0, 0])
    assert_array_almost_equal(M, np.diag([1, -1, -1]))
    # Non-unit quaternion, same result as normalized
    M = tq.quat2mat([0, 2, 0, 0])
    assert_array_almost_equal(M, np.diag([1, -1, -1]))
    M = tq.quat2mat([0, 0, 0, 0])
    assert_array_almost_equal(M, np.eye(3))
Example 10
def quat_trans_to_pose_m(quat, trans):
    se3_mx = np.zeros((3, 4))
    # quat = quat / LA.norm(quat)
    R = quat2mat(quat)  # normalize internally
    se3_mx[:, :3] = R
    se3_mx[:, 3] = trans
    return se3_mx
Example 11
    def extract_x_y_theta(self,
                          object_info,
                          t_worldaug_world=None,
                          preserve_theta=False):
        """Extract in-plane theta."""
        object_position = object_info[0]
        object_quat_xyzw = object_info[1]

        if t_worldaug_world is not None:
            object_quat_wxyz = (object_quat_xyzw[3], object_quat_xyzw[0],
                                object_quat_xyzw[1], object_quat_xyzw[2])
            # build the homogeneous world-to-object transform from quat + position
            t_world_object = np.eye(4)
            t_world_object[0:3, 0:3] = quaternions.quat2mat(object_quat_wxyz)
            t_world_object[0:3, 3] = np.array(object_position)

            t_worldaug_object = t_worldaug_world @ t_world_object

            object_quat_wxyz = quaternions.mat2quat(t_worldaug_object[0:3, 0:3])
            if not preserve_theta:
                object_quat_xyzw = (object_quat_wxyz[1], object_quat_wxyz[2],
                                    object_quat_wxyz[3], object_quat_wxyz[0])
            object_position = t_worldaug_object[0:3, 3]

        object_xy = object_position[0:2]
        object_theta = -np.float32(
            utils.quatXYZW_to_eulerXYZ(object_quat_xyzw)[2]) / self.theta_scale
        return np.hstack((object_xy, object_theta)).astype(
            np.float32), object_position, object_quat_xyzw
Example 12
def print_poses(poses):
    print 'translations'
    print poses[:, :3]
    print 'euler'
    for i in xrange(poses.shape[0]):
        a = txe.mat2euler(txq.quat2mat(poses[i, 3:]))
        print [np.rad2deg(aa) for aa in a]
Example 13
    def extract_global_position(self, cam_pos_x):
        # Convert camera position to relative polar coordinate of robot
        depth_index = self.MIN_VISUAL_DEPTH_INDEX + int(
            float(cam_pos_x) * self.VISUAL_DEPTH_RANGE / self.VISUAL_RGB_RANGE)
        depth = self.depth_data[depth_index]
        if depth_index > 10:
            depth = max(max(depth, self.depth_data[depth_index - 5]),
                        self.depth_data[depth_index - 10])
        if depth_index < 501:
            depth = max(max(depth, self.depth_data[depth_index + 5]),
                        self.depth_data[depth_index + 10])
        degree = self.MIN_DEGREE + (float(cam_pos_x * self.VISUAL_DEGREE_RANGE)
                                    / self.VISUAL_RGB_RANGE)

        # Convert relative polar transformation to relative cartesian transformation matrix
        rel_pos_x = depth * np.cos(np.deg2rad(degree))
        rel_pos_y = depth * np.sin(np.deg2rad(degree))
        obj_vec = np.array([rel_pos_x, rel_pos_y, 0, 1])

        # Convert cartesian coordinate to absolute cartesian coordinate
        rot_mat_robot = quat2mat([
            self.orientation.w, self.orientation.x, self.orientation.y,
            self.orientation.z
        ])
        trans_mat_robot = compose(
            np.array([self.position.x, self.position.y, self.position.z]),
            rot_mat_robot, np.ones(3))

        # Perform transformation
        pos_x, pos_y, pos_z = np.dot(trans_mat_robot, obj_vec)[:3]

        return pos_x, pos_y
Example 14
    def icp_refine_(self, pose, anno, output):
        depth = read_depth(anno['depth_path']).astype(np.uint16)
        mask = torch.argmax(output['seg'], dim=1)[0].detach().cpu().numpy()
        mask = mask.astype(np.int32)
        pose = pose.astype(np.float32)

        poses = np.zeros([1, 7], dtype=np.float32)
        poses[0, :4] = mat2quat(pose[:, :3])
        poses[0, 4:] = pose[:, 3]

        poses_new = np.zeros([1, 7], dtype=np.float32)
        poses_icp = np.zeros([1, 7], dtype=np.float32)

        fx = 572.41140
        fy = 573.57043
        px = 325.26110
        py = 242.04899
        zfar = 6.0
        znear = 0.25
        factor = 1000.0
        error_threshold = 0.01

        rois = np.zeros([1, 6], dtype=np.float32)
        rois[:, :] = 1

        self.icp_refiner.solveICP(mask, depth, self.height, self.width, fx, fy,
                                  px, py, znear, zfar, factor, rois.shape[0],
                                  rois, poses, poses_new, poses_icp,
                                  error_threshold)

        pose_icp = np.zeros([3, 4], dtype=np.float32)
        pose_icp[:, :3] = quat2mat(poses_icp[0, :4])
        pose_icp[:, 3] = poses_icp[0, 4:]

        return pose_icp
Example 15
def test_quaternion_reconstruction():
    # Test reconstruction of arbitrary unit quaternions
    for q in unit_quats:
        M = tq.quat2mat(q)
        qt = tq.mat2quat(M)
        # Accept positive or negative match
        posm = np.allclose(q, qt)
        negm = np.allclose(q, -qt)
        assert posm or negm
    # Test vectorized operations
    Q = np.array(list(unit_quats))
    M = tq.quat2mat(Q)
    QT = tq.mat2quat(M)
    posm = np.isclose(Q, QT)
    negm = np.isclose(Q, -QT)
    assert np.logical_or(posm, negm).all()
Example 16
    def get_obs(self, env):
        """
        Get current LiDAR sensor reading and occupancy grid (optional)

        :return: LiDAR sensor reading and local occupancy grid, normalized to [0.0, 1.0]
        """
        laser_angular_half_range = self.laser_angular_range / 2.0
        if self.laser_link_name not in env.robots[0].parts:
            raise Exception('Trying to simulate LiDAR sensor, but laser_link_name cannot be found in the robot URDF file. Please add a link named laser_link_name at the intended laser pose. Feel free to check out assets/models/turtlebot/turtlebot.urdf and examples/configs/turtlebot_p2p_nav.yaml for examples.')
        laser_pose = env.robots[0].parts[self.laser_link_name].get_pose()
        angle = np.arange(
            -laser_angular_half_range / 180 * np.pi,
            laser_angular_half_range / 180 * np.pi,
            self.laser_angular_range / 180.0 * np.pi / self.n_horizontal_rays)
        unit_vector_local = np.array(
            [[np.cos(ang), np.sin(ang), 0.0] for ang in angle])
        transform_matrix = quat2mat(
            [laser_pose[6], laser_pose[3], laser_pose[4], laser_pose[5]])  # [x, y, z, w]
        unit_vector_world = transform_matrix.dot(unit_vector_local.T).T

        start_pose = np.tile(laser_pose[:3], (self.n_horizontal_rays, 1))
        start_pose += unit_vector_world * self.min_laser_dist
        end_pose = laser_pose[:3] + unit_vector_world * self.laser_linear_range
        results = p.rayTestBatch(start_pose, end_pose, 6)  # numThreads = 6

        # hit fraction = [0.0, 1.0] of self.laser_linear_range
        hit_fraction = np.array([item[2] for item in results])
        hit_fraction = self.noise_model.add_noise(hit_fraction)
        scan = np.expand_dims(hit_fraction, 1)

        state = {}
        state['scan'] = scan
        if 'occupancy_grid' in self.modalities:
            state['occupancy_grid'] = self.get_local_occupancy_grid(scan)
        return state
Example 17
def get_position_now(limb):
    current_pose = limb.endpoint_pose()
    x = current_pose['position'].x
    y = current_pose['position'].y
    z = current_pose['position'].z

    rx = current_pose['orientation'].x
    ry = current_pose['orientation'].y
    rz = current_pose['orientation'].z
    w = current_pose['orientation'].w
    matrix = quaternions.quat2mat([w, rx, ry, rz])
    pos_origin = np.array([x, y, z])
    pos = np.dot(matrix, offset_probe) + pos_origin
    x = pos[0]
    y = pos[1]
    z = pos[2]
    print(x, y, z, end=' ')
    print()

    file = "/home/philos/Desktop/output.txt"
    with open(file, 'a') as f:
        json.dump(x, f, ensure_ascii=True)
        f.write(" ")
        json.dump(y, f, ensure_ascii=True)
        f.write(" ")
        json.dump(z, f, ensure_ascii=True)
        f.write(" ")
        f.write("\n")
Example 18
def print_poses(poses):
    print('translations')
    print(poses[:, :3])
    print('euler')
    for i in range(poses.shape[0]):
        a = txe.mat2euler(txq.quat2mat(poses[i, 3:]))
        print([np.rad2deg(aa) for aa in a])
Example 19
    def get_scan(self):
        """
        :return: LiDAR sensor reading, normalized to [0.0, 1.0]
        """
        laser_angular_half_range = self.laser_angular_range / 2.0
        if self.laser_link_name not in self.robots[0].parts:
            raise Exception('Trying to simulate LiDAR sensor, but laser_link_name cannot be found in the robot URDF file. Please add a link named laser_link_name at the intended laser pose. Feel free to check out assets/models/turtlebot/turtlebot.urdf and examples/configs/turtlebot_p2p_nav.yaml for examples.')
        laser_pose = self.robots[0].parts[self.laser_link_name].get_pose()
        angle = np.arange(-laser_angular_half_range / 180 * np.pi,
                          laser_angular_half_range / 180 * np.pi,
                          self.laser_angular_range / 180.0 * np.pi / self.n_horizontal_rays)
        unit_vector_local = np.array([[np.cos(ang), np.sin(ang), 0.0] for ang in angle])
        transform_matrix = quat2mat([laser_pose[6], laser_pose[3], laser_pose[4], laser_pose[5]])  # [x, y, z, w]
        unit_vector_world = transform_matrix.dot(unit_vector_local.T).T

        start_pose = np.tile(laser_pose[:3], (self.n_horizontal_rays, 1))
        start_pose += unit_vector_world * self.min_laser_dist
        end_pose = laser_pose[:3] + unit_vector_world * self.laser_linear_range
        results = p.rayTestBatch(start_pose, end_pose, 6)  # numThreads = 6

        hit_fraction = np.array([item[2] for item in results])  # hit fraction = [0.0, 1.0] of self.laser_linear_range
        hit_fraction = self.add_naive_noise_to_sensor(hit_fraction, self.scan_noise_rate)
        scan = np.expand_dims(hit_fraction, 1)

        xyz = hit_fraction[:, np.newaxis] * unit_vector_local * 10
        xyz = xyz[np.equal(np.isnan(xyz), False)]
        xyz = xyz.reshape(xyz.shape[0] // 3, -1)
        return xyz  # scan
Example 20
    def posquat2Matrix(self, pos, quat):
        T = np.eye(4)
        T[0:3, 0:3] = quaternions.quat2mat([quat[-1], quat[0], quat[1], quat[2]])
        T[0:3, 3] = pos

        return np.array(T)
Example 21
def test_pose_utils():
    """
    Tests the pose utils
    :return:
    """
    TEST_COMPOSE = True
    TEST_INV = True

    def ra(_): return np.random.uniform(0, 2 * math.pi)

    if TEST_COMPOSE:
        print('Testing pose composing...')
        R1 = txe.euler2mat(ra(1), ra(1), ra(1))
        t1 = np.random.rand(3)
        R2 = txe.euler2mat(ra(1), ra(1), ra(1))
        t2 = np.random.rand(3)

        # homogeneous matrix method
        R = np.dot(R1, R2)
        t = t1 + np.dot(R1, t2)
        print('From homogeneous matrices, t = ')
        print(t)
        print('R = ')
        print(R)

        # quaternion method
        q1 = txq.mat2quat(R1)
        q2 = txq.mat2quat(R2)

        p1 = torch.cat((torch.from_numpy(t1), torch.from_numpy(q1)))
        p2 = torch.cat((torch.from_numpy(t2), torch.from_numpy(q2)))
        p = compose_pose_quaternion(torch.unsqueeze(p1, 0),
                                    torch.unsqueeze(p2, 0))
        t = p[:, :3].numpy().squeeze()
        q = p[:, 3:].numpy().squeeze()
        print('From quaternions, t = ')
        print(t)
        print('R = ')
        print(txq.quat2mat(q))

    if TEST_INV:
        print('Testing pose inversion...')
        R = txe.euler2mat(ra(1), ra(1), ra(1))
        t = np.random.rand(3)
        T = np.eye(4)
        T[:3, :3] = R
        T[:3, -1] = t

        q = txq.mat2quat(R)
        p = torch.cat((torch.from_numpy(t), torch.from_numpy(q)))
        pinv = invert_pose_quaternion(torch.unsqueeze(p, 0))
        tinv, qinv = pinv[:, :3], pinv[:, 3:]
        Rinv = txq.quat2mat(qinv.numpy().squeeze())
        Tinv = np.eye(4)
        Tinv[:3, :3] = Rinv
        Tinv[:3, -1] = tinv.numpy().squeeze()
        print('T * T^(-1) = ')
        print(np.dot(T, Tinv))
Example 22
def RT_transform(pose_src, r, t, T_means, T_stds, rot_coord="MODEL"):
    # r: 4(quat) or 3(euler) number
    # t: 3 number, (delta_x, delta_y, scale)
    r = np.squeeze(r)
    if r.shape[0] == 3:
        Rm_delta = euler2mat(r[0], r[1], r[2])
    elif r.shape[0] == 4:
        # QUAT
        quat = r / LA.norm(r)
        Rm_delta = quat2mat(quat)
    else:
        raise Exception("Unknown r shape: {}".format(r.shape))
    t_delta = np.squeeze(t)

    if rot_coord.lower() == "naive":
        se3_mx = np.zeros((3, 4))
        se3_mx[:, :3] = Rm_delta
        se3_mx[:, 3] = t_delta
        pose_est = se3_mul(se3_mx, pose_src)
    else:
        pose_est = np.zeros((3, 4))
        pose_est[:3, :3] = R_transform(pose_src[:3, :3], Rm_delta, rot_coord)
        pose_est[:3, 3] = T_transform(pose_src[:, 3], t_delta, T_means, T_stds,
                                      rot_coord)

    return pose_est
Example 23
def pose_to_matrix(pose):
    t = pose[:3].reshape(3, 1)
    quat_tmp = pose[3:]
    quat = [quat_tmp[3], quat_tmp[0], quat_tmp[1], quat_tmp[2]]
    R = quaternions.quat2mat(quat)
    T = np.vstack((np.hstack((R, t)), [0, 0, 0, 1]))
    return T
Example 24
def rotate_pose(point, quat, theta, axis="x"):
    # set quaternion to transforms3d format, i.e. w,x,y,z
    quat_t3d = [quat[3], quat[0], quat[1], quat[2]]
    rot_mat = quaternions.quat2mat(quat_t3d)

    H1 = np.eye(4)
    H1[:3, :3] = rot_mat
    H1[:3, 3] = point

    H2 = np.eye(4)
    if axis == "x":
        rot_axis = np.array(eulerangles.x_rotation(theta))
    elif axis == "y":
        rot_axis = np.array(eulerangles.y_rotation(theta))
    elif axis == "z":
        rot_axis = np.array(eulerangles.z_rotation(theta))

    print rot_axis
    H2[:3, :3] = rot_axis

    H3 = np.dot(H1, H2)
    point_out = H3[:3, 3]
    quat_t3dout = quaternions.mat2quat(H3[:3, :3])
    # Turn to ROS format, i.e. x, y, z, w
    quat_out = [quat_t3dout[1], quat_t3dout[2], quat_t3dout[3], quat_t3dout[0]]
    rospy.loginfo("Rotated pose: ")
    print point_out, quat_out

    return point_out, quat_out
Example 25
def quat2euler(q):
    ''' Return Euler angles corresponding to quaternion `q`

    Parameters
    ----------
    q : 4 element sequence
       w, x, y, z of quaternion

    Returns
    -------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Notes
    -----
    It's possible to reduce the amount of calculation a little, by
    combining parts of the ``quat2mat`` and ``mat2euler`` functions, but
    the reduction in computation is small, and the code repetition is
    large.
    '''
    # delayed import to avoid cyclic dependencies
    import transforms3d.quaternions as nq
    return mat2euler(nq.quat2mat(q))
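
For example, a 90-degree rotation about the z-axis (a quick check of the ordering described above; assumes numpy as np):

q = [np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)]  # w, x, y, z
z, y, x = quat2euler(q)
# z is approximately pi/2; y and x are approximately 0.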
Example 26
    def process_input(self):
        """
        Function to extract data from the input array.
        Input array is the output of the optimization step
        which holds the generated sparse model of the object
        and the relative scene transformations.
        """
        # get the relative scene transformations from the input array
        self.scene_tfs = np.empty((0, 4, 4))
        for input_arr in self.input_arrays:
            num_scenes = input_arr['ref'].shape[0]
            out_ts = input_arr['scenes'][:(num_scenes) * 3].reshape(
                (num_scenes, 3))
            out_qs = input_arr['scenes'][(num_scenes) * 3:(num_scenes) *
                                         7].reshape((num_scenes, 4))
            out_tfs = np.asarray([
                tfa.compose(t, tfq.quat2mat(q), np.ones(3))
                for t, q in zip(out_ts, out_qs)
            ])
            self.scene_tfs = np.vstack((self.scene_tfs, out_tfs))

        self.object_model = []
        for obj in self.model_paths:
            #read sparse model from input array
            sparse_model = SparseModel().reader(obj)
            #read dense model from .PLY file
            dense_model = o3d.io.read_point_cloud(
                os.path.join(os.path.dirname(obj), "dense.ply"))
            self.object_model.append(
                [sparse_model, np.asarray(dense_model.points)])
        return
Example 27
    def get_xyz_points(self, quat):
        """
    Set absolute (global) rotation for the hand.

    Parameters
    ----------
    quat : np.ndarray, shape [J, 4]
      Absolute rotations for each joint in quaternion.

    Returns
    -------
    np.ndarray, shape [V, 3]
      Mesh vertices after posing.
    """
        mats = []
        for j in range(MANOHandJoints.n_joints):
            mats.append(quat2mat(quat[j]))
        mats = np.stack(mats, 0)

        pose = np.matmul(mats, self.ref_pose)
        joint_xyz = [None] * MANOHandJoints.n_joints
        for j in range(MANOHandJoints.n_joints):
            joint_xyz[j] = pose[j]
            parent = MANOHandJoints.parents[j]
            if parent is not None:
                joint_xyz[j] += joint_xyz[parent]
        joint_xyz = np.stack(joint_xyz, 0)[..., 0]

        return joint_xyz
Example 28
    def extract_x_y_theta(self,
                          object_info,
                          t_worldaug_world=None,
                          preserve_theta=False):
        """Extract in-plane theta."""
        object_position = object_info[0]
        object_quat_xyzw = object_info[1]

        if t_worldaug_world is not None:
            object_quat_wxyz = (object_quat_xyzw[3], object_quat_xyzw[0],
                                object_quat_xyzw[1], object_quat_xyzw[2])
            # quat2mat returns a 3x3 rotation; embed it in a 4x4 transform
            t_world_object = np.eye(4)
            t_world_object[0:3, 0:3] = quaternions.quat2mat(object_quat_wxyz)
            t_world_object[0:3, 3] = np.array(object_position)
            t_worldaug_object = t_worldaug_world @ t_world_object

            object_quat_wxyz = quaternions.mat2quat(t_worldaug_object[0:3, 0:3])
            if not preserve_theta:
                object_quat_xyzw = (object_quat_wxyz[1], object_quat_wxyz[2],
                                    object_quat_wxyz[3], object_quat_wxyz[0])
            object_position = t_worldaug_object[0:3, 3]

        object_xy = object_position[0:2]
        object_theta = -np.float32(
            utils.quatXYZW_to_eulerXYZ(object_quat_xyzw)[2]) / self.theta_scale
        return np.hstack((object_xy, object_theta)).astype(
            np.float32), object_position, object_quat_xyzw
Example 29
    def get_six_dof_object(self, object_info, t_worldaug_world=None):
        """Calculate the pose of 6DOF object."""
        object_position = object_info[0]
        object_quat_xyzw = object_info[1]

        if t_worldaug_world is not None:
            object_quat_wxyz = (object_quat_xyzw[3], object_quat_xyzw[0],
                                object_quat_xyzw[1], object_quat_xyzw[2])
            # quat2mat returns a 3x3 rotation; embed it in a 4x4 transform
            t_world_object = np.eye(4)
            t_world_object[0:3, 0:3] = quaternions.quat2mat(object_quat_wxyz)
            t_world_object[0:3, 3] = np.array(object_position)
            t_worldaug_object = t_worldaug_world @ t_world_object

            object_quat_wxyz = quaternions.mat2quat(t_worldaug_object[0:3, 0:3])
            object_quat_xyzw = (object_quat_wxyz[1], object_quat_wxyz[2],
                                object_quat_wxyz[3], object_quat_wxyz[0])
            object_position = t_worldaug_object[0:3, 3]

        euler = utils.quatXYZW_to_eulerXYZ(object_quat_xyzw)
        roll = euler[0]
        pitch = euler[1]
        theta = -euler[2]

        return np.asarray([
            object_position[0], object_position[1], object_position[2], roll, pitch,
            theta
        ])
Example 30
def gen_mat(axis, degree):
    axis = axis / LA.norm(axis)
    return quat2mat([
        cos(degree / 360.0 * pi),
        axis[0] * sin(degree / 360.0 * pi),
        axis[1] * sin(degree / 360.0 * pi),
        axis[2] * sin(degree / 360.0 * pi),
    ])
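
The list entries are exactly the axis-angle quaternion with half-angle degree/2 converted to radians, so the result should match transforms3d's axangle2mat (a cross-check sketch; the axangles import is an assumption, not part of the snippet):

# Cross-check sketch; assumes: from transforms3d.axangles import axangle2mat
axis = np.array([1.0, 2.0, 2.0])
assert np.allclose(gen_mat(axis, 30.0), axangle2mat(axis, np.deg2rad(30.0)))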
Example 31
def translations_quaternions_to_transform(pose):
    t = pose[:3]
    q = pose[3:]

    T = np.eye(4)
    T[:3, :3] = quat2mat(q)
    T[:3, 3] = t
    return T
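
The reverse conversion is symmetric (a companion sketch, not part of the original; the function name is made up and mat2quat is assumed to come from transforms3d.quaternions):

def transform_to_translations_quaternions(T):
    # Recover the (x, y, z, qw, qx, qy, qz) 7-vector from the 4x4 transform.
    return np.concatenate([T[:3, 3], mat2quat(T[:3, :3])])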
Example 32
def test_quaternion_reconstruction():
    # Test reconstruction of arbitrary unit quaternions
    for q in unit_quats:
        M = tq.quat2mat(q)
        qt = tq.mat2quat(M)
        # Accept positive or negative match
        posm = np.allclose(q, qt)
        negm = np.allclose(q, -qt)
        assert posm or negm
Example 33
def test_qinverse():
    # Takes sequence
    iq = tq.qinverse((1, 0, 0, 0))
    # Returns float type
    assert iq.dtype.kind == 'f'
    for M, q in eg_pairs:
        iq = tq.qinverse(q)
        iqM = tq.quat2mat(iq)
        iM = np.linalg.inv(M)
        assert np.allclose(iM, iqM)
Example 34
def test_qeye():
    qi = tq.qeye()
    assert qi.dtype.kind == 'f'
    assert np.all([1,0,0,0]==qi)
    assert np.allclose(tq.quat2mat(qi), np.eye(3))
Example 35
def test_qmult():
    # Test that quaternion multiplication matches matrix multiplication
    for M1, q1 in eg_pairs[0::4]:
        for M2, q2 in eg_pairs[1::4]:
            q21 = tq.qmult(q2, q1)
            assert_array_almost_equal(np.dot(M2,M1), tq.quat2mat(q21))
Example 36
        if filter_ground_points:
            key_frame_id = 0
            keyframe_quaternion = None
            keyframe_location = None
            while key_frame_id < len(timestamps):
                try:
                    keyframe_quaternion = np.array(keyframe_quaternions_dict[timestamps[key_frame_id]])
                    keyframe_location = keyframe_locations_dict[timestamps[key_frame_id]]
                    break
                except KeyError:
                    key_frame_id += 1
            if keyframe_quaternion is None:
                raise StandardError('No valid keyframes found for point {:d}'.format(n_points))
            # normalize quaternion
            keyframe_quaternion /= np.linalg.norm(keyframe_quaternion)
            keyframe_rotation = quaternions.quat2mat(keyframe_quaternion)
            keyframe_translation = np.matrix(keyframe_location).transpose()
            transform_mat = np.zeros([4, 4], dtype=np.float64)
            transform_mat[0:3, 0:3] = keyframe_rotation.transpose()
            # translation in the last column, so transform_mat maps world points
            # (as column vectors) into the keyframe's camera frame
            transform_mat[0:3, 3] = np.asarray(
                np.matrix(-keyframe_rotation.transpose()) * keyframe_translation).ravel()
            transform_mat[3, 3] = 1
            point_location = np.matrix([point_x, point_y, point_z, 1]).transpose()
            transformed_point_location = np.matrix(transform_mat) * point_location
            # homogeneous to non homogeneous coordinates
            transformed_point_height = transformed_point_location[1] / transformed_point_location[3]
            if transformed_point_height < 0:
                is_ground_point[-1] = True

        point_locations.append([point_x, point_y, point_z])
        point_timestamps.append(timestamps)
        n_points += 1