Example #1
# Imports assumed by this excerpt (pytransform3d test code, nose-style):
import os
import tempfile

import numpy as np
from nose import SkipTest
from nose.tools import assert_true
from pytransform3d.rotations import (
    active_matrix_from_intrinsic_euler_xyz, q_id, random_quaternion)
from pytransform3d.transformations import transform_from, transform_from_pq
from pytransform3d.transform_manager import TransformManager


def test_png_export():
    """Test if the graph can be exported to PNG."""
    random_state = np.random.RandomState(0)

    ee2robot = transform_from_pq(
        np.hstack((np.array([0.4, -0.3,
                             0.5]), random_quaternion(random_state))))
    cam2robot = transform_from_pq(np.hstack((np.array([0.0, 0.0, 0.8]), q_id)))
    object2cam = transform_from(
        active_matrix_from_intrinsic_euler_xyz(np.array([0.0, 0.0, 0.5])),
        np.array([0.5, 0.1, 0.1]))

    tm = TransformManager()
    tm.add_transform("end-effector", "robot", ee2robot)
    tm.add_transform("camera", "robot", cam2robot)
    tm.add_transform("object", "camera", object2cam)

    fd, filename = tempfile.mkstemp(".png")
    os.close(fd)  # close the OS-level handle; only the path is needed
    try:
        tm.write_png(filename)
        assert_true(os.path.exists(filename))
    except ImportError:
        raise SkipTest("pydot is required for this test")
    finally:
        if os.path.exists(filename):
            try:
                os.remove(filename)
            except PermissionError:  # WindowsError is undefined off Windows
                pass  # workaround for a file permission problem on Windows
Example #2
# Imports assumed by this excerpt:
import numpy as np
from numpy.testing import assert_array_almost_equal
from pytransform3d.rotations import random_quaternion
from pytransform3d.trajectories import transforms_from_pqs
from pytransform3d.transformations import transform_from_pq


def test_transforms_from_pqs_0dims():
    random_state = np.random.RandomState(0)
    pq = np.empty(7)
    pq[:3] = random_state.randn(3)
    pq[3:] = random_quaternion(random_state)
    A2B = transforms_from_pqs(pq, False)
    assert_array_almost_equal(A2B, transform_from_pq(pq))
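
For context, a sketch of the batched call that the "0dims" in the test name contrasts with: transforms_from_pqs also accepts a stacked array of shape (n_steps, 7) and then returns one 4x4 matrix per row (assuming the pytransform3d.trajectories module, as imported above).

import numpy as np
from pytransform3d.trajectories import transforms_from_pqs

pqs = np.zeros((10, 7))
pqs[:, 0] = np.linspace(0.0, 1.0, 10)  # positions move along the x axis
pqs[:, 3] = 1.0  # identity quaternions, (w, x, y, z) order
A2Bs = transforms_from_pqs(pqs)  # shape (10, 4, 4)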
Example #3
    def compute(self):
        for key in self.cameras_dict:
            frames = self.cameras_dict[key][2].wait_for_frames()
            f = frames.first_or_default(rs.stream.pose)
            data = f.as_pose_frame().get_pose_data()
            # self.cameras_dict[key][3] = data
            # pq layout is [x, y, z, qw, qx, qy, qz]; the sensor axes are
            # remapped (x, -z, y) and scaled by 1000 (meters to millimeters)
            pq = [data.translation.x * 1000.0,
                  -data.translation.z * 1000.0,
                  data.translation.y * 1000.0,
                  data.rotation.w, data.rotation.x,
                  data.rotation.y, data.rotation.z]
            self.cameras_dict[key][1].add_transform(
                "world", "slam_sensor", pytr.transform_from_pq(pq))

            t = self.cameras_dict[key][1].get_transform("world", "origin")
            self.cameras_dict[key][3] = pytr.transform(t, [0, 0, 0, 1])[0:3]
            self.cameras_dict[key][4] = self.quaternion_to_euler_angle(data.rotation.w, data.rotation.x, data.rotation.y, data.rotation.z)
            self.cameras_dict[key][5] = data.mapper_confidence
            self.cameras_dict[key][6] = data.tracker_confidence
            

        # Fuse and print a confidence-weighted pose estimate
        if self.print:
            ret = RoboCompFullPoseEstimation.FullPoseEuler()
            sigma = 0
            # Accumulate confidence-weighted sums from each camera
            for key in self.cameras_dict:
                ret.x = ret.x + self.cameras_dict[key][3][0] * self.cameras_dict[key][6]
                ret.y = ret.y + self.cameras_dict[key][3][1] * self.cameras_dict[key][6]
                ret.z = ret.z + self.cameras_dict[key][3][2] * self.cameras_dict[key][6]
                ret.rx = ret.rx + self.cameras_dict[key][4][0] * self.cameras_dict[key][6]
                ret.ry = ret.ry + self.cameras_dict[key][4][1] * self.cameras_dict[key][6]
                ret.rz = ret.rz + self.cameras_dict[key][4][2] * self.cameras_dict[key][6]
                sigma = sigma + self.cameras_dict[key][6]
            # Weighted average of the position
            ret.x = ret.x / sigma
            ret.y = ret.y / sigma
            ret.z = ret.z / sigma
            
            # Weighted average of the Euler angles
            ret.rx = ret.rx / sigma
            ret.ry = ret.ry / sigma
            ret.rz = ret.rz / sigma
            
            print(sigma, ret.x, ret.y, ret.z, ret.rx, ret.ry, ret.rz)
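
The loop above computes a confidence-weighted average: each camera's position and Euler angles are scaled by its tracker confidence, and the sums are divided by the total weight sigma. A standalone sketch of the same computation (weighted_pose_average is a hypothetical helper, not part of this component):

import numpy as np

def weighted_pose_average(positions, eulers, confidences):
    """Confidence-weighted mean of positions and Euler angles.

    Caveat: naively averaging Euler angles is only reliable when they
    are clustered; it breaks near the +/-pi wrap-around.
    """
    w = np.asarray(confidences, dtype=float)
    mean_pos = (w[:, None] * np.asarray(positions)).sum(axis=0) / w.sum()
    mean_rot = (w[:, None] * np.asarray(eulers)).sum(axis=0) / w.sum()
    return mean_pos, mean_rot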
Example #4
def transform_point_cloud_trans_quat(cloud_msg, translation, rotation,
                                     target_frame):
    """
    transform cloud using transforms3d to calculate transformation matrix

    :type cloud_msg: PointCloud2
    :param translation: translation vector
    :type translation: list
    :param rotation: rotation quaternion, i.e. [w, x, y, z]
    :param target_frame: name of new frame to fill in cloud_msg.header.frame_id
    :return: transformed cloud
    :rtype: PointCloud2
    """
    from pytransform3d.transformations import transform_from_pq

    transform_matrix = transform_from_pq(np.append(translation, rotation))
    return transform_point_cloud(cloud_msg, transform_matrix, target_frame)
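
For reference, a minimal sketch of what this helper builds (illustrative values only): transform_from_pq expects the 7-element concatenation [x, y, z, qw, qx, qy, qz] and returns a 4x4 homogeneous matrix that maps homogeneous points into the target frame.

import numpy as np
from pytransform3d.transformations import transform, transform_from_pq

translation = [1.0, 2.0, 3.0]
rotation = [1.0, 0.0, 0.0, 0.0]  # identity quaternion, (w, x, y, z)
A2B = transform_from_pq(np.append(translation, rotation))
p_B = transform(A2B, [0.0, 0.0, 0.0, 1.0])  # origin of frame A, homogeneous
print(p_B[:3])  # -> [1. 2. 3.]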
Example #5
import numpy as np
from numpy.testing import assert_array_almost_equal
from pytransform3d.transformations import transform_from_pq


def test_transform_from_pq():
    """Test conversion from position and quaternion to homogeneous matrix."""
    pq = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
    A2B = transform_from_pq(pq)
    assert_array_almost_equal(A2B, np.eye(4))
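
A quick round-trip sketch complementing this test: pq_from_transform (also in pytransform3d.transformations) inverts the conversion.

import numpy as np
from pytransform3d.transformations import pq_from_transform, transform_from_pq

pq = np.array([1.0, 2.0, 3.0, 1.0, 0.0, 0.0, 0.0])  # [x, y, z, qw, qx, qy, qz]
A2B = transform_from_pq(pq)
assert np.allclose(pq_from_transform(A2B), pq)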
Example #6
"""
In this example, we will use the transform manager to infer a transformation
automatically.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from pytransform3d.rotations import (random_quaternion, matrix_from_euler_xyz,
                                     q_id)
from pytransform3d.transformations import transform_from_pq, transform_from
from pytransform3d.transform_manager import TransformManager

random_state = np.random.RandomState(0)

ee2robot = transform_from_pq(
    np.hstack((np.array([0.4, -0.3, 0.5]), random_quaternion(random_state))))
cam2robot = transform_from_pq(np.hstack((np.array([0.0, 0.0, 0.8]), q_id)))
object2cam = transform_from(matrix_from_euler_xyz(np.array([0.0, 0.0, 0.5])),
                            np.array([0.5, 0.1, 0.1]))

tm = TransformManager()
tm.add_transform("end-effector", "robot", ee2robot)
tm.add_transform("camera", "robot", cam2robot)
tm.add_transform("object", "camera", object2cam)

ee2object = tm.get_transform("end-effector", "object")
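
# For illustration (a sketch, not part of the original example): the chain the
# manager resolves here can be reproduced manually with concat and
# invert_transform from pytransform3d.transformations.
from pytransform3d.transformations import concat, invert_transform
ee2cam = concat(ee2robot, invert_transform(cam2robot))
assert np.allclose(ee2object, concat(ee2cam, invert_transform(object2cam)))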

ax = tm.plot_frames_in("robot", s=0.1)
ax.set_xlim((-0.25, 0.75))
ax.set_ylim((-0.5, 0.5))
ax.set_zlim((0.0, 1.0))
plt.show()
Example #7
"""
===========================
Camera Representation in 3D
===========================

This visualization is inspired by Blender's camera visualization. It will
show the camera center, a virtual image plane at a desired distance to the
camera center, and the top direction of the virtual image plane.
"""
print(__doc__)

import numpy as np
import pytransform3d.transformations as pt
import pytransform3d.visualizer as pv

cam2world = pt.transform_from_pq([0, 0, 0, np.sqrt(0.5), -np.sqrt(0.5), 0, 0])
# Default parameters of a camera in Blender: 36 x 24 mm sensor,
# 50 mm focal length, principal point at the sensor center (in meters)
sensor_size = np.array([0.036, 0.024])
intrinsic_matrix = np.array([[0.05, 0, sensor_size[0] / 2.0],
                             [0, 0.05, sensor_size[1] / 2.0],
                             [0, 0, 1]])
virtual_image_distance = 1

fig = pv.Figure()
fig.plot_transform(A2B=cam2world, s=0.2)
fig.plot_camera(cam2world=cam2world,
                M=intrinsic_matrix,
                sensor_size=sensor_size,
                virtual_image_distance=virtual_image_distance)
if "__file__" in globals():
    fig.show()
else:
    # presumably the documentation-build fallback
    fig.save_image("__open3d_rendered_image.jpg")
Example #8
tm.add_transform(
    "slam_sensor", "robot",
    pytr.transform_from(
        pyrot.active_matrix_from_intrinsic_euler_xyz([0.0, 0.0, 0]),
        [0.0, 0.0, 0.0]))

while True:
    frames = pipeline.wait_for_frames()
    f = frames.first_or_default(rs.stream.pose)
    data = f.as_pose_frame().get_pose_data()
    #angles = quaternion_to_euler_angle([data.rotation.w, data.rotation.x, data.rotation.y, data.rotation.z])

    tm.add_transform(
        "world", "slam_sensor",
        pytr.transform_from_pq([
            # [x, y, z, qw, qx, qy, qz]: axes remapped (x, -z, y),
            # scaled by 1000 (meters to millimeters)
            data.translation.x * 1000.0, -data.translation.z * 1000.0,
            data.translation.y * 1000.0, data.rotation.w, data.rotation.x,
            data.rotation.y, data.rotation.z
        ]))

    t = tm.get_transform("world", "origin")
    q = pyrot.quaternion_from_matrix(t[0:3, 0:3])
    print(
        pytr.transform(t, [0, 0, 0, 1])[0:3], quaternion_to_euler_angle(q),
        data.mapper_confidence, data.tracker_confidence)

    ##print("\r Device Position: ", t[0][3], t[1][3], t[2][3], angles, end="\r")

# now = time.time()
# with open("back_side.txt", "w") as text_file:
#     while (time.time() - now) < 5:
#         frames = pipeline.wait_for_frames()