Beispiel #1
0
    def setParams(self, params):
        """Configure all cameras from *params* and start their RealSense pose streams.

        Expected keys: "num_cameras", "print", and per camera i in
        0..num_cameras-1: "device_serial_i", "name_i", "rx_i".."rz_i"
        (rotation, degrees) and "tx_i".."tz_i" (translation).

        Populates ``self.cameras_dict`` as
            name -> [device_serial, TransformManager, pipeline,
                     data_list, angle_list, mapper_value, tracker_value]
        Exits the process (sys.exit(-1)) if any camera fails to start.
        """
        self.num_cameras = params["num_cameras"]
        self.print = params["print"] == "true"
        self.cameras_dict = {}

        for i in range(int(self.num_cameras)):
            device_serial = params["device_serial_" + str(i)]
            self.name = params["name_" + str(i)]
            # Sensor pose wrt the robot base; angles arrive in degrees.
            rx = np.radians(float(params["rx_" + str(i)]))
            ry = np.radians(float(params["ry_" + str(i)]))
            rz = np.radians(float(params["rz_" + str(i)]))
            tx = float(params["tx_" + str(i)])
            ty = float(params["ty_" + str(i)])
            tz = float(params["tz_" + str(i)])

            # Transforms:   world -> slam_sensor -> robot -> origin
            #                  (measure)     (sensor pose) (robot base wrt world origin)
            tm = TransformManager()
            tm.add_transform("robot", "origin",
                             pytr.transform_from(pyrot.active_matrix_from_intrinsic_euler_xyz([0.0, 0.0, 0.0]),
                                                 [0.0, 0.0, 0.0]))

            tm.add_transform("slam_sensor", "robot",
                             pytr.transform_from(pyrot.active_matrix_from_intrinsic_euler_xyz([rx, ry, rz]),
                                                 [tx, ty, tz]))

            self.cameras_dict[self.name] = [device_serial, tm]

        # realsense configuration
        try:
            for key in self.cameras_dict:
                config = rs.config()
                config.enable_device(self.cameras_dict[key][0])
                config.enable_stream(rs.stream.pose)
                pipeline = rs.pipeline()
                pipeline.start(config)
                self.cameras_dict[key].append(pipeline)

        except Exception as e:
            print("Error initializing camera")
            print(e)
            sys.exit(-1)

        # BUGFIX: the original built one shared data_list/angle_list before the
        # loop and appended the SAME list objects to every camera entry, so all
        # cameras aliased a single buffer. Give each camera its own fresh lists.
        for key in self.cameras_dict:
            self.cameras_dict[key].append([])   # data_list
            self.cameras_dict[key].append([])   # angle_list
            self.cameras_dict[key].append(0)    # mapper_value
            self.cameras_dict[key].append(0)    # tracker_value

        return True
Beispiel #2
0
def test_png_export():
    """Test if the graph can be exported to PNG.

    Skips (via SkipTest) when the optional pydot dependency is missing,
    and always removes the temporary file afterwards.
    """
    random_state = np.random.RandomState(0)

    ee2robot = transform_from_pq(
        np.hstack((np.array([0.4, -0.3,
                             0.5]), random_quaternion(random_state))))
    cam2robot = transform_from_pq(np.hstack((np.array([0.0, 0.0, 0.8]), q_id)))
    object2cam = transform_from(
        active_matrix_from_intrinsic_euler_xyz(np.array([0.0, 0.0, 0.5])),
        np.array([0.5, 0.1, 0.1]))

    tm = TransformManager()
    tm.add_transform("end-effector", "robot", ee2robot)
    tm.add_transform("camera", "robot", cam2robot)
    tm.add_transform("object", "camera", object2cam)

    # BUGFIX: mkstemp returns an OPEN file descriptor; the original discarded
    # it ("_"), leaking one fd per test run. Close it before use.
    fd, filename = tempfile.mkstemp(".png")
    os.close(fd)
    try:
        tm.write_png(filename)
        assert_true(os.path.exists(filename))
    except ImportError:
        raise SkipTest("pydot is required for this test")
    finally:
        if os.path.exists(filename):
            try:
                os.remove(filename)
            # BUGFIX: WindowsError is undefined on non-Windows Python 3 and
            # would raise NameError here; OSError covers it on all platforms.
            except OSError:
                pass  # workaround for permission problem on Windows
Beispiel #3
0
    def FullPoseEstimation_setInitialPose(self, x, y, z, rx, ry, rz):
        """Reset every camera's "robot" -> "origin" transform to the given pose.

        (rx, ry, rz) are intrinsic xyz Euler angles and (x, y, z) the
        translation; each cameras_dict entry keeps its TransformManager
        at index 1.
        """
        for camera in self.cameras_dict.values():
            rotation = pyrot.active_matrix_from_intrinsic_euler_xyz([rx, ry, rz])
            camera[1].add_transform("robot", "origin",
                                    pytr.transform_from(rotation, [x, y, z]))
Beispiel #4
0
 def __init__(self, proxy_map):
     """Initialize the worker and seed its transform tree.

     Registers the "origin" frame in the "world" frame with an identity
     rotation and zero translation as the starting pose.
     """
     super(SpecificWorker, self).__init__(proxy_map)
     self.tm = TransformManager()
     no_rotation = pyrot.active_matrix_from_intrinsic_euler_xyz([0.0, 0.0, 0.0])
     no_translation = [0.0, 0.0, 0.0]
     self.tm.add_transform("origin", "world",
                           pytr.transform_from(no_rotation, no_translation))
Beispiel #5
0
    def FullPoseEstimation_setInitialPose(self, x, y, z, rx, ry, rz):
        """Update the "origin" -> "world" transform to the requested pose.

        (rx, ry, rz) are intrinsic xyz Euler angles; (x, y, z) is the
        translation.
        """
        # should move robot in Coppelia to designated pose
        rotation = pyrot.active_matrix_from_intrinsic_euler_xyz([rx, ry, rz])
        translation = [x, y, z]
        self.tm.add_transform("origin", "world",
                              pytr.transform_from(rotation, translation))
Beispiel #6
0
def test_world2image():
    """Check a world-grid projection against precomputed pixel-space limits."""
    focal_length = 0.0036
    sensor_size = (0.00367, 0.00274)
    image_size = (640, 480)

    # Camera 1.5 above the origin; the pi rotation about x points it back down.
    cam2world = transform_from(
        active_matrix_from_intrinsic_euler_xyz([np.pi, 0, 0]), [0, 0, 1.5])

    image_grid = world2image(make_world_grid(), cam2world, sensor_size,
                             image_size, focal_length)

    # The projection must coincide with a grid generated directly in pixels.
    expected_grid = make_world_grid(xlim=(110.73569482, 529.26430518),
                                    ylim=(450.2189781, 29.7810219))
    assert_array_almost_equal(image_grid, expected_grid[:, :2])
Beispiel #7
0
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pytransform3d import rotations as pr

ax = pr.plot_basis(R=np.eye(3), ax_s=2, lw=3)
axis = 2
angle = np.pi

p = np.array([1.0, 1.0, 1.0])
euler = [0, 0, 0]
euler[axis] = angle
R = pr.active_matrix_from_intrinsic_euler_xyz(euler)
pr.plot_basis(ax, R, p)

p = np.array([-1.0, 1.0, 1.0])
euler = [0, 0, 0]
euler[2 - axis] = angle
R = pr.active_matrix_from_intrinsic_euler_zyx(euler)
pr.plot_basis(ax, R, p)

p = np.array([1.0, 1.0, -1.0])
R = pr.active_matrix_from_angle(axis, angle)
pr.plot_basis(ax, R, p)

p = np.array([1.0, -1.0, -1.0])
e = [pr.unitx, pr.unity, pr.unitz][axis]
a = np.hstack((e, (angle, )))
Beispiel #8
0
#!/usr/bin/python3

import numpy as np
from pytransform3d.transform_manager import TransformManager
import pytransform3d.transformations as pytr
import pytransform3d.rotations as pyrot

# Build a small transform tree: origin -> world -> robot.
tm = TransformManager()

# World frame as seen from the origin: identity rotation, zero translation.
world_pose = pytr.transform_from(
    pyrot.active_matrix_from_extrinsic_euler_xyz([0, 0, 0]),
    [0.0, 0.0, 0])
tm.add_transform("origin", "world", world_pose)

# Robot frame as seen from the world: quarter turn about z, shifted -10 along x.
robot_pose = pytr.transform_from(
    pyrot.active_matrix_from_intrinsic_euler_xyz([0, 0, np.pi / 2.0]),
    [-10.0, 0, 0])
tm.add_transform("world", "robot", robot_pose)

world2robot = tm.get_transform("world", "robot")
print(world2robot)

# Express the robot's "nose" point in the world coordinate system.
nose_in_world = pytr.transform(world2robot, np.array([0, 10, 0, 1.0]))
print(nose_in_world)



We can see the camera frame and the world frame. There is a grid of points from
which we know the world coordinates. If we know the location and orientation of
the camera in the world, we can easily compute the location of the points on
the image.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from pytransform3d.plot_utils import make_3d_axis
from pytransform3d.rotations import active_matrix_from_intrinsic_euler_xyz
from pytransform3d.transformations import transform_from, plot_transform
from pytransform3d.camera import make_world_grid, world2image, plot_camera

# Camera pose in the world: intrinsic xyz Euler rotation plus a translation.
cam2world = transform_from(
    active_matrix_from_intrinsic_euler_xyz([-np.pi + 1, -0.1, 0.3]),
    [0.2, -1, 0.5])
# Pinhole camera parameters (presumably metres for focal length and sensor,
# pixels for the image size — TODO confirm against pytransform3d.camera docs).
focal_length = 0.0036
sensor_size = (0.00367, 0.00274)
image_size = (640, 480)
# NOTE(review): not referenced in the visible lines; presumably used further
# below in the truncated part of this example.
intrinsic_camera_matrix = np.array([[focal_length, 0, sensor_size[0] / 2],
                                    [0, focal_length, sensor_size[1] / 2],
                                    [0, 0, 1]])

# Project the known world grid into image coordinates; kappa presumably
# models lens distortion — confirm with the world2image documentation.
world_grid = make_world_grid(n_points_per_line=101)
image_grid = world2image(world_grid,
                         cam2world,
                         sensor_size,
                         image_size,
                         focal_length,
                         kappa=0.4)
Beispiel #10
0
    config = rs.config()
    config.enable_device("925122110807")
    config.enable_stream(rs.stream.pose)
    pipeline = rs.pipeline()
    pipeline.start(config)

except Exception as e:
    print("Error initializing camera")
    print(e)
    sys.exit(-1)

tm = TransformManager()
# Robot base relative to the origin: identity rotation, translated by
# (2000, -30000, 0) — magnitudes suggest millimetres, TODO confirm units.
tm.add_transform(
    "robot", "origin",
    pytr.transform_from(
        pyrot.active_matrix_from_intrinsic_euler_xyz([0.0, 0.0, 0.0]),
        [2000, -30000, 0.0]))

# SLAM sensor mounted at the robot base with identity orientation and no offset.
tm.add_transform(
    "slam_sensor", "robot",
    pytr.transform_from(
        pyrot.active_matrix_from_intrinsic_euler_xyz([0.0, 0.0, 0]),
        [0.0, 0.0, 0.0]))

while True:
    frames = pipeline.wait_for_frames()
    f = frames.first_or_default(rs.stream.pose)
    data = f.as_pose_frame().get_pose_data()
    #angles = quaternion_to_euler_angle([data.rotation.w, data.rotation.x, data.rotation.y, data.rotation.z])

    tm.add_transform(