Example #1
import numpy as np
from numpy.testing import assert_array_almost_equal

# Import locations are assumed to match the pytransform package used
# throughout these examples.
from pytransform.transformations import transform_from
from pytransform.urdf import UrdfTransformManager


def test_collision():
    urdf = """
    <robot name="robot_name">
    <link name="link0"/>
    <link name="link1">
        <collision>
            <origin xyz="0 0 1"/>
            <geometry/>
        </collision>
    </link>
    <joint name="joint0" type="fixed">
        <parent link="link0"/>
        <child link="link1"/>
        <origin xyz="0 0 1"/>
    </joint>
    </robot>
    """
    tm = UrdfTransformManager()
    tm.load_urdf(urdf)

    link1_to_link0 = tm.get_transform("link1", "link0")
    expected_link1_to_link0 = transform_from(np.eye(3), np.array([0, 0, 1]))
    assert_array_almost_equal(link1_to_link0, expected_link1_to_link0)

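    # The frame "link1/collision_0" composes the fixed joint origin with the
    # collision origin: both shift by [0, 0, 1] along z, hence [0, 0, 2].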
    link1_to_link0 = tm.get_transform("link1/collision_0", "link0")
    expected_link1_to_link0 = transform_from(np.eye(3), np.array([0, 0, 2]))
    assert_array_almost_equal(link1_to_link0, expected_link1_to_link0)
Example #2
# bounds, p, Pw_corners, Pi_corners and projection_args come from the
# enclosing scope; this excerpt is a residual function defined inside the
# camera calibration code.
def objective(params):
    params = np.clip(params, bounds[:, 0], bounds[:, 1])
    e_xyz = params[:3]
    kappa = np.clip(params[-1], 0.0, 0.1)
    cam2world = transform_from(matrix_from_euler_xyz(e_xyz), p)
    Pi_projected = world2image(Pw_corners, cam2world, kappa=kappa,
                               **projection_args)
    return np.sum((Pi_projected - Pi_corners) ** 2, axis=1)
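
A rough illustration of how such a residual function might be driven (this is
an assumption, not the source's actual optimizer; x0 is a hypothetical initial
guess):

from scipy.optimize import minimize

# Collapse the per-corner squared errors into a scalar cost and minimize it.
x0 = np.mean(bounds, axis=1)  # midpoint of each bound as initial guess
result = minimize(lambda q: objective(q).sum(), x0, method="Nelder-Mead")
best_params = np.clip(result.x, bounds[:, 0], bounds[:, 1])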
Example #4
import numpy as np
from numpy.testing import assert_array_almost_equal

# Import locations are assumed to match the pytransform package.
from pytransform.rotations import matrix_from, random_axis_angle, random_vector
from pytransform.transformations import transform_from, invert_transform


def test_invert_transform():
    """Test inversion of transformations."""
    random_state = np.random.RandomState(0)
    for _ in range(5):
        R = matrix_from(a=random_axis_angle(random_state))
        p = random_vector(random_state)
        A2B = transform_from(R, p)
        B2A = invert_transform(A2B)
        A2B2 = np.linalg.inv(B2A)
        assert_array_almost_equal(A2B, A2B2)
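
For reference, the inverse of a rigid transform has a closed form that avoids
a general matrix inverse. A minimal sketch (the helper name is hypothetical,
not part of pytransform):

import numpy as np

def invert_rigid_transform(A2B):
    # For A2B = [[R, p], [0, 1]] the inverse is [[R.T, -R.T p], [0, 1]],
    # because R is orthonormal; cheaper and more stable than np.linalg.inv.
    R = A2B[:3, :3]
    p = A2B[:3, 3]
    B2A = np.eye(4)
    B2A[:3, :3] = R.T
    B2A[:3, 3] = -R.T.dot(p)
    return B2A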
Example #5
import numpy as np

# Import locations are assumed to match the pytransform package; R_id is the
# identity rotation matrix from pytransform.rotations.
from pytransform.rotations import R_id, matrix_from_angle
from pytransform.transformations import transform_from, concat


def update_trajectory(step, n_frames, trajectory):
    progress = float(step + 1) / float(n_frames)
    H = np.zeros((100, 4, 4))
    H0 = transform_from(R_id, np.zeros(3))
    H_mod = np.eye(4)
    for i, t in enumerate(np.linspace(0, progress, len(H))):
        H0[:3, 3] = np.array([t, 0, t])
        H_mod[:3, :3] = matrix_from_angle(2, 8 * np.pi * t)
        H[i] = concat(H0, H_mod)

    trajectory.set_data(H)
    return trajectory
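
update_trajectory is shaped like a matplotlib animation callback. A
hypothetical driver, assuming a figure fig and a Trajectory artist named
trajectory already exist in the surrounding script:

import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

n_frames = 50
anim = FuncAnimation(fig, update_trajectory, frames=n_frames,
                     fargs=(n_frames, trajectory), interval=50)
plt.show()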
Example #6
import numpy as np
from numpy.testing import assert_array_almost_equal

# Import locations are assumed to match the pytransform package.
from pytransform.rotations import matrix_from, random_axis_angle, random_vector
from pytransform.transformations import (transform_from, transform,
                                         vector_to_point, assert_transform)


def test_vector_to_point():
    """Test conversion from vector to homogeneous coordinates."""
    v = np.array([1, 2, 3])
    pA = vector_to_point(v)
    assert_array_almost_equal(pA, [1, 2, 3, 1])

    random_state = np.random.RandomState(0)
    R = matrix_from(a=random_axis_angle(random_state))
    p = random_vector(random_state)
    A2B = transform_from(R, p)
    assert_transform(A2B)
    pB = transform(A2B, pA)
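
Conceptually, vector_to_point appends a homogeneous coordinate of 1 so that
the translation part of a 4x4 transform acts on the point. A minimal sketch of
the idea (not the library's implementation):

import numpy as np

def to_homogeneous_point(v):
    # w = 1 makes translations apply; w = 0 would transform a direction.
    return np.hstack((v, [1.0]))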
Example #7
import numpy as np
from numpy.testing import assert_array_almost_equal

# Import locations are assumed to match the pytransform package.
from pytransform.rotations import matrix_from_euler_xyz
from pytransform.transformations import transform_from
from pytransform.camera import make_world_grid, world2image


def test_world2image():
    cam2world = transform_from(matrix_from_euler_xyz([np.pi, 0, 0]),
                               [0, 0, 1.5])
    focal_length = 0.0036
    sensor_size = (0.00367, 0.00274)
    image_size = (640, 480)

    world_grid = make_world_grid()
    image_grid = world2image(world_grid, cam2world, sensor_size, image_size,
                             focal_length)
    expected_grid = make_world_grid(xlim=(110.73569482, 529.26430518),
                                    ylim=(450.2189781, 29.7810219))
    assert_array_almost_equal(image_grid, expected_grid[:, :2])
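
The expected pixel coordinates follow from the pinhole model. A rough sketch
of such a projection under assumed conventions (centered principal point, no
distortion; not necessarily pytransform's exact implementation):

import numpy as np

def project_pinhole(P_world, cam2world, sensor_size, image_size, focal_length):
    # Homogeneous world points (n, 4) -> camera frame, perspective-divide by
    # z, then map metric sensor coordinates to pixel coordinates.
    P_cam = P_world.dot(np.linalg.inv(cam2world).T)
    P_sensor = focal_length * P_cam[:, :2] / P_cam[:, 2, np.newaxis]
    return (P_sensor / np.asarray(sensor_size) + 0.5) * np.asarray(image_size)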
Example #8
from functools import partial

import numpy as np
from skimage.transform import hough_line, hough_line_peaks

# Import locations are assumed to match the pytransform package.
# detect_edges, optimize_transform, check_edge_is_on_line, check_line_is_edge
# and camera_params are defined elsewhere in the surrounding script.
from pytransform.rotations import matrix_from_euler_xyz
from pytransform.transformations import transform_from
from pytransform.camera import world2image


def check_door(image, Pw_corners, Pi_corners, door_edges,
               required_matching_ratio=0.7, verbose=0):
    """Check if door is closed."""
    results = {}

    image_sobel, image_edges = detect_edges(image)

    # Detect lines with Hough transform
    hough_accumulator, angles, dists = hough_line(image_edges)
    hspace, angles, dists = hough_line_peaks(
        hough_accumulator, angles, dists, threshold=150.0)

    # Estimate the camera pose by minimizing the distance between projected
    # and detected calibration points
    params = optimize_transform(camera_params, Pw_corners, Pi_corners)
    if verbose >= 1:
        print("Parameters: %s" % np.round(params, 3))
    cam2world = transform_from(matrix_from_euler_xyz(params[:3]), params[3:6])
    kappa = params[-1]

    W2I = partial(world2image, cam2world=cam2world, kappa=kappa,
                  **camera_params)
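    # W2I now projects world points with the estimated pose and distortion
    # already bound, e.g. pixels = W2I(Pw_corners).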

    # Get edge pixels in vicinity of lines
    Pi_line_points = check_edge_is_on_line(image_edges, angles, dists)

    if len(Pi_line_points) == 0:
        if verbose >= 1:
            print("No lines detected, assume that door is closed")
        door_closed = True
    else:
        # Check how well the door edges projected into the image match the
        # detected edge pixels that correspond to lines
        matchings = [check_line_is_edge(edge, Pi_line_points, cam2world, kappa,
                                        camera_params) for edge in door_edges]
        results["door_edges_in_image"] = [m[0] for m in matchings]
        ratios = np.array([m[1] for m in matchings])

        if verbose >= 1:
            print(("Matching ratios: " + ", ".join(["%.2f"] * len(ratios)))
                  % tuple(100 * ratios))

        door_closed = np.any(ratios > required_matching_ratio)

    results["cam2world"] = cam2world
    results["Pi_line_points"] = Pi_line_points
    results["image_sobel"] = image_sobel
    results["image_edges"] = image_edges
    results["lines"] = (angles, dists)

    return door_closed, W2I, results
Example #9
automatically.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from pytransform.rotations import (random_quaternion, matrix_from_euler_xyz,
                                   q_id)
from pytransform.transformations import transform_from_pq, transform_from
from pytransform.transform_manager import TransformManager

random_state = np.random.RandomState(0)

ee2robot = transform_from_pq(
    np.hstack((np.array([0.4, -0.3, 0.5]), random_quaternion(random_state))))
cam2robot = transform_from_pq(np.hstack((np.array([0.0, 0.0, 0.8]), q_id)))
object2cam = transform_from(matrix_from_euler_xyz(np.array([0.0, 0.0, 0.5])),
                            np.array([0.5, 0.1, 0.1]))

tm = TransformManager()
tm.add_transform("end-effector", "robot", ee2robot)
tm.add_transform("camera", "robot", cam2robot)
tm.add_transform("object", "camera", object2cam)

ee2object = tm.get_transform("end-effector", "object")
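# For illustration: the same transform chained by hand from the three
# registered edges. concat and invert_transform come from
# pytransform.transformations; the chaining below is an assumption about the
# path through the graph, not the manager's internals.
from pytransform.transformations import concat, invert_transform
ee2object_manual = concat(concat(ee2robot, invert_transform(cam2robot)),
                          invert_transform(object2cam))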

ax = tm.plot_frames_in("robot", s=0.1)
ax.set_xlim((-0.25, 0.75))
ax.set_ylim((-0.5, 0.5))
ax.set_zlim((0.0, 1.0))
plt.show()
Example #10
We can see the camera frame and the world frame. There is a grid of points from
which we know the world coordinates. If we know the location and orientation of
the camera in the world, we can easily compute the location of the points on
the image.
"""
print(__doc__)


import numpy as np
import matplotlib.pyplot as plt
from pytransform.rotations import matrix_from_euler_xyz
from pytransform.transformations import transform_from, plot_transform
from pytransform.camera import make_world_grid, world2image


cam2world = transform_from(matrix_from_euler_xyz([np.pi - 1, 0.2, 0.2]),
                           [0.2, -1, 0.5])
focal_length = 0.0036
sensor_size = (0.00367, 0.00274)
image_size = (640, 480)

world_grid = make_world_grid()
image_grid = world2image(world_grid, cam2world, sensor_size, image_size,
                         focal_length)

plt.figure(figsize=(12, 5))
ax = plt.subplot(121, projection="3d", aspect="equal")
ax.view_init(elev=30, azim=-70)
ax.set_xlim((-1, 1))
ax.set_ylim((-1, 1))
ax.set_zlim((-1, 1))
ax.set_xlabel("X")
Example #12
"""
=====================
Transformation Editor
=====================

The transformation editor can be used to manipulate transformations.
"""
from pytransform.transform_manager import TransformManager
from pytransform.editor import TransformEditor
from pytransform.transformations import transform_from
from pytransform.rotations import matrix_from_euler_xyz

tm = TransformManager()

tm.add_transform(
    "tree", "world",
    transform_from(matrix_from_euler_xyz([0, 0.5, 0]), [0, 0, 0.5]))
tm.add_transform(
    "car", "world",
    transform_from(matrix_from_euler_xyz([0.5, 0, 0]), [0.5, 0, 0]))

te = TransformEditor(tm, "world", s=0.3)
te.show()
print("tree to world:")
print(te.transform_manager.get_transform("tree", "world"))
Example #13
"""
=======================
Transform Concatenation
=======================

In this example, we have a point p that is defined in frame C, and we know
the transforms C2B and B2A. We can construct a transform C2A to compute the
position of p in frame A.
"""
print(__doc__)


import numpy as np
import matplotlib.pyplot as plt
import pytransform.rotations as pyrot
import pytransform.transformations as pytr


p = np.array([0.0, 0.0, -0.5])
a = np.array([0.0, 0.0, 1.0, np.pi])
B2A = pytr.transform_from(pyrot.matrix_from_axis_angle(a), p)

p = np.array([0.3, 0.4, 0.5])
a = np.array([0.0, 0.0, 1.0, -np.pi / 2.0])
C2B = pytr.transform_from(pyrot.matrix_from_axis_angle(a), p)

C2A = pytr.concat(C2B, B2A)
p = pytr.transform(C2A, np.ones(4))

ax = pytr.plot_transform(A2B=B2A)
pytr.plot_transform(ax, A2B=C2A)
ax.scatter(p[0], p[1], p[2])
plt.show()