Example 1
def calibrate_from_table_poses(table_poses, start_offset, grid_size,
                               tcp_tool_offset):
    # tool-tip points in the robot base frame, one per recorded TCP pose
    table_pts_in_base_frame = np.array(
        [base_t_tcp @ (0, 0, tcp_tool_offset) for base_t_tcp in table_poses])

    # initial estimate of table_t_base from the first table pose
    table_offset_x, table_offset_y = (np.array(start_offset) + .5) * grid_size
    base_t_table = table_poses[0] @ tcp_t_tool @ Transform(
        p=(-table_offset_x, -table_offset_y, -tcp_tool_offset))
    table_t_base = base_t_table.inv

    table_pts_in_table_frame = np.array([
        table_t_base @ base_t_tcp @ (0, 0, tcp_tool_offset)
        for base_t_tcp in table_poses
    ])

    # round to nearest possible table positions
    table_pts_in_table_frame[:, 2] = 0
    table_pts_in_table_frame[:, :2] = np.round(
        ((table_pts_in_table_frame[:, :2] - grid_size / 2) /
         grid_size)) * grid_size + grid_size / 2

    # refine: fit the base-frame points to the snapped table-frame points
    R, t = kabsch(table_pts_in_base_frame, table_pts_in_table_frame)
    table_t_base = Transform(R=R, p=t)
    return table_t_base
Example 2
def rotational_grasp_symmetry(kit_t_tcp_grasp: Transform,
                              deg: int,
                              offset=(0, 0, 0)):
    assert 360 % deg == 0
    n = 360 // deg
    to = Transform(p=offset)
    return [
        kit_t_tcp_grasp @ to.inv
        @ Transform(rotvec=(0, 0, np.deg2rad(i * deg))) @ to for i in range(n)
    ]
Example 3
    def mouseMoveEvent(self, e: QtGui.QMouseEvent):
        x_last, y_last = self.last_mouse_position
        x, y = e.x(), e.y()
        dx, dy = x - x_last, y - y_last
        self.last_mouse_position = x, y
        modifiers = QtWidgets.QApplication.keyboardModifiers()

        if self.mode == Mode.CROP:
            self.update_crop_drag_box(*self.crop_start, x, y)
            self.update()
        elif self.mode == Mode.ADJUST and self.obj and self.obj.pose:
            s = self.scale
            cam_t_center = self.cam_t_model @ Transform(p=self.obj.center)

            if modifiers == Qt.ControlModifier:  # move xy
                ss = self.cam_t_model.p[2] * 1e-3 * s
                self.set_cam_t_model(Transform(p=(dx * ss, dy * ss, 0)) @ self.cam_t_model)
            elif modifiers == (Qt.ControlModifier | Qt.ShiftModifier):  # move in depth
                dp = cam_t_center.p * dy * 2e-3 * s
                self.set_cam_t_model(Transform(p=dp) @ self.cam_t_model)
            elif modifiers == Qt.ShiftModifier:  # rotate cam z
                cam_R_model = Transform(rotvec=(0, 0, -dx * 3e-3)).R @ self.cam_t_model.R
                center_travel = Transform(p=self.cam_t_model.p, R=cam_R_model) @ self.obj.center - cam_t_center.p
                self.set_cam_t_model(Transform(p=self.cam_t_model.p - center_travel, R=cam_R_model))
            else:  # rotate cam x y
                rot = Transform(rotvec=(dy * 3e-3, -dx * 3e-3, 0))
                cam_R_model = rot.R @ self.cam_t_model.R
                center_travel = Transform(p=self.cam_t_model.p, R=cam_R_model) @ self.obj.center - cam_t_center.p
                self.set_cam_t_model(Transform(p=self.cam_t_model.p - center_travel, R=cam_R_model))
Example 4
def _get_pose(Q, s):
    P = np.array([
        [0, s, 0], [s, s, 0],
        [0, 0, 0], [s, 0, 0],
    ])
    R, p = _kabsch(P, Q)
    return Transform(R=R, p=p)
Example 5
    def move(self,
             pos,
             theta=0.,
             speed=1.2,
             acc=5.,
             instant=False,
             record=False):
        world_t_tool_desired = Transform(p=pos,
                                         rpy=(0, np.pi, np.pi / 2 + theta))
        if instant:
            self.set_q(self.ik(world_t_tool_desired))
        else:
            world_t_tool_start = self.world_t_tool()
            tool_start_t_tool_desired = world_t_tool_start.inv @ world_t_tool_desired
            dist = np.linalg.norm(tool_start_t_tool_desired.xyz_rotvec)
            images = []
            # qs = []
            for i, s in enumerate(lerp(dist, speed, acc, self.dt)):
                world_t_tool_target = world_t_tool_start @ (
                    tool_start_t_tool_desired * s)
                self.set_q_target(self.ik(world_t_tool_target))
                p.stepSimulation()
                if record and i % 4 == 0:  # fps = 1/dt / 4
                    images.append(self.take_image())
                    # qs.append(self.q_target)
            # return images, qs
        return None
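
lerp is not shown in this example; a plausible sketch of what it might look like, assuming it yields path fractions s in (0, 1] following a trapezoidal speed profile:

import numpy as np

def lerp(dist, speed, acc, dt):
    # accelerate with `acc`, cruise at `speed`, decelerate to stop at `dist`;
    # the last term caps v by the remaining braking distance, v <= sqrt(2*acc*(dist - s))
    s, v = 0., 0.
    while s < dist:
        v = min(v + acc * dt, speed, np.sqrt(2 * acc * max(dist - s, 0.)))
        s = min(s + v * dt, dist)
        yield s / dist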
Example 6
    def disc_sampling_search(self,
                             radius: float,
                             depth: float,
                             poke_speed=0.03,
                             move_speed=.25,
                             move_acc=1.2,
                             stop_condition=lambda: False,
                             timeout=20.):
        # TODO: insertion direction
        base_t_tcp_init = self.base_t_tcp()
        start = time.time()
        while True:
            if time.time() - start > timeout:
                raise TimeoutError()
            x, y = utils.sample_hyper_sphere_volume(r=radius, d=2)
            tcp_init_t_tcp_next = Transform(p=(x, y, 0))
            base_t_tcp_next = base_t_tcp_init @ tcp_init_t_tcp_next
            self.ctrl.moveL(base_t_tcp_next, move_speed, move_acc)
            try:
                self.speed_until_ft((0, 0, poke_speed), max_travel=depth)
            except DeviatingMotionError:
                return TerminateReason.TRAVEL_LIMIT
            if stop_condition():
                return TerminateReason.STOP_CONDITION
            self.ctrl.moveL(base_t_tcp_next, move_speed, move_acc)
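
utils.sample_hyper_sphere_volume is not part of the snippet; a plausible sketch under the usual definition (a uniform sample inside a d-dimensional ball of radius r):

import numpy as np

def sample_hyper_sphere_volume(r, d):
    v = np.random.randn(d)
    v /= np.linalg.norm(v)  # uniform direction on the unit sphere
    return v * r * np.random.uniform() ** (1 / d)  # radius ~ r * U^(1/d) gives uniform density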
Example 7
    def select_object(self, obj: Object):
        self.obj = obj
        for c in self.fl.children():
            c.remove()
        if obj.pose is None:
            open_images = [img for img in self.scene.images if img.is_open]
            if open_images:
                image = open_images[0]
            else:
                image = self.scene.images[0]

            obj.pose = image.camera_pose @ Transform(
                p=(0, 0, obj.diameter * 3), rpy=(np.pi / 2, 0, 0))
            obj.pose = obj.pose @ Transform(p=-obj.center)

        for overlay in self.overlays:
            overlay.render_overlay.set_obj(obj)
Example 8
    def _get_base_t_frame(self, base_t_frame: Frame):
        if isinstance(base_t_frame, str):
            if base_t_frame.lower() == 'base':
                base_t_frame = Transform()
            elif base_t_frame.lower() == 'tcp':
                base_t_frame = self.base_t_tcp()
        if not isinstance(base_t_frame, Transform):
            raise ValueError('{} not recognized as frame'.format(base_t_frame))
        return base_t_frame
Example 9
def load_scene():
    table, b_base, b_tcp, c_base, c_tcp, cam_black, cam_white = SceneNode.n(7)
    table.adopt(b_base.adopt(b_tcp),
                c_base.adopt(c_tcp.adopt(cam_black, cam_white)))
    state = SceneState()
    state[table] = Transform()
    state[b_base] = Transform.load('calib/table_t_base_b')
    state[c_base] = Transform.load('calib/table_t_base_c')
    state[cam_black] = Transform.load('calib/tcp_t_cam_black')
    state[cam_white] = Transform.load('calib/tcp_t_cam_white')

    return Scene(table, b_base, b_tcp, c_base, c_tcp, cam_black,
                 cam_white), state
Example 10
def build_finger_grasp_volume(grasp_width: float,
                              w=0.032,
                              h=0.035,
                              bottom_width=0.007,
                              base_height=0.005,
                              tcp_z_finger_bottom=0.1915):
    # cross-section of one finger, offset by the grasp width, then mirrored in x
    # (Polygon is shapely's, extrude_polygon is trimesh.creation's)
    one_side = [(bottom_width, 0), (w, h - base_height), (w, h * 3)]
    one_side = np.array(one_side) + (grasp_width, 0)
    poly = np.concatenate((one_side, one_side[::-1] * (-1, 1)))
    poly = Polygon(poly)
    mesh = extrude_polygon(poly, w)
    mesh.apply_translation((0, 0, -w / 2))  # center the extrusion
    mesh.apply_transform(Transform(rotvec=(-np.pi / 2, 0, 0)).matrix)
    mesh.apply_translation((0, 0, tcp_z_finger_bottom))
    return mesh
Example 11
    def move(self,
             pos,
             theta=0.,
             speed=1.2,
             acc=5.,
             instant=False,
             record=False,
             save=False):
        world_t_tool_desired = Transform(p=pos,
                                         rpy=(0, np.pi, np.pi / 2 + theta))
        if instant:
            self.set_q(self.ik(world_t_tool_desired))
        else:
            world_t_tool_start = self.world_t_tool()
            tool_start_t_tool_desired = world_t_tool_start.inv @ world_t_tool_desired
            dist = np.linalg.norm(tool_start_t_tool_desired.xyz_rotvec)
            images = []
            states_l = []

            for i, s in enumerate(lerp(dist, speed, acc, self.dt)):
                world_t_tool_target = world_t_tool_start @ (
                    tool_start_t_tool_desired * s)
                self.set_q_target(self.ik(world_t_tool_target))
                p.stepSimulation()
                if record and i % 4 == 0:  # fps = 1/dt / 4
                    images.append(self.take_image())
                    if save:
                        # qs.append(self.q_target)
                        cube_pos, cube_orn = p.getBasePositionAndOrientation(
                            self.cube_id)

                        # maybe from here we could get the q_dots?
                        # p.setJointMotorControlArray(
                        #     self.rid, self.joint_idxs, p.POSITION_CONTROL, q,
                        #     forces=[100] * 7 + [15] * 2, positionGains=[1] * 9
                        # )
                        states_l.append(
                            State(qs=self.q_target,
                                  q_dots=None,
                                  cube_pos=cube_pos,
                                  cube_orient=cube_orn,
                                  keypoint=None))
            if save:
                return states_l
            else:
                return None
Example 12
    def spiral_search(self,
                      tol: float,
                      max_radius: float,
                      push_force=5.,
                      speed=0.01,
                      ft_stop=(15., 15., np.inf, 2., 2., 2.),
                      stop_condition=lambda: False,
                      lookahead_time=0.1,
                      gain=600):
        # TODO: acc limit
        # TODO: insertion direction
        try:
            base_t_tcp_init = self.base_t_tcp()
            self.ctrl.forceModeStart(base_t_tcp_init, [0, 0, 1, 0, 0, 0],
                                     [0, 0, push_force, 0, 0, 0], 2,
                                     [0.05] * 6)

            a, b = spiral_utils.get_spiral_params(tol)
            theta = 0
            while True:
                loop_start = time.time()
                theta = spiral_utils.next_theta_constant_speed(theta,
                                                               a,
                                                               b,
                                                               speed=speed,
                                                               dt=self.dt)
                r = spiral_utils.radius_from_theta(a, b, theta)
                if r > max_radius:
                    raise DeviatingMotionError()
                x, y = np.cos(theta) * r - a, np.sin(theta) * r
                base_t_tcp = Transform.from_xyz_rotvec(
                    self.recv.getActualTCPPose())
                ft_tcp = base_t_tcp.inv.rotate(self.recv.getActualTCPForce())
                if any(np.abs(ft_tcp) > ft_stop):
                    return TerminateReason.FORCE_TORQUE_LIMIT
                elif stop_condition():
                    return TerminateReason.STOP_CONDITION
                tcp_start_t_tcp_next = Transform(p=(x, y, 0))
                base_t_tcp_next = base_t_tcp_init @ tcp_start_t_tcp_next
                self.ctrl.servoL(base_t_tcp_next, 0.5, 0.25, self.dt,
                                 lookahead_time, gain)
                loop_duration = time.time() - loop_start
                time.sleep(max(0., self.dt - loop_duration))
        finally:
            self.ctrl.servoStop()
            self.ctrl.forceModeStop()
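
spiral_utils is not shown here; a plausible sketch of the helpers it names, assuming an Archimedean spiral r = a + b * theta stepped at constant path speed (get_spiral_params would then derive a and b from the tolerance):

import numpy as np

def radius_from_theta(a, b, theta):
    return a + b * theta

def next_theta_constant_speed(theta, a, b, speed, dt):
    # arc length per radian is sqrt(r^2 + (dr/dtheta)^2) = sqrt(r^2 + b^2)
    r = radius_from_theta(a, b, theta)
    return theta + speed * dt / max(np.hypot(r, b), 1e-9)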
Example 13
def determine_grasps(obj_grasp_configs,
                     kit_t_objects: Dict[str, Transform],
                     min_clearance,
                     debug=False,
                     debug_view_res=(800, 500)):
    if debug:
        scene = trimesh.Scene()
        for name, mesh, *_ in obj_grasp_configs:
            scene.add_geometry(mesh,
                               transform=kit_t_objects[name].matrix,
                               geom_name=name)
        scene.show(resolution=debug_view_res)

    lense = trimesh.load_mesh(
        Path(__file__).parent.parent / 'stl' / 'lense.stl')
    lense.apply_scale(1e-3)
    col_tool = CollisionManager()
    col_tool.add_object('lense', lense)

    col_objects = CollisionManager()
    for name, mesh, *_ in obj_grasp_configs:
        col_objects.add_object(name,
                               mesh,
                               transform=kit_t_objects[name].matrix)

    worst_possible_clearance = np.inf
    grasps = []
    candidates = [*obj_grasp_configs]
    clearances = []
    i = 0
    while candidates:
        candidate = candidates[i]
        name, mesh, tcp_t_obj, grasp_start_width, grasp_end_width, sym_deg, sym_offset = candidate
        kit_t_obj = kit_t_objects[name]

        # move away the grasp obj to ignore obj-tcp collision
        col_objects.set_transform(name, Transform(p=(1000, 0, 0)).matrix)

        finger_grasp_volume = build_finger_grasp_volume(grasp_start_width)
        col_tool.add_object('finger_grasp_volume', finger_grasp_volume)

        kit_t_tcp = kit_t_obj @ tcp_t_obj.inv
        ts = rotational_grasp_symmetry(kit_t_tcp, sym_deg, sym_offset)
        dists, dists_name = [], []
        for t in ts:
            col_tool.set_transform('lense', t.matrix)
            col_tool.set_transform('finger_grasp_volume', t.matrix)
            dist, dist_names = col_objects.min_distance_other(
                col_tool, return_names=True)
            if dist < worst_possible_clearance:
                worst_possible_clearance = dist
            dists.append(dist)
            dists_name.append(dist_names[0])
        di = np.argmax(dists).item()
        kit_t_tcp, dist, dist_name = ts[di], dists[di], dists_name[di]
        tcp_t_obj = kit_t_tcp.inv @ kit_t_obj

        col_tool.remove_object('finger_grasp_volume')

        if dist > min_clearance:
            grasps.append(
                (name, tcp_t_obj, grasp_start_width, grasp_end_width))
            candidates.remove(candidate)
            clearances.append(dist)
            i = 0
            if debug:
                print(f'{name}: {dist:.3f}')
                scene.add_geometry(lense,
                                   transform=kit_t_tcp.matrix,
                                   geom_name='lense')
                scene.add_geometry(finger_grasp_volume,
                                   transform=kit_t_tcp.matrix,
                                   geom_name='finger_grasp_volume')
                scene.show(resolution=debug_view_res)
                scene.delete_geometry([name, 'lense', 'finger_grasp_volume'])
        else:
            if debug:
                print(f'!! {name}: {dist_name} {dist:.3f} - changing order')
            # move the grasp object back in place
            col_objects.set_transform(name, kit_t_obj.matrix)
            i += 1
            if i == len(candidates):
                raise RuntimeError(
                    "can't find a grasp order with this clearance")

    return grasps
Example 14
def extract_kit_t_objects(svg_filepath, debug=False):
    img = svg_to_img.svg_to_numpy(svg_filepath)
    h, w = img.shape

    kit_t_objects = dict()

    folder = Path(__file__).parent
    obj_configs = json.load((folder / 'obj_config.json').open())
    cad_files = json.load((folder.parent / 'cad_files.json').open())

    debug_img = np.tile(img[..., None], (1, 1, 3))

    for name, config in obj_configs.items():
        template = cv2.imread(str(folder / 'templates' / f'{name}.png'), cv2.IMREAD_GRAYSCALE)
        res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
        *_, (xmi, ymi) = cv2.minMaxLoc(res)
        xma, yma = xmi + template.shape[1], ymi + template.shape[0]
        cv2.rectangle(debug_img, (xmi, ymi), (xma, yma), (50, 50, 50), 1)
        cv2.putText(debug_img, name, (xmi, ymi), cv2.FONT_HERSHEY_PLAIN, 2, (50, 50, 50), 2)
        cx, cy = (xmi + xma) / 2, (ymi + yma) / 2

        # get rotation matrix
        up = get_signed_axis(config['up_kit'])
        if 'forward_kit' in config:
            forward = get_signed_axis(config['forward_kit'])
        else:
            # choose a valid forward vector for objects with symmetry
            forward = np.eye(3)[np.argmin(np.abs(np.eye(3) @ up))]
        assert forward @ up == 0
        R = np.stack((np.cross(forward, up), forward, up))
        assert np.linalg.det(R) == 1

        t = (0, 0, config['height_offset_kit']) + np.array((cx, h - cy, 0)) / h * kit_size_m
        if 'center_offset_kit' in config:
            t -= R @ config['center_offset_kit']

        kit_t_objects[name] = Transform(R=R, p=t)

        if debug:
            cad_file = folder.parent / 'stl' / f'{cad_files[name]}.stl'
            verts_obj = trimesh.load_mesh(cad_file).vertices * 1e-3
            verts_kit = verts_obj @ R.T + t
            z_min = verts_kit[:, 2].min()
            assert np.abs(z_min) < 1e-4

            # flip y for img frame, convert from m to px
            for axes, color in ([0, 1], (255, 0, 0)), ([0, 2], (0, 255, 0)):
                uv = (0, h - 1) + verts_kit[:, axes] * (1, -1) * h / kit_size_m
                uv = np.around(uv).astype(int)
                u, v = uv.T
                mask = np.zeros((h, w), dtype=np.uint8)
                mask[v, u] = 255
                mask = cv2.dilate(mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)))
                debug_img[mask > 0] = color

    if debug:
        plt.imshow(debug_img, interpolation='bilinear')
        plt.axis('off')
        plt.show()

    return kit_t_objects
Example 15
from gripper import get_gripper

parser = argparse.ArgumentParser()
parser.add_argument('obj_names', nargs='+')
parser.add_argument('--wait', type=float, default=1.)
parser.add_argument('--dont-put-back', action='store_true')
parser.add_argument('--stop-at-grasp', action='store_true')
args = parser.parse_args()

obj_names = args.obj_names

gripper = get_gripper()
gripper.open()

r = Robot.from_ip('192.168.1.130')
r.ctrl.moveL(r.base_t_tcp() @ Transform(p=(0, 0, -0.05)))
q_safe = (2.8662071228027344, -1.7563158474364222, -1.9528794288635254,
          -1.0198443692973633, -4.752078358327047, -2.1280840078936976)

grasp_start_widths = {
    'big_round_peg': 25,
    'small_round_peg': 17,
    'big_gear': 40,
    'small_gear': 26,
    'bnc': 24,
    'belt': 30,
    'small_square_peg': 14,
    'big_square_peg': 22,
    'ethernet_cable_head': 22,
}
Example 16
import time
import argparse
from typing import List

import numpy as np
from ur_control import Robot

from transform3d import Transform

pose_elements = 'x', 'y', 'z', 'rx', 'ry', 'rz'
tcp_t_tool = Transform(rpy=(np.pi, 0, np.pi / 2))  # TODO: translation?


def kabsch(P, Q):  # P, Q: [N, d]
    assert P.shape == Q.shape, '{}, {}'.format(P.shape, Q.shape)
    d = P.shape[1]
    Pc, Qc = P.mean(axis=0), Q.mean(axis=0)
    P, Q = P - Pc, Q - Qc
    H = P.T @ Q
    u, _, vt = np.linalg.svd(H, full_matrices=False)
    s = np.eye(d)
    s[-1, -1] = np.linalg.det(vt.T @ u.T)
    R = vt.T @ s @ u.T
    t = Qc - R @ Pc
    return R, t
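
# a quick self-test of the convention (assumed, not from the repo):
# kabsch(P, Q) returns R, t such that R @ P[i] + t ~= Q[i]
def _kabsch_self_test():
    rng = np.random.RandomState(0)
    P = rng.randn(10, 3)
    R_true = Transform(rotvec=(.1, .2, .3)).R
    t_true = np.array([.1, -.2, .3])
    R, t = kabsch(P, P @ R_true.T + t_true)
    assert np.allclose(R, R_true) and np.allclose(t, t_true)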


def settle(robot: Robot, t=1., damping_sequence=(0.05, 0.1)):
    # let the robot settle in force mode, gradually increasing damping
    t = t / len(damping_sequence)
    for damping in damping_sequence:
        robot.ctrl.forceModeSetDamping(damping)
        time.sleep(t)  # assumed completion: the snippet is truncated here
Example 17
import json
import numpy as np
from transform3d import Transform


def camera_matrix_from_fov(fovx, w, h):
    f = .5 * w / np.tan(fovx / 2)
    return np.array(((f, 0, w / 2), (0, f, h / 2), (0, 0, 1)))


h, w = 480, 640
camera_matrix = camera_matrix_from_fov(np.deg2rad(45), w, h).tolist()

# read from blender
camera_poses = [
    Transform(p=(0., 0., 1.)),
    Transform(p=(1., 0., 0.5), rpy=(65, 0, 90), degrees=True),
    Transform(p=(0., 1., 0.), rpy=(90, 0, 180), degrees=True),
    Transform(p=(-1., -1., 0.5), rpy=(65, 0, -45), degrees=True),
    Transform(p=(0., -0.7, 0.6), rpy=(50, 0, 0), degrees=True),
]
# blender/opengl cameras look down their local -z axis, cv2 cameras down +z;
# rotating 180 deg about x converts between the two conventions
camera_poses = [vp @ Transform(rpy=(np.pi, 0, 0)) for vp in camera_poses]

cameras = [dict(name='A', width=w, height=h, matrix=camera_matrix)]

objects = [
    dict(name='monkey', file_path='monkey.stl'),
    dict(name='torus', file_path='torus.stl'),
    dict(name='offset_cylinder', file_path='offset_cylinder.stl')
]
Example 18
    2.2288451194763184, -1.490114526157715, 1.5134428183185022,
    -0.8572123807719727, -1.3860304991351526, -0.5580604712115687
])
for q, r in zip(config['robots_q_init'], [r_b, r_c]):
    r.ctrl.moveJ(q)

r_b.zero_ft_sensor()
base_t_tcp_b_init = r_b.base_t_tcp()

for _ in range(args.n):
    if rospy.is_shutdown():
        break

    yz = np.random.randn(2)
    yz = yz / np.linalg.norm(yz) * max_err
    tcp_err = Transform(p=(0, *yz))
    base_t_tcp_b = base_t_tcp_b_init @ tcp_err
    r_b.ctrl.moveL(base_t_tcp_b)

    peg_in_hole_visual_servoing.servo(
        peg_robot=r_b,
        aux_robots=[r_c],
        scene_state=state,
        peg_tcp_node=scene.b_tcp,
        aux_tcp_nodes=[scene.c_tcp],
        camera_nodes=[scene.cam_black, scene.cam_white],
        servo_config=config,
        insertion_direction_tcp=np.array((1, 0, 0)),
        alpha_target=0.95,
        timeout=10,
    )
Example 19
    def world_t_tool(self):
        pos, quat = p.getLinkState(self.rid, 11)[:2]
        return Transform(p=pos, quat=quat)
Example 20
def mesh_to_vertnorms(mesh: trimesh.Trimesh):
    verts = mesh.vertices[mesh.faces].reshape(-1, 3)
    norms = np.tile(mesh.face_normals[:, None], (1, 3, 1)).reshape(-1, 3)
    # norms = mesh.vertex_normals[mesh.faces].reshape(-1, 3)
    return np.concatenate((verts, norms), -1).reshape(-1).astype('f4')


def to_uniform(a):
    a = np.asarray(a, 'f4')
    if a.ndim == 2:
        a = a.T.reshape(-1)
    assert a.ndim == 1
    return tuple(a)


gl_t_cv = Transform(rpy=(np.pi, 0, 0))


def cvt_camera_matrix_cv_gl(camera_matrix: np.ndarray):
    camera_matrix_out = camera_matrix.copy()
    camera_matrix_out[:, 2] *= -1
    return camera_matrix_out


def farthest_point_sampling(all_points: np.ndarray, n: int):  # (N, d)
    assert n >= 0
    assert all_points.ndim == 2
    sampled_points_idx = []
    sampled_points = all_points.mean(axis=0, keepdims=True)
    for _ in range(n):
        dists = np.linalg.norm(all_points[:, None] - sampled_points[None],
                               axis=-1).min(axis=1)
        # plausible completion (the original snippet is truncated here):
        # greedily take the point farthest from everything sampled so far
        idx = int(np.argmax(dists))
        sampled_points_idx.append(idx)
        sampled_points = np.concatenate(
            (sampled_points, all_points[idx:idx + 1]))
    return np.array(sampled_points_idx)
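
A short assumed usage of the completed function, picking eight well-spread points from a random cloud:

pts = np.random.RandomState(0).rand(1000, 3)
spread_idx = farthest_point_sampling(pts, 8)
spread_points = pts[spread_idx]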
Example 21
parser = argparse.ArgumentParser()
parser.add_argument('--folder', required=True)
args = parser.parse_args()

folder = Path(args.folder)

calib = CameraIntrinsics(**json.load(open(folder / 'camera_intrinsics.json')))

rvecs, tvecs = np.array(calib.rvecs)[..., 0], np.array(calib.tvecs)[..., 0]

base_t_tcps = [
    Transform.from_xyz_rotvec(d)
    for d in np.load(folder / 'image_tcp_poses.npy')[calib.img_ids]
]
cam_t_boards = [
    Transform(p=tvec, rotvec=rvec) for rvec, tvec in zip(rvecs, tvecs)
]

n = len(cam_t_boards)
assert n == len(base_t_tcps), '{}, {}'.format(n, len(base_t_tcps))
A, B = [], []

# build relative-motion pairs for the hand-eye equation A @ X = X @ B,
# where X = tcp_t_cam; shuffling yields many distinct pose pairs
shuffle_idx = np.arange(n)
for j in range(10):
    np.random.RandomState(seed=j).shuffle(shuffle_idx)
    for i in range(1, n):
        a, b = shuffle_idx[i], shuffle_idx[i - 1]
        A.append((base_t_tcps[a].inv @ base_t_tcps[b]).matrix)
        B.append((cam_t_boards[a] @ cam_t_boards[b].inv).matrix)

R, t = park_martin.calibrate(A, B)
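
park_martin is not included in the snippet; a minimal sketch of the closed-form AX = XB solver it presumably names (Park & Martin, 1994), operating on the lists of 4x4 matrices built above:

import numpy as np
from scipy.linalg import sqrtm
from scipy.spatial.transform import Rotation

def calibrate(A, B):
    # rotation: log(R_A) = R_X @ log(R_B) for each pair, fit in closed form
    M = np.zeros((3, 3))
    for a, b in zip(A, B):
        alpha = Rotation.from_matrix(a[:3, :3]).as_rotvec()
        beta = Rotation.from_matrix(b[:3, :3]).as_rotvec()
        M += np.outer(beta, alpha)
    R = np.real(np.linalg.inv(sqrtm(M.T @ M)) @ M.T)
    # translation: (R_A - I) @ t_X = R_X @ t_B - t_A, stacked least squares
    C = np.concatenate([a[:3, :3] - np.eye(3) for a in A])
    d = np.concatenate([R @ b[:3, 3] - a[:3, 3] for a, b in zip(A, B)])
    t = np.linalg.lstsq(C, d, rcond=None)[0]
    return R, t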
Example 22
def servo(peg_robot: Robot,
          peg_tcp_node: SceneNode,
          scene_state: SceneState,
          servo_config: dict,
          camera_nodes: Sequence[SceneNode],
          aux_robots: Sequence[Robot] = (),
          aux_tcp_nodes: Sequence[SceneNode] = (),
          insertion_direction_tcp=np.array((0, 0, 1)),
          err_tolerance_scale=0.05,
          timeout=5.,
          max_travel: float = None,
          max_travel_scale=3.,
          alpha_target=0.9,
          alpha_err=0.9):
    state = scene_state.copy()
    crop_configs = servo_config['crop_configs']
    insertion_direction_tcp = np.asarray(
        insertion_direction_tcp) / np.linalg.norm(insertion_direction_tcp)
    assert len(aux_robots) == len(aux_tcp_nodes)
    tcp_nodes = (peg_tcp_node, *aux_tcp_nodes)
    robots = (peg_robot, *aux_robots)
    assert len(servo_config['robots_q_init']) == len(robots)
    assert len(aux_robots)
    n_cams = len(crop_configs)
    z_ests, diameter_est = servo_config['z_ests'], servo_config['diameter_est']
    assert n_cams == len(z_ests) == len(camera_nodes)
    err_tolerance = diameter_est * err_tolerance_scale
    if max_travel is None:
        max_travel = diameter_est * max_travel_scale
    assert crop.configure(servo_config)

    crop_K_invs = [
        np.linalg.inv(get_roi_transform(crop_config) @ crop_config['K'])
        for crop_config in crop_configs
    ]
    cams_points = [None] * n_cams  # type: List[Union[None, Tuple[float, np.ndarray]]]
    new_data = [False]

    def handle_points(msg: Float64MultiArray, cam_idx: int):
        timestamp, points = msg.data[0], np.array(msg.data[1:]).reshape(2, 2)
        cams_points[cam_idx] = timestamp, points
        new_data[0] = True

    subs = []
    for cam_idx in range(n_cams):
        subs.append(
            rospy.Subscriber('/servo/crop_{}/points'.format(cam_idx),
                             Float64MultiArray,
                             partial(handle_points, cam_idx=cam_idx),
                             queue_size=1))

    scene_configs_times = []  # type: List[float]
    scene_configs = []  # type: List[Sequence[Transform]]

    def add_current_scene_config():
        scene_configs.append([r.base_t_tcp() for r in robots])
        scene_configs_times.append(time.time())

    def update_scene_state(timestamp):
        transforms = scene_configs[utils.bisect_closest(
            scene_configs_times, timestamp)]
        for tcp_node, transform in zip(tcp_nodes, transforms):
            state[tcp_node] = transform

    add_current_scene_config()
    update_scene_state(0)

    peg_tcp_init_node = SceneNode(parent=peg_tcp_node.parent)
    state[peg_tcp_init_node] = scene_configs[-1][0]
    peg_tcp_cur_node = SceneNode(parent=peg_tcp_node.parent)

    base_p_tcp_rolling = state[peg_tcp_init_node].p
    err_rolling, err_size_rolling = None, err_tolerance * 10
    start = time.time()
    try:
        while err_size_rolling > err_tolerance:
            loop_start = time.time()
            add_current_scene_config()
            if new_data[0]:
                new_data[0] = False
                state[peg_tcp_cur_node] = scene_configs[-1][0]

                peg_tcp_init_t_peg_tcp = peg_tcp_init_node.t(
                    peg_tcp_node, state)
                if np.linalg.norm(peg_tcp_init_t_peg_tcp.p) > max_travel:
                    raise DeviatingMotionError()
                if rospy.is_shutdown():
                    raise RuntimeError()
                if loop_start - start > timeout:
                    raise TimeoutError()

                # TODO: check for age of points
                # TODO: handle no camera inputs (raise appropriate error)
                move_dirs = []
                move_errs = []
                for cam_points, K_inv, cam_node, z_est in zip(
                        cams_points, crop_K_invs, camera_nodes, z_ests):
                    if cam_points is None:
                        continue
                    timestamp, cam_points = cam_points
                    update_scene_state(timestamp)
                    peg_tcp_t_cam = peg_tcp_node.t(cam_node, state)

                    pts_peg_tcp = []
                    for p_img in cam_points:
                        p_cam = K_inv @ (*p_img, 1)
                        p_cam *= z_est / p_cam[2]
                        pts_peg_tcp.append(peg_tcp_t_cam @ p_cam)
                    hole_peg_tcp, peg_peg_tcp = pts_peg_tcp

                    view_dir = (peg_peg_tcp +
                                hole_peg_tcp) / 2 - peg_tcp_t_cam.p
                    move_dir = np.cross(view_dir, insertion_direction_tcp)
                    move_dir /= np.linalg.norm(move_dir)

                    already_moved = state[peg_tcp_init_node].inv.rotate(
                        state[peg_tcp_cur_node].p - state[peg_tcp_node].p)
                    move_err = np.dot(move_dir, (hole_peg_tcp - peg_peg_tcp) -
                                      already_moved)

                    move_dirs.append(move_dir)
                    move_errs.append(move_err)

                move_dirs = np.array(move_dirs)
                move_errs = np.array(move_errs)
                if len(move_dirs) > 0:
                    err_tcp, *_ = np.linalg.lstsq(move_dirs,
                                                  move_errs,
                                                  rcond=None)
                    if err_rolling is None:
                        err_rolling = err_tcp
                    err_rolling = alpha_err * err_rolling + (
                        1 - alpha_err) * err_tcp
                    err_size_rolling = alpha_err * err_size_rolling + (
                        1 - alpha_err) * np.linalg.norm(err_rolling)
                    base_t_tcp_target = state[peg_tcp_cur_node] @ Transform(
                        p=err_tcp)
                    base_p_tcp_rolling = alpha_target * base_p_tcp_rolling + (
                        1 - alpha_target) * base_t_tcp_target.p
            peg_robot.ctrl.servoL(
                (*base_p_tcp_rolling, *state[peg_tcp_init_node].rotvec), 0.5,
                0.25, peg_robot.dt, 0.2, 300)
            loop_duration = time.time() - loop_start
            time.sleep(max(0., peg_robot.dt - loop_duration))
    finally:
        peg_robot.ctrl.servoStop()
        for sub in subs:
            sub.unregister()
Example 23
args = parser.parse_args()

obj_name = args.obj_name

gripper = get_gripper()
gripper.open()

r = Robot.from_ip('192.168.1.130')
q_safe = (3.317805290222168, -1.3769071859172364, -1.9077906608581543,
          -1.4512933057597657, -4.752126518880026, -1.590332333241598)
r.ctrl.moveJ(q_safe)

base_t_kit = Transform.load('base_t_kitlayout')

kit_t_obj = Transform.load(f'kit_t_objects_practice/kit_t_{obj_name}')

base_t_tcp = base_t_kit @ Transform(p=(*kit_t_obj.p[:2], 0.25),
                                    rotvec=(np.pi, 0, 0))
r.ctrl.moveL(base_t_tcp)

r.ctrl.teachMode()
input('enter when grasp pose is good')
r.ctrl.endTeachMode()

base_t_tcp = r.base_t_tcp()
tcp_t_obj = base_t_tcp.inv @ base_t_kit @ kit_t_obj

print('tcp_t_obj:')
for val in tcp_t_obj:
    print(val)
Example 24
# scripts/example_use_poses.py
from ur_control import Robot
from transform3d import Transform
import numpy as np

r = Robot.from_ip('192.168.1.130')
q_safe = (3.317805290222168, -1.3769071859172364, -1.9077906608581543,
          -1.4512933057597657, -4.752126518880026, -1.590332333241598)

base_t_taskboard = Transform.load('base_t_taskboard')
base_t_kitlayout = Transform.load('base_t_kitlayout')

board_t_tcp_desired = Transform(p=(0, 0, 0.24), rotvec=(np.pi, 0, 0))

base_t_tcp = base_t_taskboard @ board_t_tcp_desired
# uncomment the following line to move above the kitlayout frame
# base_t_tcp = base_t_kitlayout @ board_t_tcp_desired
r.ctrl.moveJ(q_safe)
r.ctrl.moveL(base_t_tcp)
Example 25
import numpy as np
from scipy.spatial.kdtree import KDTree
from scipy.spatial.ckdtree import cKDTree
import trimesh
from transform3d import Transform

poses_gt = [
    ('monkey', Transform(rpy=(15, 30, 45), degrees=True)),
    ('torus', Transform(p=(-0.05, 0.07, 0),
                        rpy=(-140, -100, -80),
                        degrees=True)),
    ('offset_cylinder',
     Transform(p=(-0.3, -0.05, 0.05), rpy=(-128, -15, 35), degrees=True)),
]

for name, pose_gt in poses_gt:
    pose_est = Transform.load(f'scene.{name}.pose')

    mesh = trimesh.load_mesh(f'{name}.stl')
    hv = mesh.convex_hull.vertices
    diameter = np.linalg.norm(hv[None] - hv[:, None], axis=-1).max()

    v_gt = pose_gt @ mesh.vertices
    v_est = pose_est @ mesh.vertices

    # ADD-S style score: mean distance from each estimated-pose vertex to its
    # nearest ground-truth-pose vertex, normalized by the object diameter
    # (scores below 0.1 are commonly counted as correct poses)
    tree_gt = cKDTree(v_gt)  # type: KDTree
    dists, _ = tree_gt.query(v_est)
    adds_score = np.mean(dists) / diameter
    print(f'{name}: {adds_score:.3f}')