def calibrate_from_table_poses(table_poses, start_offset, grid_size,
                               tcp_tool_offset):
    table_pts_in_base_frame = np.array(
        [base_t_tcp @ (0, 0, tcp_tool_offset) for base_t_tcp in table_poses])

    # estimate table_t_base from the first table pose
    base_t_tcp = table_poses[0]
    table_offset_x, table_offset_y = (np.array(start_offset) + .5) * grid_size
    # tcp_t_tool is assumed to be defined at module scope (tool-tip transform)
    base_t_table = base_t_tcp @ tcp_t_tool @ Transform(
        p=(-table_offset_x, -table_offset_y, -tcp_tool_offset))
    table_t_base = base_t_table.inv

    table_pts_in_table_frame = np.array([
        table_t_base @ base_t_tcp @ (0, 0, tcp_tool_offset)
        for base_t_tcp in table_poses
    ])

    # round to nearest possible table positions
    table_pts_in_table_frame[:, 2] = 0
    table_pts_in_table_frame[:, :2] = np.round(
        (table_pts_in_table_frame[:, :2] - grid_size / 2) / grid_size
    ) * grid_size + grid_size / 2

    R, t = kabsch(table_pts_in_base_frame, table_pts_in_table_frame)
    table_t_base = Transform(R=R, p=t)
    return table_t_base
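# Hedged usage sketch for calibrate_from_table_poses. The pose file names,
# grid size and tool offset below are hypothetical, not from this repo:
# record base_t_tcp poses while the tool tip touches consecutive grid cells,
# then fit table_t_base.
def _example_table_calibration():
    table_poses = [Transform.load(f'poses/base_t_tcp_{i}') for i in range(9)]
    table_t_base = calibrate_from_table_poses(table_poses, start_offset=(0, 0),
                                              grid_size=0.05,
                                              tcp_tool_offset=0.1915)
    print(table_t_base)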
def from_file(cls, fp):
    fp_dir = os.path.dirname(fp)
    scene = cls()
    data = json.load(open(fp))
    for d in data['cameras']:
        scene.cameras[d['name']] = Camera(np.array(d['matrix']), d['width'],
                                          d['height'])
    for d in data['objects']:
        scene.objects.append(
            Object(
                d['name'],
                os.path.join(fp_dir, d['file_path']),
                Transform.from_xyz_rotvec(d['pose']) if 'pose' in d else None,
                d['scale'] if 'scale' in d else 1,
            ))
    images = []
    for d in data['images']:
        images.append(
            Image(os.path.join(fp_dir, d['file_path']),
                  scene.cameras[d['camera']],
                  Transform.from_xyz_rotvec(d['camera_pose'])))
    order = utils.farthest_point_sampling(
        np.array([img.camera_pose.p for img in images]), len(images))
    for i in order:
        scene.images.append(images[i])
    return scene
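# The JSON layout that from_file expects, reconstructed from the reads above
# ("pose" and "scale" are optional per object):
# {
#   "cameras": [{"name": ..., "matrix": [[...]], "width": ..., "height": ...}],
#   "objects": [{"name": ..., "file_path": ..., "pose": [x, y, z, rx, ry, rz], "scale": ...}],
#   "images": [{"file_path": ..., "camera": ..., "camera_pose": [x, y, z, rx, ry, rz]}]
# }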
def rotational_grasp_symmetry(kit_t_tcp_grasp: Transform, deg: int,
                              offset=(0, 0, 0)):
    assert 360 % deg == 0
    n = 360 // deg
    to = Transform(p=offset)
    return [
        kit_t_tcp_grasp @ to.inv
        @ Transform(rotvec=(0, 0, np.deg2rad(i * deg))) @ to for i in range(n)
    ]
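# Example: deg=90 yields the four grasp poses that are equivalent under the
# part's rotational symmetry (the identity rotation included as poses[0]).
_sym_poses = rotational_grasp_symmetry(Transform(), deg=90)
assert len(_sym_poses) == 4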
def mouseMoveEvent(self, e: QtGui.QMouseEvent):
    x_last, y_last = self.last_mouse_position
    x, y = e.x(), e.y()
    dx, dy = x - x_last, y - y_last
    self.last_mouse_position = x, y
    modifiers = QtWidgets.QApplication.keyboardModifiers()
    if self.mode == Mode.CROP:
        self.update_crop_drag_box(*self.crop_start, x, y)
        self.update()
    elif self.mode == Mode.ADJUST and self.obj and self.obj.pose:
        s = self.scale
        cam_t_center = self.cam_t_model @ Transform(p=self.obj.center)
        if modifiers == Qt.ControlModifier:  # move xy
            ss = self.cam_t_model.p[2] * 1e-3 * s
            self.set_cam_t_model(
                Transform(p=(dx * ss, dy * ss, 0)) @ self.cam_t_model)
        elif modifiers == (Qt.ControlModifier | Qt.ShiftModifier):
            # move in depth
            dp = cam_t_center.p * dy * 2e-3 * s
            self.set_cam_t_model(Transform(p=dp) @ self.cam_t_model)
        elif modifiers == Qt.ShiftModifier:  # rotate cam z
            cam_R_model = Transform(
                rotvec=(0, 0, -dx * 3e-3)).R @ self.cam_t_model.R
            center_travel = Transform(
                p=self.cam_t_model.p,
                R=cam_R_model) @ self.obj.center - cam_t_center.p
            self.set_cam_t_model(
                Transform(p=self.cam_t_model.p - center_travel, R=cam_R_model))
        else:  # rotate cam x y
            rot = Transform(rotvec=(dy * 3e-3, -dx * 3e-3, 0))
            cam_R_model = rot.R @ self.cam_t_model.R
            center_travel = Transform(
                p=self.cam_t_model.p,
                R=cam_R_model) @ self.obj.center - cam_t_center.p
            self.set_cam_t_model(
                Transform(p=self.cam_t_model.p - center_travel, R=cam_R_model))
def load_scene():
    table, b_base, b_tcp, c_base, c_tcp, cam_black, cam_white = SceneNode.n(7)
    table.adopt(b_base.adopt(b_tcp),
                c_base.adopt(c_tcp.adopt(cam_black, cam_white)))
    state = SceneState()
    state[table] = Transform()
    state[b_base] = Transform.load('calib/table_t_base_b')
    state[c_base] = Transform.load('calib/table_t_base_c')
    state[cam_black] = Transform.load('calib/tcp_t_cam_black')
    state[cam_white] = Transform.load('calib/tcp_t_cam_white')
    return Scene(table, b_base, b_tcp, c_base, c_tcp, cam_black,
                 cam_white), state
def move(self, pos, theta=0., speed=1.2, acc=5., instant=False, record=False):
    world_t_tool_desired = Transform(p=pos, rpy=(0, np.pi, np.pi / 2 + theta))
    if instant:
        self.set_q(self.ik(world_t_tool_desired))
    else:
        world_t_tool_start = self.world_t_tool()
        tool_start_t_tool_desired = world_t_tool_start.inv @ world_t_tool_desired
        dist = np.linalg.norm(tool_start_t_tool_desired.xyz_rotvec)
        images = []
        # qs = []
        for i, s in enumerate(lerp(dist, speed, acc, self.dt)):
            world_t_tool_target = world_t_tool_start @ (
                tool_start_t_tool_desired * s)
            self.set_q_target(self.ik(world_t_tool_target))
            p.stepSimulation()
            if record and i % 4 == 0:  # fps = 1/dt / 4
                images.append(self.take_image())
                # qs.append(self.q_target)
        # return images, qs
    return None
def _get_pose(Q, s):
    P = np.array([
        [0, s, 0],
        [s, s, 0],
        [0, 0, 0],
        [s, 0, 0],
    ])
    R, p = _kabsch(P, Q)
    return Transform(R=R, p=p)
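# A minimal sketch of the `_kabsch` helper used above (an assumption; the
# repo's own implementation lives elsewhere). Given corresponding point sets
# P and Q of shape (N, 3), it returns R, t minimizing ||(R @ P.T).T + t - Q||,
# with an SVD sign correction to avoid reflections.
def _kabsch_sketch(P, Q):
    Pc, Qc = P.mean(axis=0), Q.mean(axis=0)
    H = (P - Pc).T @ (Q - Qc)
    U, _, Vt = np.linalg.svd(H)
    d = np.sign(np.linalg.det(Vt.T @ U.T))  # -1 if the best fit is a reflection
    R = Vt.T @ np.diag([1., 1., d]) @ U.T
    t = Qc - R @ Pc
    return R, t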
def load_grasp_config(grasp_order_desired: List[str] = None):
    kit_root = Path(__file__).parent.parent
    cad_files = json.load(open(kit_root / 'cad_files.json'))
    grasp_config = json.load(open(kit_root / 'grasping' / 'grasp_config.json'))
    min_clearance = grasp_config['grasp_other_clearance']
    if grasp_order_desired is None:
        grasp_order_desired = grasp_config['grasp_order_desired']
    obj_grasp_configs = []
    for name in grasp_order_desired:
        grasp_end_width = grasp_config['grasp_width'][name]
        grasp_start_width = grasp_end_width + from_dict(
            grasp_config['grasp_width_clearance'], name)
        sym_deg = grasp_config['grasp_sym_rot_deg'][name]
        sym_offset = from_dict(grasp_config['grasp_sym_offset'], name)
        mesh = trimesh.load_mesh(kit_root / 'stl' / f'{cad_files[name]}.stl')
        mesh.apply_scale(1e-3)
        tcp_t_obj = Transform.load(kit_root / 'grasping' / 'tcp_t_obj_grasp' /
                                   f'tcp_t_{name}_grasp')
        obj_grasp_configs.append((name, mesh, tcp_t_obj, grasp_start_width,
                                  grasp_end_width, sym_deg, sym_offset))
    return obj_grasp_configs, min_clearance
def center(robot: Robot, axis, ft=None, t=2.):
    base_t_tcp = robot.base_t_tcp()
    limits = np.ones(6)
    axis_idx = pose_elements.index(axis)
    rot = axis_idx > 2
    if ft is None:
        ft = 2 if rot else 15
    if rot:
        selection = np.zeros(3, int)
        selection[axis_idx - 3] = 1
        selection = (*(1 - selection), *selection)
    else:
        selection = np.zeros(6, int)
        selection[axis_idx] = 1
    wrench = np.zeros(6)
    wrench[axis_idx] = ft
    transforms = []  # type: List[Transform]
    for m in 1, -1:
        robot.ctrl.forceMode(base_t_tcp, selection, wrench * m, 2, limits)
        settle(robot, t=t / 2)
        transforms.append(robot.base_t_tcp())
    robot.ctrl.forceModeStop()
    a, b = transforms
    transform_center = Transform.lerp(a, b, 0.5)
    robot.ctrl.moveL(transform_center)
    return transform_center
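# `center` relies on two helpers that are not shown here; plausible sketches
# (assumptions, not the repo's actual definitions): `pose_elements` names the
# six pose axes in wrench order, and `settle` waits for the force-mode motion
# to come to rest.
pose_elements = ['x', 'y', 'z', 'rx', 'ry', 'rz']


def settle(robot, t=1.):
    time.sleep(t)  # assumes the robot reaches equilibrium within t seconds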
def disc_sampling_search(self, radius: float, depth: float, poke_speed=0.03,
                         move_speed=.25, move_acc=1.2,
                         stop_condition=lambda: False, timeout=20.):
    # TODO: insertion direction
    base_t_tcp_init = self.base_t_tcp()
    start = time.time()
    while True:
        if time.time() - start > timeout:
            raise TimeoutError()
        x, y = utils.sample_hyper_sphere_volume(r=radius, d=2)
        tcp_init_t_tcp_next = Transform(p=(x, y, 0))
        base_t_tcp_next = base_t_tcp_init @ tcp_init_t_tcp_next
        self.ctrl.moveL(base_t_tcp_next, move_speed, move_acc)
        try:
            self.speed_until_ft((0, 0, poke_speed), max_travel=depth)
        except DeviatingMotionError:
            return TerminateReason.TRAVEL_LIMIT
        if stop_condition():
            return TerminateReason.STOP_CONDITION
        self.ctrl.moveL(base_t_tcp_next, move_speed, move_acc)
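# Hedged sketch of utils.sample_hyper_sphere_volume (assumption: it samples
# uniformly inside a d-ball of radius r). A random direction is scaled by
# r * u**(1/d) so the volume is covered uniformly rather than clustering at
# the center.
def sample_hyper_sphere_volume(r, d):
    v = np.random.randn(d)
    v /= np.linalg.norm(v)
    return v * r * np.random.rand() ** (1 / d)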
def spiral_search(self, tol: float, max_radius: float, push_force=5.,
                  speed=0.01, ft_stop=(15., 15., np.inf, 2., 2., 2.),
                  stop_condition=lambda: False, lookahead_time=0.1, gain=600):
    # TODO: acc limit
    # TODO: insertion direction
    try:
        base_t_tcp_init = self.base_t_tcp()
        self.ctrl.forceModeStart(base_t_tcp_init, [0, 0, 1, 0, 0, 0],
                                 [0, 0, push_force, 0, 0, 0], 2, [0.05] * 6)
        a, b = spiral_utils.get_spiral_params(tol)
        theta = 0
        while True:
            loop_start = time.time()
            theta = spiral_utils.next_theta_constant_speed(
                theta, a, b, speed=speed, dt=self.dt)
            r = spiral_utils.radius_from_theta(a, b, theta)
            if r > max_radius:
                raise DeviatingMotionError()
            x, y = np.cos(theta) * r - a, np.sin(theta) * r
            base_t_tcp = Transform.from_xyz_rotvec(
                self.recv.getActualTCPPose())
            ft_tcp = base_t_tcp.inv.rotate(self.recv.getActualTCPForce())
            if any(np.abs(ft_tcp) > ft_stop):
                return TerminateReason.FORCE_TORQUE_LIMIT
            elif stop_condition():
                return TerminateReason.STOP_CONDITION
            tcp_start_t_tcp_next = Transform(p=(x, y, 0))
            base_t_tcp_next = base_t_tcp_init @ tcp_start_t_tcp_next
            self.ctrl.servoL(base_t_tcp_next, 0.5, 0.25, self.dt,
                             lookahead_time, gain)
            loop_duration = time.time() - loop_start
            time.sleep(max(0., self.dt - loop_duration))
    finally:
        self.ctrl.servoStop()
        self.ctrl.forceModeStop()
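# Hedged sketches of the spiral_utils helpers used above (assumptions, not
# the repo's implementation): an Archimedean spiral r = a + b * theta whose
# pitch equals the search tolerance, stepped so the TCP moves at constant
# linear speed using the arc length ds/dtheta = sqrt(r^2 + b^2).
def get_spiral_params(tol):
    return 0., tol / (2 * np.pi)  # a, b: one tolerance-width per revolution


def radius_from_theta(a, b, theta):
    return a + b * theta


def next_theta_constant_speed(theta, a, b, speed, dt):
    r = radius_from_theta(a, b, theta)
    return theta + speed * dt / np.sqrt(r * r + b * b)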
def select_object(self, obj: Object):
    self.obj = obj
    for c in self.fl.children():
        c.remove()
    if obj.pose is None:
        open_images = [img for img in self.scene.images if img.is_open]
        if open_images:
            image = open_images[0]
        else:
            image = self.scene.images[0]
        obj.pose = image.camera_pose @ Transform(
            p=(0, 0, obj.diameter * 3), rpy=(np.pi / 2, 0, 0))
        obj.pose = obj.pose @ Transform(p=-obj.center)
    for overlay in self.overlays:
        overlay.render_overlay.set_obj(obj)
def _get_base_t_frame(self, base_t_frame: Frame):
    if isinstance(base_t_frame, str):
        if base_t_frame.lower() == 'base':
            base_t_frame = Transform()
        elif base_t_frame.lower() == 'tcp':
            base_t_frame = self.base_t_tcp()
    if not isinstance(base_t_frame, Transform):
        raise ValueError('{} not recognized as frame'.format(base_t_frame))
    return base_t_frame
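# Usage (hypothetical; `robot` is any object exposing _get_base_t_frame):
# robot._get_base_t_frame('base')       -> identity Transform
# robot._get_base_t_frame('tcp')        -> current base_t_tcp
# robot._get_base_t_frame(Transform())  -> passed through unchanged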
def prune(demonstration, min_dist=0.01, min_angle=np.deg2rad(10),
          min_q_dist=0.01, use_q_space=True):
    idxs = [0]
    for i, (q, pose) in enumerate(demonstration):
        if use_q_space:
            q_last = demonstration[idxs[-1]][0]
            if np.linalg.norm(q_last - q) > min_q_dist:
                idxs.append(i)
        else:
            T_last = Transform.from_xyz_rotvec(demonstration[idxs[-1]][1])
            T = Transform.from_xyz_rotvec(pose)
            diff = T @ T_last.inv
            dist, angle = np.linalg.norm(diff.p), np.linalg.norm(diff.rotvec)
            if dist > min_dist or angle > min_angle:
                idxs.append(i)
    return np.array(idxs)
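# Example use (hypothetical data): a demonstration is a sequence of
# (q, xyz_rotvec_pose) pairs; prune returns the indices of keyframes that
# moved far enough from the previously kept one.
_demo = [(np.random.randn(6), np.random.randn(6)) for _ in range(100)]
_keep = prune(_demo, use_q_space=True)
_sparse_demo = [_demo[i] for i in _keep]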
def build_finger_grasp_volume(grasp_width: float, w=0.032, h=0.035,
                              bottom_width=0.007, base_height=0.005,
                              tcp_z_finger_bottom=0.1915):
    one_side = [(bottom_width, 0), (w, h - base_height), (w, h * 3)]
    one_side = np.array(one_side) + (grasp_width, 0)
    poly = np.concatenate((one_side, one_side[::-1] * (-1, 1)))
    poly = Polygon(poly)
    mesh = extrude_polygon(poly, w)
    mesh.apply_translation((0, 0, -w / 2))
    mesh.apply_transform(Transform(rotvec=(-np.pi / 2, 0, 0)).matrix)
    mesh.apply_translation((0, 0, tcp_z_finger_bottom))
    return mesh
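# Example (hypothetical width, in meters to match the other defaults): build
# the swept finger collision volume for a 25 mm grasp opening and preview it.
_finger_volume = build_finger_grasp_volume(grasp_width=0.025)
# _finger_volume.show()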
def move(self, pos, theta=0., speed=1.2, acc=5., instant=False, record=False,
         save=False):
    world_t_tool_desired = Transform(p=pos, rpy=(0, np.pi, np.pi / 2 + theta))
    if instant:
        self.set_q(self.ik(world_t_tool_desired))
    else:
        world_t_tool_start = self.world_t_tool()
        tool_start_t_tool_desired = world_t_tool_start.inv @ world_t_tool_desired
        dist = np.linalg.norm(tool_start_t_tool_desired.xyz_rotvec)
        images = []
        states_l = []
        for i, s in enumerate(lerp(dist, speed, acc, self.dt)):
            world_t_tool_target = world_t_tool_start @ (
                tool_start_t_tool_desired * s)
            self.set_q_target(self.ik(world_t_tool_target))
            p.stepSimulation()
            if record and i % 4 == 0:  # fps = 1/dt / 4
                images.append(self.take_image())
            if save:
                # qs.append(self.q_target)
                cube_pos, cube_orn = p.getBasePositionAndOrientation(
                    self.cube_id)
                # maybe from here we could get the q_dots?
                # p.setJointMotorControlArray(
                #     self.rid, self.joint_idxs, p.POSITION_CONTROL, q,
                #     forces=[100] * 7 + [15] * 2, positionGains=[1] * 9)
                states_l.append(
                    State(qs=self.q_target, q_dots=None, cube_pos=cube_pos,
                          cube_orient=cube_orn, keypoint=None))
        if save:
            return states_l
        else:
            return None
def main():
    # record_data('data.npy', zero_payload=True)
    # quit()
    data = np.load('data.npy')
    base_t_tcps = [Transform.from_xyz_rotvec(d[:6]) for d in data]
    ft_tcps = [d[6:] for d in data]
    tool_mass, tcp_p_cog = calibrate_payload(base_t_tcps, ft_tcps)
    print(tool_mass, tcp_p_cog)
    r = Robot.from_ip('192.168.1.130')
    r.ctrl.setPayload(tool_mass, tcp_p_cog)

    # evaluate residual forces/torques
    # (assumes 'data.npy' has been re-recorded with the new payload set)
    data_after = np.load('data.npy')
    print('force max', np.abs(data_after[:, 6:9]).max())
    print('force mean', np.abs(data_after[:, 6:9]).mean())
    print('force rms', np.sqrt((data_after[:, 6:9]**2).mean()))
    print()
    print('torque max', np.abs(data_after[:, 9:]).max())
    print('torque mean', np.abs(data_after[:, 9:]).mean())
def main():
    import time
    from ..utils import load_tcp_t_cam, load_cam_intrinsics
    n = 40
    images = [cv2.imread(f'sweep/{i}.png') for i in tqdm(range(n))]
    base_t_tcps = [
        Transform.load(f'sweep/base_t_tcp_{i}') for i in tqdm(range(n))
    ]
    tcp_t_cam = load_tcp_t_cam('A')
    K = load_cam_intrinsics('A')[0]
    start = time.time()
    points_base = reconstruct_points(images=images, base_t_tcps=base_t_tcps,
                                     tcp_t_cam=tcp_t_cam, K=K, debug=False)
    print(f'{time.time() - start:.2f} s')
    print(points_base)
    np.save('points_base.npy', points_base)
# scripts/example_use_poses.py
from ur_control import Robot
from transform3d import Transform
import numpy as np

r = Robot.from_ip('192.168.1.130')
q_safe = (3.317805290222168, -1.3769071859172364, -1.9077906608581543,
          -1.4512933057597657, -4.752126518880026, -1.590332333241598)

base_t_taskboard = Transform.load('base_t_taskboard')
base_t_kitlayout = Transform.load('base_t_kitlayout')

board_t_tcp_desired = Transform(p=(0, 0, 0.24), rotvec=(np.pi, 0, 0))
base_t_tcp = base_t_taskboard @ board_t_tcp_desired
# uncomment the following line to move above the kitlayout frame
# base_t_tcp = base_t_kitlayout @ board_t_tcp_desired

r.ctrl.moveJ(q_safe)
r.ctrl.moveL(base_t_tcp)
def servo(peg_robot: Robot, peg_tcp_node: SceneNode, scene_state: SceneState,
          servo_config: dict, camera_nodes: Sequence[SceneNode],
          aux_robots: Sequence[Robot] = (),
          aux_tcp_nodes: Sequence[SceneNode] = (),
          insertion_direction_tcp=np.array((0, 0, 1)),
          err_tolerance_scale=0.05, timeout=5., max_travel: float = None,
          max_travel_scale=3., alpha_target=0.9, alpha_err=0.9):
    state = scene_state.copy()
    crop_configs = servo_config['crop_configs']
    insertion_direction_tcp = np.asarray(
        insertion_direction_tcp) / np.linalg.norm(insertion_direction_tcp)
    assert len(aux_robots) == len(aux_tcp_nodes)
    tcp_nodes = (peg_tcp_node, *aux_tcp_nodes)
    robots = (peg_robot, *aux_robots)
    assert len(servo_config['robots_q_init']) == len(robots)
    assert len(aux_robots)
    n_cams = len(crop_configs)
    z_ests, diameter_est = servo_config['z_ests'], servo_config['diameter_est']
    assert n_cams == len(z_ests) == len(camera_nodes)
    err_tolerance = diameter_est * err_tolerance_scale
    if max_travel is None:
        max_travel = diameter_est * max_travel_scale
    assert crop.configure(servo_config)
    crop_K_invs = [
        np.linalg.inv(get_roi_transform(crop_config) @ crop_config['K'])
        for crop_config in crop_configs
    ]
    cams_points = [
        None for _ in range(n_cams)
    ]  # type: List[Union[None, Tuple[float, np.ndarray]]]
    new_data = [False]

    def handle_points(msg: Float64MultiArray, cam_idx: int):
        timestamp, points = msg.data[0], np.array(msg.data[1:]).reshape(2, 2)
        cams_points[cam_idx] = timestamp, points
        new_data[0] = True

    subs = []
    for cam_idx in range(n_cams):
        subs.append(
            rospy.Subscriber('/servo/crop_{}/points'.format(cam_idx),
                             Float64MultiArray,
                             partial(handle_points, cam_idx=cam_idx),
                             queue_size=1))

    scene_configs_times = []  # type: List[float]
    scene_configs = []  # type: List[Sequence[Transform]]

    def add_current_scene_config():
        scene_configs.append([r.base_t_tcp() for r in robots])
        scene_configs_times.append(time.time())

    def update_scene_state(timestamp):
        transforms = scene_configs[utils.bisect_closest(
            scene_configs_times, timestamp)]
        for tcp_node, transform in zip(tcp_nodes, transforms):
            state[tcp_node] = transform

    add_current_scene_config()
    update_scene_state(0)
    peg_tcp_init_node = SceneNode(parent=peg_tcp_node.parent)
    state[peg_tcp_init_node] = scene_configs[-1][0]
    peg_tcp_cur_node = SceneNode(parent=peg_tcp_node.parent)

    base_p_tcp_rolling = state[peg_tcp_init_node].p
    err_rolling, err_size_rolling = None, err_tolerance * 10
    start = time.time()
    try:
        while err_size_rolling > err_tolerance:
            loop_start = time.time()
            add_current_scene_config()
            if new_data[0]:
                new_data[0] = False
                state[peg_tcp_cur_node] = scene_configs[-1][0]
                peg_tcp_init_t_peg_tcp = peg_tcp_init_node.t(
                    peg_tcp_node, state)
                if np.linalg.norm(peg_tcp_init_t_peg_tcp.p) > max_travel:
                    raise DeviatingMotionError()
                if rospy.is_shutdown():
                    raise RuntimeError()
                if loop_start - start > timeout:
                    raise TimeoutError()
                # TODO: check for age of points
                # TODO: handle no camera inputs (raise appropriate error)
                move_dirs = []
                move_errs = []
                for cam_points, K_inv, cam_node, z_est in zip(
                        cams_points, crop_K_invs, camera_nodes, z_ests):
                    if cam_points is None:
                        continue
                    timestamp, cam_points = cam_points
                    update_scene_state(timestamp)
                    peg_tcp_t_cam = peg_tcp_node.t(cam_node, state)
                    pts_peg_tcp = []
                    for p_img in cam_points:
                        p_cam = K_inv @ (*p_img, 1)
                        p_cam *= z_est / p_cam[2]
                        pts_peg_tcp.append(peg_tcp_t_cam @ p_cam)
                    hole_peg_tcp, peg_peg_tcp = pts_peg_tcp
                    view_dir = (peg_peg_tcp +
                                hole_peg_tcp) / 2 - peg_tcp_t_cam.p
                    move_dir = np.cross(view_dir, insertion_direction_tcp)
                    move_dir /= np.linalg.norm(move_dir)
                    already_moved = state[peg_tcp_init_node].inv.rotate(
                        state[peg_tcp_cur_node].p - state[peg_tcp_node].p)
                    move_err = np.dot(move_dir, (hole_peg_tcp - peg_peg_tcp) -
                                      already_moved)
                    move_dirs.append(move_dir)
                    move_errs.append(move_err)
                move_dirs = np.array(move_dirs)
                move_errs = np.array(move_errs)
                if len(move_dirs) > 0:
                    err_tcp, *_ = np.linalg.lstsq(move_dirs, move_errs,
                                                  rcond=None)
                    if err_rolling is None:
                        err_rolling = err_tcp
                    err_rolling = alpha_err * err_rolling + (
                        1 - alpha_err) * err_tcp
                    err_size_rolling = alpha_err * err_size_rolling + (
                        1 - alpha_err) * np.linalg.norm(err_rolling)
                    base_t_tcp_target = state[peg_tcp_cur_node] @ Transform(
                        p=err_tcp)
                    base_p_tcp_rolling = alpha_target * base_p_tcp_rolling + (
                        1 - alpha_target) * base_t_tcp_target.p
            peg_robot.ctrl.servoL(
                (*base_p_tcp_rolling, *state[peg_tcp_init_node].rotvec), 0.5,
                0.25, peg_robot.dt, 0.2, 300)
            loop_duration = time.time() - loop_start
            time.sleep(max(0., peg_robot.dt - loop_duration))
    finally:
        peg_robot.ctrl.servoStop()
        for sub in subs:
            sub.unregister()
def determine_grasps(obj_grasp_configs, kit_t_objects: Dict[str, Transform],
                     min_clearance, debug=False, debug_view_res=(800, 500)):
    if debug:
        scene = trimesh.Scene()
        for name, mesh, *_ in obj_grasp_configs:
            scene.add_geometry(mesh, transform=kit_t_objects[name].matrix,
                               geom_name=name)
        scene.show(resolution=debug_view_res)
    lense = trimesh.load_mesh(
        Path(__file__).parent.parent / 'stl' / 'lense.stl')
    lense.apply_scale(1e-3)
    col_tool = CollisionManager()
    col_tool.add_object('lense', lense)
    col_objects = CollisionManager()
    for name, mesh, *_ in obj_grasp_configs:
        col_objects.add_object(name, mesh,
                               transform=kit_t_objects[name].matrix)
    worst_possible_clearance = np.inf
    grasps = []
    candidates = [*obj_grasp_configs]
    clearances = []
    i = 0
    while candidates:
        candidate = candidates[i]
        (name, mesh, tcp_t_obj, grasp_start_width, grasp_end_width, sym_deg,
         sym_offset) = candidate
        kit_t_obj = kit_t_objects[name]
        # move away the grasp obj to ignore obj-tcp collision
        col_objects.set_transform(name, Transform(p=(1000, 0, 0)).matrix)
        finger_grasp_volume = build_finger_grasp_volume(grasp_start_width)
        col_tool.add_object('finger_grasp_volume', finger_grasp_volume)
        kit_t_tcp = kit_t_obj @ tcp_t_obj.inv
        ts = rotational_grasp_symmetry(kit_t_tcp, sym_deg, sym_offset)
        dists, dists_name = [], []
        for t in ts:
            col_tool.set_transform('lense', t.matrix)
            col_tool.set_transform('finger_grasp_volume', t.matrix)
            dist, dist_names = col_objects.min_distance_other(
                col_tool, return_names=True)
            if dist < worst_possible_clearance:
                worst_possible_clearance = dist
            dists.append(dist)
            dists_name.append(dist_names[0])
        di = np.argmax(dists).item()
        kit_t_tcp, dist, dist_name = ts[di], dists[di], dists_name[di]
        tcp_t_obj = kit_t_tcp.inv @ kit_t_obj
        col_tool.remove_object('finger_grasp_volume')
        if dist > min_clearance:
            grasps.append(
                (name, tcp_t_obj, grasp_start_width, grasp_end_width))
            candidates.remove(candidate)
            clearances.append(dist)
            i = 0
            if debug:
                print(f'{name}: {dist:.3f}')
                scene.add_geometry(lense, transform=kit_t_tcp.matrix,
                                   geom_name='lense')
                scene.add_geometry(finger_grasp_volume,
                                   transform=kit_t_tcp.matrix,
                                   geom_name='finger_grasp_volume')
                scene.show(resolution=debug_view_res)
                scene.delete_geometry([name, 'lense', 'finger_grasp_volume'])
        else:
            if debug:
                print(f'!! {name}: {dist_name} {dist:.3f} - changing order')
            # move the grasp object back in place
            col_objects.set_transform(name, kit_t_obj.matrix)
            i += 1
            if i == len(candidates):
                raise RuntimeError("can't find solution with this clearance")
    return grasps
    2.2288451194763184, -1.490114526157715, 1.5134428183185022,
    -0.8572123807719727, -1.3860304991351526, -0.5580604712115687
])

for q, r in zip(config['robots_q_init'], [r_b, r_c]):
    r.ctrl.moveJ(q)
r_b.zero_ft_sensor()

base_t_tcp_b_init = r_b.base_t_tcp()
for _ in range(args.n):
    if rospy.is_shutdown():
        break
    yz = np.random.randn(2)
    yz = yz / np.linalg.norm(yz) * max_err
    tcp_err = Transform(p=(0, *yz))
    base_t_tcp_b = base_t_tcp_b_init @ tcp_err
    r_b.ctrl.moveL(base_t_tcp_b)
    peg_in_hole_visual_servoing.servo(
        peg_robot=r_b,
        aux_robots=[r_c],
        scene_state=state,
        peg_tcp_node=scene.b_tcp,
        aux_tcp_nodes=[scene.c_tcp],
        camera_nodes=[scene.cam_black, scene.cam_white],
        servo_config=config,
        insertion_direction_tcp=np.array((1, 0, 0)),
        alpha_target=0.95,
        timeout=10,
    )
def from_dict(d, k):
    return d[k] if k in d else d['default']


parser = argparse.ArgumentParser()
parser.add_argument('--base-t-kitlayout', required=True)
parser.add_argument('--obj-names', nargs='+', default=None)
parser.add_argument('--layout', default=layouts.practice_fp)
parser.add_argument('--get-grasp-position', action='store_true')
parser.add_argument('--examine-grasp', action='store_true')
parser.add_argument('--stop-at-grasp', action='store_true')
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()

base_t_kit = Transform.load(args.base_t_kitlayout)
kit_t_objects = extract_kit_t_objects(args.layout, debug=args.debug)
obj_grasp_configs, min_clearance = load_grasp_config(args.obj_names)
grasps, min_clearance = determine_grasps_decreasing_clearance(
    obj_grasp_configs, kit_t_objects, min_clearance, debug=args.debug)

gripper = get_gripper()
gripper.open()
r = Robot.from_ip('192.168.1.130')
r.ctrl.moveL(r.base_t_tcp() @ Transform(p=(0, 0, -0.05)))

q_safe = (2.8662071228027344, -1.7563158474364222, -1.9528794288635254,
          -1.0198443692973633, -4.752078358327047, -2.1280840078936976)
q_drop = (3.0432159900665283, -2.0951763592162074, -1.622988224029541,
          -0.9942649167827149, -4.733094040547506, -1.5899961630450647)
from gripper import get_gripper

parser = argparse.ArgumentParser()
parser.add_argument('obj_names', nargs='+')
parser.add_argument('--wait', type=float, default=1.)
parser.add_argument('--dont-put-back', action='store_true')
parser.add_argument('--stop-at-grasp', action='store_true')
args = parser.parse_args()
obj_names = args.obj_names

gripper = get_gripper()
gripper.open()
r = Robot.from_ip('192.168.1.130')
r.ctrl.moveL(r.base_t_tcp() @ Transform(p=(0, 0, -0.05)))

q_safe = (2.8662071228027344, -1.7563158474364222, -1.9528794288635254,
          -1.0198443692973633, -4.752078358327047, -2.1280840078936976)

grasp_start_widths = {
    'big_round_peg': 25,
    'small_round_peg': 17,
    'big_gear': 40,
    'small_gear': 26,
    'bnc': 24,
    'belt': 30,
    'small_square_peg': 14,
    'big_square_peg': 22,
    'ethernet_cable_head': 22,
}
parser = argparse.ArgumentParser()
parser.add_argument('obj_name')
args = parser.parse_args()
obj_name = args.obj_name

gripper = get_gripper()
gripper.open()

r = Robot.from_ip('192.168.1.130')
q_safe = (3.317805290222168, -1.3769071859172364, -1.9077906608581543,
          -1.4512933057597657, -4.752126518880026, -1.590332333241598)
r.ctrl.moveJ(q_safe)

base_t_kit = Transform.load('base_t_kitlayout')
kit_t_obj = Transform.load(f'kit_t_objects_practice/kit_t_{obj_name}')
base_t_tcp = base_t_kit @ Transform(p=(*kit_t_obj.p[:2], 0.25),
                                    rotvec=(np.pi, 0, 0))
r.ctrl.moveL(base_t_tcp)
r.ctrl.teachMode()
input('enter when grasp pose is good')
r.ctrl.endTeachMode()

base_t_tcp = r.base_t_tcp()
tcp_t_obj = base_t_tcp.inv @ base_t_kit @ kit_t_obj
print('tcp_t_obj:')
def load_tcp_t_cam(name):
    return Transform.load(calib_folder / name / 'tcp_t_cam')
def load_table_t_base(name):
    return Transform.load(calib_folder / name / 'table_t_base')
def world_t_tool(self):
    pos, quat = p.getLinkState(self.rid, 11)[:2]
    return Transform(p=pos, quat=quat)
def mesh_to_vertnorms(mesh: trimesh.Trimesh):
    verts = mesh.vertices[mesh.faces].reshape(-1, 3)
    norms = np.tile(mesh.face_normals[:, None], (1, 3, 1)).reshape(-1, 3)
    # norms = mesh.vertex_normals[mesh.faces].reshape(-1, 3)
    return np.concatenate((verts, norms), -1).reshape(-1).astype('f4')


def to_uniform(a):
    a = np.asarray(a, 'f4')
    if a.ndim == 2:
        a = a.T.reshape(-1)
    assert a.ndim == 1
    return tuple(a)


gl_t_cv = Transform(rpy=(np.pi, 0, 0))


def cvt_camera_matrix_cv_gl(camera_matrix: np.ndarray):
    camera_matrix_out = camera_matrix.copy()
    camera_matrix_out[:, 2] *= -1
    return camera_matrix_out


def farthest_point_sampling(all_points: np.ndarray, n: int):  # (N, d)
    assert n >= 0
    assert all_points.ndim == 2
    sampled_points_idx = []
    sampled_points = all_points.mean(axis=0, keepdims=True)
    for _ in range(n):
        dists = np.linalg.norm(all_points[:, None] - sampled_points[None],
                               axis=-1)  # (N, n_sampled)
        # greedily pick the point farthest from all points sampled so far
        idx = dists.min(axis=1).argmax()
        sampled_points_idx.append(idx)
        sampled_points = np.concatenate(
            (sampled_points, all_points[idx:idx + 1]))
    return sampled_points_idx
def base_t_tcp(self):
    # TODO: possibly cache by default with a ttl ~ self.dt
    return Transform.from_xyz_rotvec(self.recv.getActualTCPPose())