def main(group=SE3):
    """Demo: plan a collision-free motion for a free-flying cylinder.

    Builds a box obstacle and a cylinder "robot" whose pose is controlled by
    a set of virtual joints (one per dimension of ``group``, e.g. SE(3)),
    then plans a joint-space path from one corner of the workspace to the
    opposite corner and replays it interactively.

    :param group: iterable of pose dimensions to expose as movable joints
        (default SE3) — also keys of CUSTOM_LIMITS.
    """
    connect(use_gui=True)
    set_camera_pose(camera_point=SIZE*np.array([1., -1., 1.]))
    # TODO: can also create all links and fix some joints
    # TODO: SE(3) motion planner (like my SE(3) one) where some dimensions are fixed
    obstacle = create_box(w=SIZE, l=SIZE, h=SIZE, color=RED)
    #robot = create_cylinder(radius=0.025, height=0.1, color=BLUE)
    obstacles = [obstacle]
    # The flying body wraps the cylinder shape in virtual prismatic/revolute joints
    collision_id, visual_id = create_shape(
        get_cylinder_geometry(radius=0.025, height=0.1), color=BLUE)
    robot = create_flying_body(group, collision_id, visual_id)
    body_link = get_links(robot)[-1]  # the actual cylinder link at the chain's end
    joints = get_movable_joints(robot)
    joint_from_group = dict(zip(group, joints))
    print(joint_from_group)
    #print(get_aabb(robot, body_link))
    dump_body(robot, fixed=False)
    # Map per-dimension limits onto the corresponding virtual joints
    custom_limits = {joint_from_group[j]: l for j, l in CUSTOM_LIMITS.items()}
    # sample_fn = get_sample_fn(robot, joints, custom_limits=custom_limits)
    # for i in range(10):
    #     conf = sample_fn()
    #     set_joint_positions(robot, joints, conf)
    #     wait_for_user('Iteration: {}'.format(i))
    # return
    initial_point = SIZE*np.array([-1., -1., 0])
    #initial_point = -1.*np.ones(3)
    final_point = -initial_point  # goal is the diagonally-opposite corner
    initial_euler = np.zeros(3)
    add_line(initial_point, final_point, color=GREEN)
    initial_conf = np.concatenate([initial_point, initial_euler])
    final_conf = np.concatenate([final_point, initial_euler])
    set_joint_positions(robot, joints, initial_conf)
    #print(initial_point, get_link_pose(robot, body_link))
    #set_pose(robot, Pose(point=-1.*np.ones(3)))
    path = plan_joint_motion(robot, joints, final_conf, obstacles=obstacles,
                             self_collisions=False, custom_limits=custom_limits)
    if path is None:
        disconnect()
        return
    for i, conf in enumerate(path):
        set_joint_positions(robot, joints, conf)
        point, quat = get_link_pose(robot, body_link)
        #euler = euler_from_quat(quat)
        euler = intrinsic_euler_from_quat(quat)
        print(conf)
        print(point, euler)
        # 1-based step counter so the final waypoint reads N/N (was i/N, off by one)
        wait_for_user('Step: {}/{}'.format(i + 1, len(path)))
    wait_for_user('Finish?')
    disconnect()
def main(use_pr2_drake=True):
    """Demo: PR2 base and arm motion planning around a table.

    Loads a plane, a table, and a PR2, poses the arms/torso, then plans and
    displays a base motion between two corners followed by a left-arm motion.

    :param use_pr2_drake: if True, use the Drake PR2 URDF and its base-motion
        test; otherwise use the standard PR2 URDF.
    """
    connect(use_gui=True)
    add_data_path()
    plane = p.loadURDF("plane.urdf")
    table_path = "models/table_collision/table.urdf"
    table = load_pybullet(table_path, fixed_base=True)
    set_quat(table, quat_from_euler(Euler(yaw=PI / 2)))
    # table/table.urdf, table_square/table_square.urdf, cube.urdf, block.urdf, door.urdf
    obstacles = [plane, table]
    pr2_urdf = DRAKE_PR2_URDF if use_pr2_drake else PR2_URDF
    with HideOutput():  # suppress URDF loading spam
        pr2 = load_model(pr2_urdf, fixed_base=True)  # TODO: suppress warnings?
    dump_body(pr2)
    # Place the robot so its lowest point rests on z=0
    z = base_aligned_z(pr2)
    print(z)
    #z = stable_z_on_aabb(pr2, AABB(np.zeros(3), np.zeros(3)))
    print(z)
    set_point(pr2, Point(z=z))
    print(get_aabb(pr2))
    wait_if_gui()
    base_start = (-2, -2, 0)
    base_goal = (2, 2, 0)
    arm_start = SIDE_HOLDING_LEFT_ARM
    #arm_start = TOP_HOLDING_LEFT_ARM
    #arm_start = REST_LEFT_ARM
    arm_goal = TOP_HOLDING_LEFT_ARM
    #arm_goal = SIDE_HOLDING_LEFT_ARM
    left_joints = joints_from_names(pr2, PR2_GROUPS['left_arm'])
    right_joints = joints_from_names(pr2, PR2_GROUPS['right_arm'])
    torso_joints = joints_from_names(pr2, PR2_GROUPS['torso'])
    set_joint_positions(pr2, left_joints, arm_start)
    # Mirror the left-arm rest configuration onto the right arm
    set_joint_positions(pr2, right_joints, rightarm_from_leftarm(REST_LEFT_ARM))
    set_joint_positions(pr2, torso_joints, [0.2])
    open_arm(pr2, 'left')
    # test_ikfast(pr2)
    add_line(base_start, base_goal, color=RED)  # visualize the straight-line base goal
    print(base_start, base_goal)
    if use_pr2_drake:
        test_drake_base_motion(pr2, base_start, base_goal, obstacles=obstacles)
    else:
        test_base_motion(pr2, base_start, base_goal, obstacles=obstacles)
    test_arm_motion(pr2, left_joints, arm_goal)
    # test_arm_control(pr2, left_joints, arm_start)
    wait_if_gui('Finish?')
    disconnect()
def test_retraction(robot, info, tool_link, distance=0.1, **kwargs):
    """Retract the tool straight back along its local -z axis, solving IK
    at interpolated waypoints.

    Draws the retraction segment, steps through interpolated poses, and sets
    the robot to an IK solution at each one (pausing for the user). Returns
    the list of IK configurations, or None if IK fails at any waypoint.

    :param distance: retraction distance along the tool's local z-axis
    :param kwargs: forwarded to either_inverse_kinematics
    """
    ik_joints = get_ik_joints(robot, info, tool_link)
    start_pose = get_link_pose(robot, tool_link)
    end_pose = multiply(start_pose, Pose(Point(z=-distance)))
    handles = [
        add_line(point_from_pose(start_pose), point_from_pose(end_pose),
                 color=BLUE)
    ]
    #handles.extend(draw_pose(start_pose))
    #handles.extend(draw_pose(end_pose))
    waypoints = list(interpolate_poses(start_pose, end_pose, pos_step_size=0.01))
    num_waypoints = len(waypoints)
    path = []
    for index, waypoint in enumerate(waypoints, start=1):
        print('Waypoint: {}/{}'.format(index, num_waypoints))
        handles.extend(draw_pose(waypoint))
        conf = next(either_inverse_kinematics(robot, info, tool_link, waypoint,
                                              **kwargs), None)
        if conf is None:
            # No IK solution for this waypoint: abort and signal failure
            print('Failure!')
            path = None
            wait_for_user()
            break
        set_joint_positions(robot, ik_joints, conf)
        path.append(conf)
        wait_for_user()
    # for conf in islice(ikfast_inverse_kinematics(robot, info, tool_link, pose, max_attempts=INF, max_distance=0.5), 1):
    #     set_joint_positions(robot, joints[:len(conf)], conf)
    #     wait_for_user()
    remove_handles(handles)
    return path
def are_visible(world):
    """Return the set of movable-object names visible to at least one camera.

    For every (object, camera) pair whose object point projects inside the
    camera frustum, a ray is cast from the camera to the object; the object
    counts as visible when the ray's first hit is the object itself
    (i.e. nothing occludes it). Visible rays are briefly drawn in the GUI.
    """
    candidate_names = []
    candidate_rays = []
    for name in world.movable:
        for camera, info in world.cameras.items():
            camera_pose = get_pose(info.body)
            origin = point_from_pose(camera_pose)
            target = point_from_pose(get_pose(world.get_body(name)))
            # Frustum check first: only cast rays at points inside the view cone
            if is_visible_point(CAMERA_MATRIX, KINECT_DEPTH, target,
                                camera_pose=camera_pose):
                candidate_names.append(name)
                candidate_rays.append(Ray(origin, target))
    ray_results = batch_ray_collision(candidate_rays)
    # Occlusion check: visible iff the ray's first hit is the object itself
    visible_indices = []
    for idx, (name, result) in enumerate(zip(candidate_names, ray_results)):
        if result.objectUniqueId == world.get_body(name):
            visible_indices.append(idx)
    visible_names = {candidate_names[idx] for idx in visible_indices}
    print('Detected:', sorted(visible_names))
    if has_gui():
        handles = [add_line(candidate_rays[idx].start, candidate_rays[idx].end,
                            color=BLUE) for idx in visible_indices]
        wait_for_duration(1.0)
        remove_handles(handles)
    # TODO: the object drop seems to happen around here
    return visible_names
def draw_element(node_points, element, color=RED):
    """Draw an element as a debug line between its two endpoint nodes.

    :param node_points: indexable collection of node coordinates
    :param element: pair of node indices (n1, n2)
    :param color: RGB(A) color, or None to leave the color unspecified
    :return: handle of the created debug line
    """
    if color is not None:  # TODO: select a color when color=None
        color = color[:3]  # drop any alpha channel
    start, end = element
    return add_line(node_points[start], node_points[end],
                    color=color, width=LINE_WIDTH)
def at(self, time_from_start):
    """Sample the trajectory at the given time, drawing the tool's extrusion
    segment incrementally as a red debug line.

    While the trajectory is active, appends a line from the previously drawn
    point to the current tool position. When it is inactive (super().at()
    returns None), closes out the drawing by connecting the last drawn point
    to the trajectory's final tool pose, then resets the drawing state.

    :param time_from_start: elapsed time to sample at
    :return: the configuration from the parent class, or None when inactive
    """
    current_conf = super(PrintTrajectory, self).at(time_from_start)
    if current_conf is None:
        # Trajectory not active at this time; finish the segment if one was started
        if self.last_point is not None:
            #set_configuration(self.robot, INITIAL_CONF) # TODO: return here
            end_point = point_from_pose(self.tool_path[-1])
            self.handles.append(
                add_line(self.last_point, end_point, color=RED))
            self.last_point = None  # reset so the next activation restarts drawing
    else:
        if self.last_point is None:
            # First active sample: start drawing from the path's initial tool point
            self.last_point = point_from_pose(self.tool_path[0])
        current_point = point_from_pose(self.end_effector.get_tool_pose())
        self.handles.append(
            add_line(self.last_point, current_point, color=RED))
        self.last_point = current_point
    return current_conf
def simulate_printing(node_points, trajectories, time_step=0.1, speed_up=10.):
    # TODO: deprecate
    """Replay print trajectories over simulated time, drawing each tool path
    as red line segments.

    Steps a clock by ``time_step``; whenever the current trajectory's
    interpolation curve ends within the next step, its final segment is drawn
    and the next trajectory is started at that end time.

    :param node_points: node coordinates used to interpolate tool curves
    :param trajectories: mixed list; only PrintTrajectory instances are replayed
    :param time_step: simulated seconds per drawing step
    :param speed_up: real-time speedup factor for the GUI wait
    :return: list of debug-line handles for all drawn segments
    """
    print_trajectories = [
        traj for traj in trajectories if isinstance(traj, PrintTrajectory)
    ]
    handles = []
    current_time = 0.
    current_traj = print_trajectories.pop(0)
    # interpolate_tool yields a curve: x is time, y is tool position — presumably
    # a scipy-style interpolator; verify against PrintTrajectory.interpolate_tool
    current_curve = current_traj.interpolate_tool(node_points,
                                                  start_time=current_time)
    current_position = current_curve.y[0]
    while True:
        print('Time: {:.3f} | Remaining: {} | Segments: {}'.format(
            current_time, len(print_trajectories), len(handles)))
        end_time = current_curve.x[-1]
        if end_time < current_time + time_step:
            # Current trajectory ends before the next step: draw its tail segment
            handles.append(
                add_line(current_position, current_curve.y[-1], color=RED))
            if not print_trajectories:
                break  # all trajectories consumed
            # Start the next trajectory where this one ended (no time gap)
            current_traj = print_trajectories.pop(0)
            current_curve = current_traj.interpolate_tool(node_points,
                                                          start_time=end_time)
            current_position = current_curve.y[0]
            print(
                'New trajectory | Start time: {:.3f} | End time: {:.3f} | Duration: {:.3f}'
                .format(current_curve.x[0], current_curve.x[-1],
                        current_curve.x[-1] - current_curve.x[0]))
        else:
            # Advance the clock and draw the segment covered in this step
            current_time += time_step
            new_position = current_curve(current_time)
            handles.append(add_line(current_position, new_position, color=RED))
            current_position = new_position
            # TODO: longer wait for recording videos
            wait_for_duration(time_step / speed_up)
            # wait_if_gui()
    wait_if_gui()
    return handles
def visualize_learned_pours(learner, bowl_type='blue_bowl', cup_type='red_cup', num_steps=None): learner.query_type = REJECTION # BEST | REJECTION | STRADDLE learner.validity_learner = None world = create_world() #add_obstacles() #environment = get_bodies() world.bodies[bowl_type] = load_cup_bowl(bowl_type) world.bodies[cup_type] = load_cup_bowl(cup_type) feature = get_pour_feature(world, bowl_type, cup_type) draw_pose(get_pose(world.get_body(bowl_type)), length=0.2) # TODO: sample different sizes print('Feature:', feature) for parameter in learner.parameter_generator(world, feature, valid=False): handles = [] print('Parameter:', parameter) valid = is_valid_pour(world, feature, parameter) score = learner.score(feature, parameter) x = learner.func.x_from_feature_parameter(feature, parameter) [prediction] = learner.predict_x(np.array([x]), noise=True) print('Valid: {} | Mean: {:.3f} | Var: {:.3f} | Score: {:.3f}'.format( valid, prediction['mean'], prediction['variance'], score)) #fraction = rescale(clip(prediction['mean'], *DEFAULT_INTERVAL), DEFAULT_INTERVAL, new_interval=(0, 1)) #color = (1 - fraction) * np.array(RED) + fraction * np.array(GREEN) #set_color(world.get_body(cup_type), apply_alpha(color, alpha=0.25)) # TODO: overlay many cups at different poses bowl_from_pivot = get_bowl_from_pivot(world, feature, parameter) #handles.extend(draw_pose(bowl_from_pivot)) handles.extend(draw_point(point_from_pose(bowl_from_pivot), color=BLACK)) # TODO: could label bowl, cup dimensions path, _ = pour_path_from_parameter(world, feature, parameter, cup_yaw=0) for p1, p2 in safe_zip(path[:-1], path[1:]): handles.append(add_line(point_from_pose(p1), point_from_pose(p2), color=RED)) if num_steps is not None: path = path[:num_steps] for i, pose in enumerate(path[::-1]): #step_handles = draw_pose(pose, length=0.1) #step_handles = draw_point(point_from_pose(pose), color=BLACK) set_pose(world.get_body(cup_type), pose) print('Step {}/{}'.format(i+1, len(path))) wait_for_user() 
#remove_handles(step_handles) remove_handles(handles)
def draw_last_roadmap(robot, joints, only_checked=False, linear=True,
                      down_sample=None, **kwargs):
    """Draw the most recently constructed roadmap (samples and edges).

    Samples are drawn as 2D poses; edges are colored RED if colliding,
    GREEN if verified safe, BLACK if unknown (skipped when only_checked).

    :param only_checked: skip edges whose status is still unknown
    :param linear: draw edges as straight segments between endpoint samples
        instead of the roadmap's stored path
    :param down_sample: if given, keep every k-th path waypoint (plus the last)
    :return: list of debug-drawing handles
    """
    q0 = get_joint_positions(robot, joints)
    handles = []
    if not ROADMAPS:
        return handles
    roadmap = ROADMAPS[-1]

    def to_pose2d(q):
        # Pad a 2-dof sample with the robot's current remaining coordinates
        # (resolves the original "TODO: make a function" for this expression)
        return q if len(q) == 3 else np.append(q[:2], q0[2:])

    for q in roadmap.samples:
        handles.extend(draw_pose2d(to_pose2d(q), z=DRAW_Z))
    for v1, v2 in roadmap.edges:
        color = BLACK
        if roadmap.is_colliding(v1, v2):
            color = RED
        elif roadmap.is_safe(v1, v2):
            color = GREEN
        elif only_checked:
            continue  # status unknown and caller only wants checked edges
        if linear:
            path = [roadmap.samples[v1], roadmap.samples[v2]]
        else:
            path = roadmap.get_path(v1, v2)
        if down_sample is not None:
            path = path[::down_sample] + [path[-1]]
        #handles.extend(draw_path(path, **kwargs))
        points = [point_from_pose(pose_from_pose2d(to_pose2d(q), z=DRAW_Z))
                  for q in path]
        handles.extend(
            add_line(p1, p2, color=color) for p1, p2 in get_pairs(points))
    return handles
def draw_reaction(point, reaction, max_length=0.05, max_force=1, **kwargs):
    """Draw a reaction force as a line anchored at ``point``.

    The force's linear component (first three entries of ``reaction``) is
    scaled so that a force of magnitude ``max_force`` maps to a segment of
    length ``max_length``.

    :param kwargs: forwarded to add_line (e.g. color, width)
    :return: handle of the created debug line
    """
    scaled = max_length * np.array(reaction[:3]) / max_force
    tip = point + scaled
    return add_line(point, tip, **kwargs)
def display_trajectories(node_points, ground_nodes, trajectories, animate=True,
                         time_step=0.02, video=False):
    """Replay extrusion trajectories in the GUI, drawing each printed element.

    Each PrintTrajectory's tool path is drawn incrementally in its own color;
    non-print trajectories are just stepped through. Optionally records a
    video of the replay.

    :param node_points: node coordinates of the structure
    :param ground_nodes: node indices attached to the ground (assumed connected)
    :param trajectories: ordered trajectories to replay, or None to do nothing
    :param animate: currently unused (the animated replay always runs)
    :param time_step: wall-clock pause per trajectory step (None = wait for user)
    :param video: if True, record the replay to video.mp4
    """
    if trajectories is None:
        return
    set_extrusion_camera(node_points)
    planned_elements = recover_sequence(trajectories)
    colors = sample_colors(len(planned_elements))  # one color per printed element
    # if not animate:
    #     draw_ordered(planned_elements, node_points)
    #     wait_for_user()
    #     disconnect()
    #     return

    video_saver = None
    if video:
        handles = draw_model(planned_elements, node_points, ground_nodes)
        # Allows user to adjust the camera
        wait_for_user()
        remove_all_debug()
        wait_for_duration(0.1)
        video_saver = VideoSaver('video.mp4')  # has_gui()
        time_step = 0.001  # fine-grained stepping for smooth video
    else:
        wait_for_user()

    #element_bodies = dict(zip(planned_elements, create_elements(node_points, planned_elements)))
    #for body in element_bodies.values():
    #    set_color(body, (1, 0, 0, 0))
    connected_nodes = set(ground_nodes)  # nodes reachable from the ground so far
    printed_elements = []
    print('Trajectories:', len(trajectories))
    for i, trajectory in enumerate(trajectories):
        #wait_for_user()
        #set_color(element_bodies[element], (1, 0, 0, 1))
        last_point = None
        handles = []
        for _ in trajectory.iterate():
            # TODO: the robot body could be different
            if isinstance(trajectory, PrintTrajectory):
                current_point = point_from_pose(
                    trajectory.end_effector.get_tool_pose())
                if last_point is not None:
                    # color = BLUE if is_ground(trajectory.element, ground_nodes) else RED
                    color = colors[len(printed_elements)]
                    handles.append(
                        add_line(last_point, current_point, color=color,
                                 width=LINE_WIDTH))
                last_point = current_point
            if time_step is None:
                wait_for_user()
            else:
                wait_for_duration(time_step)

        if isinstance(trajectory, PrintTrajectory):
            if not trajectory.path:
                # Empty path: draw the element directly so it still appears
                color = colors[len(printed_elements)]
                handles.append(
                    draw_element(node_points, trajectory.element, color=color))
                #wait_for_user()
            # Flag elements printed before their start node is connected
            is_connected = (trajectory.n1 in connected_nodes
                            )  # and (trajectory.n2 in connected_nodes)
            print('{}) {:9} | Connected: {} | Ground: {} | Length: {}'.format(
                i, str(trajectory), is_connected,
                is_ground(trajectory.element, ground_nodes),
                len(trajectory.path)))
            if not is_connected:
                wait_for_user()  # pause so the user can inspect the violation
            connected_nodes.add(trajectory.n2)
            printed_elements.append(trajectory.element)

    if video_saver is not None:
        video_saver.restore()
    wait_for_user()
def draw_element(node_points, element, color=RED):
    """Draw an element as a debug line between its two endpoint nodes.

    :param node_points: indexable collection of node coordinates
    :param element: pair of node indices (n1, n2)
    :param color: RGB(A) color; only the RGB channels are used
    :return: handle of the created debug line
    """
    start, end = element
    return add_line(node_points[start], node_points[end],
                    color=color[:3], width=LINE_WIDTH)
def main():
    """Demo: place a WSG-50 gripper at a top grasp of a block on a table.

    Builds a floor, a table, and three colored blocks, loads the gripper,
    computes top grasps for the red block, and teleports the gripper base to
    the pose implied by the chosen grasp.
    """
    parser = argparse.ArgumentParser()  # Automatically includes help
    parser.add_argument('-viewer', action='store_true', help='enable viewer.')
    args = parser.parse_args()
    connect(use_gui=True)

    #ycb_path = os.path.join(DRAKE_PATH, DRAKE_YCB)
    #ycb_path = os.path.join(YCB_PATH, YCB_TEMPLATE.format('003_cracker_box'))
    #print(ycb_path)
    #load_pybullet(ycb_path)

    with LockRenderer():  # batch scene construction without re-rendering
        draw_pose(unit_pose(), length=1, width=1)
        floor = create_floor()
        set_point(floor, Point(z=-EPSILON))
        table = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH / 2,
                             height=TABLE_WIDTH / 2, top_color=TAN,
                             cylinder=False)
        #set_euler(table, Euler(yaw=np.pi/2))
        with HideOutput(False):
            # data_path = add_data_path()
            # robot_path = os.path.join(data_path, WSG_GRIPPER)
            robot_path = get_model_path(
                WSG_50_URDF)  # WSG_50_URDF | PANDA_HAND_URDF
            #robot_path = get_file_path(__file__, 'mit_arch_suction_gripper/mit_arch_suction_gripper.urdf')
            robot = load_pybullet(robot_path, fixed_base=True)
            #dump_body(robot)
        #robot = create_cylinder(radius=0.5*BLOCK_SIDE, height=4*BLOCK_SIDE) # vacuum gripper

        block1 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE,
                            color=RED)
        block_z = stable_z(block1, table)  # resting height on the table top
        set_point(block1, Point(z=block_z))

        block2 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE,
                            color=GREEN)
        set_point(block2, Point(x=+0.25, z=block_z))

        block3 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE,
                            color=BLUE)
        set_point(block3, Point(x=-0.15, z=block_z))

        blocks = [block1, block2, block3]

        # Reference line at the blocks' resting height across the table
        add_line(Point(x=-TABLE_WIDTH / 2, z=block_z - BLOCK_SIDE / 2 + EPSILON),
                 Point(x=+TABLE_WIDTH / 2, z=block_z - BLOCK_SIDE / 2 + EPSILON),
                 color=RED)
        set_camera_pose(camera_point=Point(y=-1, z=block_z + 1),
                        target_point=Point(z=block_z))

    wait_for_user()
    block_pose = get_pose(block1)
    open_gripper(robot)
    tool_link = link_from_name(robot, 'tool_link')
    base_from_tool = get_relative_pose(robot, tool_link)
    #draw_pose(unit_pose(), parent=robot, parent_link=tool_link)
    y_grasp, x_grasp = get_top_grasps(block1, tool_pose=unit_pose(),
                                      grasp_length=0.03, under=False)
    grasp = y_grasp  # fingers won't collide
    # Tool pose implied by the grasp, then back out the base pose from it
    gripper_pose = multiply(block_pose, invert(grasp))
    set_pose(robot, multiply(gripper_pose, invert(base_from_tool)))
    wait_for_user('Finish?')
    disconnect()
def main():
    # TODO: update this example
    """Demo: PR2 head visibility and detection cones.

    Loads a PR2 and a block, then repeatedly samples base placements from
    which the block is visible, solves head IK to look at it, and displays
    the detection and view cones attached to the head camera link.
    """
    connect(use_gui=True)

    with HideOutput():  # suppress URDF loading spam
        pr2 = load_model(DRAKE_PR2_URDF)
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['left_arm']),
                        REST_LEFT_ARM)
    # Mirror the left-arm rest configuration onto the right arm
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['right_arm']),
                        rightarm_from_leftarm(REST_LEFT_ARM))
    set_joint_positions(pr2, joints_from_names(pr2, PR2_GROUPS['torso']),
                        [0.2])
    dump_body(pr2)

    block = load_model(BLOCK_URDF, fixed_base=False)
    set_point(block, [2, 0.5, 1])
    target_point = point_from_pose(get_pose(block))
    draw_point(target_point)

    head_joints = joints_from_names(pr2, PR2_GROUPS['head'])
    #head_link = child_link_from_joint(head_joints[-1])
    #head_name = get_link_name(pr2, head_link)

    head_name = 'high_def_optical_frame'  # HEAD_LINK_NAME | high_def_optical_frame | high_def_frame
    head_link = link_from_name(pr2, head_name)

    #max_detect_distance = 2.5
    max_register_distance = 1.0
    distance_range = (max_register_distance / 2, max_register_distance)
    # Generator of base placements from which the target point is visible
    base_generator = visible_base_generator(pr2, target_point, distance_range)

    base_joints = joints_from_names(pr2, PR2_GROUPS['base'])
    for i in range(5):
        base_conf = next(base_generator)
        set_joint_positions(pr2, base_joints, base_conf)

        # Lines from both candidate head frames to the target, for comparison
        handles = [
            add_line(point_from_pose(get_link_pose(pr2, head_link)),
                     target_point, color=RED),
            add_line(point_from_pose(
                get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME))),
                     target_point, color=BLUE),
        ]

        # head_conf = sub_inverse_kinematics(pr2, head_joints[0], HEAD_LINK, )
        head_conf = inverse_visibility(pr2, target_point, head_name=head_name,
                                       head_joints=head_joints)
        assert head_conf is not None
        set_joint_positions(pr2, head_joints, head_conf)
        print(get_detections(pr2))
        # TODO: does this detect the robot sometimes?

        detect_mesh, z = get_detection_cone(pr2, block)
        detect_cone = create_mesh(detect_mesh, color=(0, 1, 0, 0.5))
        # Attach both cones to the head camera link's current pose
        set_pose(detect_cone,
                 get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        view_cone = get_viewcone(depth=2.5, color=(1, 0, 0, 0.25))
        set_pose(view_cone,
                 get_link_pose(pr2, link_from_name(pr2, HEAD_LINK_NAME)))
        wait_if_gui()
        remove_body(detect_cone)
        remove_body(view_cone)

    disconnect()