def main(): parser = argparse.ArgumentParser() # Automatically includes help parser.add_argument('-arm', required=True) parser.add_argument('-grasp', required=True) parser.add_argument('-viewer', action='store_true', help='enable viewer.') args = parser.parse_args() arm = args.arm other_arm = get_other_arm(arm) grasp_type = args.grasp connect(use_gui=args.viewer) add_data_path() with HideOutput(): robot = load_pybullet(DRAKE_PR2_URDF) set_group_conf(robot, 'torso', [0.2]) set_arm_conf(robot, arm, get_carry_conf(arm, grasp_type)) set_arm_conf(robot, other_arm, arm_conf(other_arm, REST_LEFT_ARM)) #plane = p.loadURDF("plane.urdf") #table = p.loadURDF("table/table.urdf", 0, 0, 0, 0, 0, 0.707107, 0.707107) table = create_table() box = create_box(.07, .07, .14) #create_inverse_reachability(robot, box, table, arm=arm, grasp_type=grasp_type) create_inverse_reachability2(robot, box, table, arm=arm, grasp_type=grasp_type) disconnect()
def __init__(self, task, use_robot=True, visualize=False, **kwargs):
    """Create a planning world inside its own pybullet client.

    Args:
        task: the task specification this planner operates on.
        use_robot: when True, load the PR2 model into the client.
        visualize: when True, open the pybullet GUI for this client.
        **kwargs: forwarded to connect().
    """
    self.task = task
    self.real_world = None
    self.visualize = visualize
    self.client_saver = None
    self.client = connect(use_gui=visualize, **kwargs)
    print('Planner connected to client {}.'.format(self.client))
    self.robot = None
    with ClientSaver(self.client):
        with HideOutput():
            if use_robot:
                self.robot = load_pybullet(os.path.join(get_models_path(), PR2_URDF),
                                           fixed_base=True)
        #dump_body(self.robot)
        #compute_joint_weights(self.robot)
    # Bookkeeping for the simulated world contents.
    self.world_name = 'world'
    self.world_pose = Pose(unit_pose())
    self.bodies = {}
    self.fixed = []
    self.surfaces = []
    self.items = []
    #self.holding_liquid = []
    self.initial_config = None
    self.initial_poses = {}
    self.body_mapping = {}
def load_plane(z=-1e-3):
    """Load the standard ground plane, shifted slightly below z=0 by default."""
    add_data_path()
    floor = load_pybullet('plane.urdf', fixed_base=True)
    if z is not None:
        set_point(floor, Point(z=z))
    return floor
def pick_problem(arm='left', grasp_type='side'):
    """Simple PR2 pick task: grasp one box on a table, ending at the start base pose.

    Returns a Problem whose goal is holding the box with `arm`.
    """
    other_arm = get_other_arm(arm)
    initial_conf = get_carry_conf(arm, grasp_type)

    pr2 = create_pr2()
    set_base_values(pr2, (0, -1.2, np.pi / 2))
    set_arm_conf(pr2, arm, initial_conf)
    open_arm(pr2, arm)
    # Tuck the unused arm out of the way.
    set_arm_conf(pr2, other_arm, arm_conf(other_arm, REST_LEFT_ARM))
    close_arm(pr2, other_arm)

    plane = create_floor()
    table = load_pybullet(TABLE_URDF)
    # table = create_table(height=0.8)
    # table = load_pybullet("table_square/table_square.urdf")
    box = create_box(.07, .05, .25)
    set_point(box, (-0.25, -0.3, TABLE_MAX_Z + .25 / 2))  # resting on the table top
    # set_point(box, (0.2, -0.2, 0.8 + .25/2 + 0.1))

    return Problem(robot=pr2, movable=[box], arms=[arm], grasp_types=[grasp_type],
                   surfaces=[table], goal_conf=get_pose(pr2),
                   goal_holding=[(arm, box)])
def main():
    """Demo: load a scaled Roomba on a floor plus a Kiva shelf, then wait."""
    # TODO: move to pybullet-planning for now
    parser = argparse.ArgumentParser()
    parser.add_argument('-viewer', action='store_true',
                        help='enable the viewer while planning')
    args = parser.parse_args()
    print(args)

    connect(use_gui=True)
    with LockRenderer():
        draw_pose(unit_pose(), length=1)
        floor = create_floor()
        with HideOutput():
            robot = load_pybullet(get_model_path(ROOMBA_URDF), fixed_base=True, scale=2.0)
            for link in get_all_links(robot):
                set_color(robot, link=link, color=ORANGE)
            robot_z = stable_z(robot, floor)
            set_point(robot, Point(z=robot_z))
        #set_base_conf(robot, rover_confs[i])

        data_path = add_data_path()
        shelf, = load_model(os.path.join(data_path, KIVA_SHELF_SDF),
                            fixed_base=True)  # TODO: returns a tuple
        dump_body(shelf)
        #draw_aabb(get_aabb(shelf))

    wait_for_user()
    disconnect()
def add_scales(task, ros_world):
    """Spawn a simulated scale body under each relevant tabletop item.

    Items currently held by the robot are skipped. Returns a dict mapping each
    created scale name to the item stacked on it.
    """
    scale_stackings = {}
    holding = {grasp.obj_name for grasp in task.init_holding.values()}
    with ClientSaver(ros_world.client):
        perception = ros_world.perception
        items = sorted(set(perception.get_items()) - holding,
                       key=lambda n: get_point(ros_world.get_body(n))[1],
                       reverse=False)  # Right to left
        for i, item in enumerate(items):
            if not POUR and (get_type(item) != SCOOP_CUP):
                continue
            item_body = ros_world.get_body(item)
            scale = create_name(SCALE_TYPE, i + 1)
            with HideOutput():
                scale_body = load_pybullet(get_body_urdf(get_type(scale)), fixed_base=True)
            ros_world.perception.sim_bodies[scale] = scale_body
            ros_world.perception.sim_items[scale] = None
            # Position the scale directly underneath the item.
            item_z = stable_z(item_body, scale_body)
            scale_pose_item = Pose(point=Point(z=-item_z))  # TODO: relies on origin in base
            set_pose(scale_body, multiply(get_pose(item_body), scale_pose_item))
            # Zero out yaw so the scale sits axis-aligned.
            roll, pitch, _ = get_euler(scale_body)
            set_euler(scale_body, [roll, pitch, 0])
            scale_stackings[scale] = item
        #wait_for_user()
    return scale_stackings
def main(): parser = argparse.ArgumentParser() # Automatically includes help parser.add_argument('-viewer', action='store_true', help='enable viewer.') args = parser.parse_args() connect(use_gui=True) with LockRenderer(): draw_pose(unit_pose(), length=1, width=1) floor = create_floor() set_point(floor, Point(z=-EPSILON)) table1 = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH/2, height=TABLE_WIDTH/2, top_color=TAN, cylinder=False) set_point(table1, Point(y=+0.5)) table2 = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH/2, height=TABLE_WIDTH/2, top_color=TAN, cylinder=False) set_point(table2, Point(y=-0.5)) tables = [table1, table2] #set_euler(table1, Euler(yaw=np.pi/2)) with HideOutput(): # data_path = add_data_path() # robot_path = os.path.join(data_path, WSG_GRIPPER) robot_path = get_model_path(WSG_50_URDF) # WSG_50_URDF | PANDA_HAND_URDF robot = load_pybullet(robot_path, fixed_base=True) #dump_body(robot) block1 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=RED) block_z = stable_z(block1, table1) set_point(block1, Point(y=-0.5, z=block_z)) block2 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=GREEN) set_point(block2, Point(x=-0.25, y=-0.5, z=block_z)) block3 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=BLUE) set_point(block3, Point(x=-0.15, y=+0.5, z=block_z)) blocks = [block1, block2, block3] set_camera_pose(camera_point=Point(x=-1, z=block_z+1), target_point=Point(z=block_z)) block_pose = get_pose(block1) open_gripper(robot) tool_link = link_from_name(robot, 'tool_link') base_from_tool = get_relative_pose(robot, tool_link) #draw_pose(unit_pose(), parent=robot, parent_link=tool_link) grasps = get_side_grasps(block1, tool_pose=Pose(euler=Euler(yaw=np.pi/2)), top_offset=0.02, grasp_length=0.03, under=False)[1:2] for grasp in grasps: gripper_pose = multiply(block_pose, invert(grasp)) set_pose(robot, multiply(gripper_pose, invert(base_from_tool))) wait_for_user() wait_for_user('Finish?') disconnect()
def load_robot():
    """Load the KUKA arm (path relative to this file) and set its initial pose."""
    here = os.path.dirname(os.path.abspath(__file__))
    urdf_path = os.path.join(here, KUKA_DIR, KUKA_PATH)
    with HideOutput():
        robot = load_pybullet(urdf_path, fixed_base=True)
    #print([get_max_velocity(robot, joint) for joint in get_movable_joints(robot)])
    set_static(robot)
    set_configuration(robot, INITIAL_CONF)
    return robot
def add_body(self, name, **kwargs):
    """Load the mesh for `name` and register the body in the world under that name."""
    obj_type = type_from_name(name)
    self.names_from_type.setdefault(obj_type, []).append(name)
    obj_path = get_obj_path(obj_type)
    #self.path_from_name[name] = obj_path
    print('Loading', obj_path)
    loaded_body = load_pybullet(obj_path, **kwargs)
    assert loaded_body is not None
    self.add(name, loaded_body)
def main(use_pr2_drake=True):
    """Demo: plan PR2 base motion around a table, then left-arm motion."""
    connect(use_gui=True)
    add_data_path()

    plane = p.loadURDF("plane.urdf")
    table_path = "models/table_collision/table.urdf"
    table = load_pybullet(table_path, fixed_base=True)
    set_quat(table, quat_from_euler(Euler(yaw=PI / 2)))
    # table/table.urdf, table_square/table_square.urdf, cube.urdf, block.urdf, door.urdf
    obstacles = [plane, table]

    pr2_urdf = DRAKE_PR2_URDF if use_pr2_drake else PR2_URDF
    with HideOutput():
        pr2 = load_model(pr2_urdf, fixed_base=True)  # TODO: suppress warnings?
    dump_body(pr2)
    # Drop the robot so its base rests on the ground.
    z = base_aligned_z(pr2)
    print(z)
    #z = stable_z_on_aabb(pr2, AABB(np.zeros(3), np.zeros(3)))
    print(z)
    set_point(pr2, Point(z=z))
    print(get_aabb(pr2))
    wait_if_gui()

    base_start = (-2, -2, 0)
    base_goal = (2, 2, 0)
    arm_start = SIDE_HOLDING_LEFT_ARM
    #arm_start = TOP_HOLDING_LEFT_ARM
    #arm_start = REST_LEFT_ARM
    arm_goal = TOP_HOLDING_LEFT_ARM
    #arm_goal = SIDE_HOLDING_LEFT_ARM

    left_joints = joints_from_names(pr2, PR2_GROUPS['left_arm'])
    right_joints = joints_from_names(pr2, PR2_GROUPS['right_arm'])
    torso_joints = joints_from_names(pr2, PR2_GROUPS['torso'])
    set_joint_positions(pr2, left_joints, arm_start)
    set_joint_positions(pr2, right_joints, rightarm_from_leftarm(REST_LEFT_ARM))
    set_joint_positions(pr2, torso_joints, [0.2])
    open_arm(pr2, 'left')
    # test_ikfast(pr2)

    add_line(base_start, base_goal, color=RED)
    print(base_start, base_goal)
    if use_pr2_drake:
        test_drake_base_motion(pr2, base_start, base_goal, obstacles=obstacles)
    else:
        test_base_motion(pr2, base_start, base_goal, obstacles=obstacles)

    test_arm_motion(pr2, left_joints, arm_goal)
    # test_arm_control(pr2, left_joints, arm_start)

    wait_if_gui('Finish?')
    disconnect()
def _create_robot(self):
    """Load the PR2 and align its base frame with the world origin.

    Returns the loaded PR2 body id.
    """
    with ClientSaver(self.client):
        with HideOutput():
            pr2_path = os.path.join(get_models_path(), PR2_URDF)
            self.pr2 = load_pybullet(pr2_path, fixed_base=True)
        # Base link is the origin
        # NOTE(review): loads into self.pr2 but reads/sets self.robot below —
        # presumably self.robot aliases self.pr2 (e.g. via a property); confirm.
        base_pose = get_link_pose(self.robot, link_from_name(self.robot, BASE_FRAME))
        set_pose(self.robot, invert(base_pose))
    return self.pr2
def add_holding(task, ros_world):
    """Instantiate bodies for objects the robot starts out grasping and attach them."""
    with ClientSaver(ros_world.client):
        for arm, grasp in task.init_holding.items():
            obj_name = grasp.obj_name
            obj_body = load_pybullet(get_body_urdf(get_type(obj_name)), fixed_base=False)
            ros_world.perception.sim_bodies[obj_name] = obj_body
            ros_world.perception.sim_items[obj_name] = None
            # Snap the body to the grasp pose, then attach it to the gripper.
            attachment = get_grasp_attachment(ros_world, arm, grasp)
            attachment.assign()
            ros_world.controller.attach(get_arm_prefix(arm), obj_name)
def main(display='execute'):  # control | execute | step
    """Demo: plan a pick of a block with the ETH RFL gantry robot, then replay it."""
    # One of the arm's gantry carriage is fixed when the other is moving.
    connect(use_gui=True)
    set_camera(yaw=-90, pitch=-40, distance=10, target_position=(0, 7.5, 0))
    draw_pose(unit_pose(), length=1.0)
    disable_real_time()

    with HideOutput():
        root_directory = os.path.dirname(os.path.abspath(__file__))
        robot = load_pybullet(os.path.join(root_directory, ETH_RFL_URDF), fixed_base=True)
    # floor = load_model('models/short_floor.urdf')
    block = load_model(BLOCK_URDF, fixed_base=False)
    #link = link_from_name(robot, 'gantry_base_link')
    #print(get_aabb(robot, link))

    block_x = -0.2
    #block_y = 1 if ARM == 'right' else 13.5
    #block_x = 10
    block_y = 5.

    # set_pose(floor, Pose(Point(x=floor_x, y=1, z=1.3)))
    # set_pose(block, Pose(Point(x=floor_x, y=0.6, z=stable_z(block, floor))))
    set_pose(block, Pose(Point(x=block_x, y=block_y, z=3.5)))
    # set_default_camera()
    dump_world()
    #print(get_camera())

    saved_world = WorldSaver()
    with LockRenderer():
        command = plan(robot, block, fixed=[], teleport=False)  # fixed=[floor],
    if (command is None) or (display is None):
        print('Unable to find a plan! Quit?')
        wait_for_interrupt()
        disconnect()
        return

    # Restore the pre-planning state before replaying the command.
    saved_world.restore()
    update_state()
    print('{}?'.format(display))
    wait_for_interrupt()
    if display == 'control':
        enable_gravity()
        command.control(real_time=False, dt=0)
    elif display == 'execute':
        command.refine(num_steps=10).execute(time_step=0.002)
    elif display == 'step':
        command.step()
    else:
        raise ValueError(display)

    print('Quit?')
    wait_for_interrupt()
    disconnect()
def load_body(self, name, pose=None, fixed_base=False):
    """Load and cache the simulation body for `name`, optionally placing it at `pose`."""
    assert name not in self.sim_bodies
    body_type = get_type(name)
    with ClientSaver(self.client):
        with HideOutput():
            body = load_cup_bowl(body_type)
            if body is None:
                # No special cup/bowl model; fall back to the generic URDF.
                body = load_pybullet(get_body_urdf(name), fixed_base=fixed_base)
        if pose is not None:
            set_pose(body, pose)
        self.sim_bodies[name] = body
        return body
def main():
    """Demo: drive a simulated PR2 in real-time mode via a keyboard thread."""
    # https://github.com/ros-teleop/teleop_twist_keyboard
    # http://openrave.org/docs/latest_stable/_modules/openravepy/misc/#SetViewerUserThread
    connect(use_gui=True)
    add_data_path()
    load_pybullet("plane.urdf")
    #load_pybullet("models/table_collision/table.urdf")
    with HideOutput():
        pr2 = load_model(DRAKE_PR2_URDF, fixed_base=True)
    enable_gravity()
    enable_real_time()  # TODO: won't work as well on OS X due to simulation thread

    #run_simulate(pr2)
    run_thread(pr2)
    # TODO: keep working on this
    #userthread = threading.Thread(target=run_thread, args=[pr2])
    #userthread.start()
    #userthread.join()
    disconnect()
def main():
    """Demo: drop a chosen shape onto a ramp and simulate until it is at rest."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-shape', default='box',
                        choices=['box', 'sphere', 'cylinder', 'capsule'])
    parser.add_argument('-video', action='store_true')
    args = parser.parse_args()
    video = 'video.mp4' if args.video else None

    connect(use_gui=True, mp4=video)
    if video is not None:
        set_renderer(enable=False)  # keep rendering off during scene setup while recording

    draw_global_system()
    set_camera_pose(camera_point=Point(+1.5, -1.5, +1.5),
                    target_point=Point(-1.5, +1.5, 0))

    add_data_path()
    plane = load_pybullet('plane.urdf', fixed_base=True)
    #plane = load_model('plane.urdf')
    set_point(plane, Point(z=-1e-3))

    ramp = create_ramp()
    dump_body(ramp)

    obj = create_object(args.shape)
    # Small random perturbations so the drop is not perfectly symmetric.
    set_euler(obj, np.random.uniform(-np.math.radians(1), np.math.radians(1), 3))
    set_point(obj, np.random.uniform(-1e-3, +1e-3, 3))
    #set_velocity(obj, linear=Point(x=-1))
    set_position(obj, z=2.)
    #set_position(obj, z=base_aligned_z(obj))
    dump_body(obj)
    #add_pose_constraint(obj, pose=(target_point, target_quat), max_force=200)

    set_renderer(enable=True)
    if video is None:
        wait_if_gui('Begin?')
    # Step until the object stops moving (skip step 0, before gravity acts).
    simulate(controller=condition_controller(
        lambda step: (step != 0) and at_rest(obj)))
    if video is None:
        wait_if_gui('Finish?')
    disconnect()
def main(display='execute'):  # control | execute | step
    """Demo: plan a pick of a block with the IRB6600 track robot, then replay it."""
    connect(use_gui=True)
    disable_real_time()

    with HideOutput():
        root_directory = os.path.dirname(os.path.abspath(__file__))
        robot = load_pybullet(os.path.join(root_directory, IRB6600_TRACK_URDF),
                              fixed_base=True)
    floor = load_model('models/short_floor.urdf')
    block = load_model(BLOCK_URDF, fixed_base=False)

    floor_x = 2
    set_pose(floor, Pose(Point(x=floor_x, z=0.5)))
    set_pose(block, Pose(Point(x=floor_x, y=0, z=stable_z(block, floor))))
    # set_default_camera()
    dump_world()

    saved_world = WorldSaver()
    with LockRenderer():
        command = plan(robot, block, fixed=[floor], teleport=False)
    if (command is None) or (display is None):
        print('Unable to find a plan!')
        print('Quit?')
        wait_for_interrupt()
        disconnect()
        return

    # Restore the pre-planning state before replaying the command.
    saved_world.restore()
    update_state()
    user_input('{}?'.format(display))
    if display == 'control':
        enable_gravity()
        command.control(real_time=False, dt=0)
    elif display == 'execute':
        command.refine(num_steps=10).execute(time_step=0.002)
    elif display == 'step':
        command.step()
    else:
        raise ValueError(display)

    print('Quit?')
    wait_for_interrupt()
    disconnect()
def add_camera(self, name, pose, camera_matrix, max_depth=KINECT_DEPTH, display=False):
    """Register a camera at `pose`, drawing its view cone (and optionally a Kinect mesh).

    Returns `name`.
    """
    cone = get_viewcone(depth=max_depth, camera_matrix=camera_matrix,
                        color=apply_alpha(RED, 0.1), mass=0, collision=False)
    set_pose(cone, pose)
    if display:
        # NOTE(review): reconstructed indentation — assuming the kinect body is
        # only created/registered when display is True, since `kinect` would
        # otherwise be unbound; confirm against the original layout.
        kinect = load_pybullet(KINECT_URDF, fixed_base=True)
        set_pose(kinect, pose)
        set_color(kinect, BLACK)
        self.add(name, kinect)
    self.cameras[name] = Camera(cone, camera_matrix, max_depth)
    draw_pose(pose)
    step_simulation()
    return name
def _initialize_environment(self):
    """Load static environment meshes and place them relative to the kitchen."""
    # wall to fridge: 4cm
    # fridge to goal: 1.5cm
    # hitman to range: 3.5cm
    # range to indigo: 3.5cm
    self.environment_poses = read_json(POSES_PATH)
    root_from_world = get_link_pose(self.kitchen, self.world_link)
    for name, world_from_part in self.environment_poses.items():
        if name in ['range']:
            continue
        visual_path = os.path.join(KITCHEN_LEFT_PATH, '{}.obj'.format(name))
        collision_path = os.path.join(KITCHEN_LEFT_PATH, '{}_collision.obj'.format(name))
        mesh_path = None
        # Prefer the dedicated collision mesh when it exists.
        for path in [collision_path, visual_path]:
            if os.path.exists(path):
                mesh_path = path
                break
        if mesh_path is None:
            continue
        body = load_pybullet(mesh_path, fixed_base=True)
        root_from_part = multiply(root_from_world, world_from_part)
        if name in ['axe', 'dishwasher', 'echo', 'fox', 'golf']:
            (pos, quat) = root_from_part
            # TODO: still not totally aligned
            pos = np.array(pos) + np.array([0, -0.035, 0])  # , -0.005])
            root_from_part = (pos, quat)
        self.environment_bodies[name] = body
        set_pose(body, root_from_part)
    # TODO: release bounding box or convex hull
    # TODO: static object nonconvex collisions

    if TABLE_NAME in self.environment_bodies:
        # Re-place the table from its approximate footprint dimensions.
        body = self.environment_bodies[TABLE_NAME]
        _, (w, l, _) = approximate_as_prism(body)
        _, _, z = get_point(body)
        new_pose = Pose(Point(TABLE_X + l / 2, -TABLE_Y, z), Euler(yaw=np.pi / 2))
        set_pose(body, new_pose)
def main():
    """Demo: sample random Franka Panda configurations and test IK retraction."""
    connect(use_gui=True)
    add_data_path()
    draw_pose(Pose(), length=1.)
    set_camera_pose(camera_point=[1, -1, 1])

    plane = p.loadURDF("plane.urdf")
    with LockRenderer():
        with HideOutput(True):
            robot = load_pybullet(FRANKA_URDF, fixed_base=True)
            assign_link_colors(robot, max_colors=3, s=0.5, v=1.)
            #set_all_color(robot, GREEN)
    obstacles = [plane]  # TODO: collisions with the ground

    dump_body(robot)
    print('Start?')
    wait_for_user()

    info = PANDA_INFO
    tool_link = link_from_name(robot, 'panda_hand')
    draw_pose(Pose(), parent=robot, parent_link=tool_link)
    joints = get_movable_joints(robot)
    print('Joints', [get_joint_name(robot, joint) for joint in joints])
    check_ik_solver(info)

    sample_fn = get_sample_fn(robot, joints)
    for i in range(10):
        print('Iteration:', i)
        conf = sample_fn()
        set_joint_positions(robot, joints, conf)
        wait_for_user()
        #test_ik(robot, info, tool_link, get_link_pose(robot, tool_link))
        test_retraction(robot, info, tool_link, use_pybullet=False,
                        max_distance=0.1, max_time=0.05, max_candidates=100)
    disconnect()
def create_table_bodies(world, item_ranges, randomize=True):
    """Create the floor, table, and tabletop items described by `item_ranges`.

    Each item's pose is sampled uniformly from its pose2d_range and placed
    resting on its configured support surface.
    """
    perception = world.perception
    with HideOutput():
        add_data_path()
        floor_body = load_pybullet("plane.urdf")
        set_pose(floor_body, get_pose(world.robot))
        add_table(world)
        for name, limits in sorted(item_ranges.items()):
            perception.sim_items[name] = None
            if randomize:
                body = randomize_body(name, width_range=limits.width_range,
                                      height_range=limits.height_range,
                                      mass_range=limits.mass_range)
            else:
                body = load_body(name)
            perception.sim_bodies[name] = body
            # perception.add_item(name, unit_pose())
            x, y, yaw = np.random.uniform(*limits.pose2d_range)
            surface_body = perception.get_body(limits.surface)
            # Small z offset avoids initial interpenetration with the surface.
            z = stable_z(body, surface_body) + Z_EPSILON
            pose = Pose((x, y, z), Euler(yaw=yaw))
            perception.set_pose(name, *pose)
def __init__(self, robot_name=FRANKA_CARTER, use_gui=True, full_kitchen=False):
    """Create the simulated kitchen world: kitchen model, floor, robot, and
    the named configuration objects used by the planner.

    Args:
        robot_name: which robot to load (only FRANKA_CARTER is supported).
        use_gui: open the pybullet GUI.
        full_kitchen: also load the static environment meshes.

    Raises:
        ValueError: if robot_name is not a supported robot.
    """
    self.task = None
    self.interface = None
    self.client = connect(use_gui=use_gui)
    set_real_time(False)
    #set_caching(False) # Seems to make things worse
    disable_gravity()
    add_data_path()
    set_camera_pose(camera_point=[2, -1.5, 1])
    if DEBUG:
        draw_pose(Pose(), length=3)

    #self.kitchen_yaml = load_yaml(KITCHEN_YAML)
    with HideOutput(enable=True):
        self.kitchen = load_pybullet(KITCHEN_PATH, fixed_base=True, cylinder=True)

    self.floor = load_pybullet('plane.urdf', fixed_base=True)
    # Shift the floor so the kitchen rests exactly on it.
    z = stable_z(self.kitchen, self.floor) - get_point(self.floor)[2]
    point = np.array(get_point(self.kitchen)) - np.array([0, 0, z])
    set_point(self.floor, point)

    self.robot_name = robot_name
    if self.robot_name == FRANKA_CARTER:
        urdf_path, yaml_path = FRANKA_CARTER_PATH, None
        #urdf_path, yaml_path = FRANKA_CARTER_PATH, FRANKA_YAML
    #elif self.robot_name == EVE:
    #    urdf_path, yaml_path = EVE_PATH, None
    else:
        raise ValueError(self.robot_name)
    self.robot_yaml = yaml_path if yaml_path is None else load_yaml(yaml_path)

    with HideOutput(enable=True):
        self.robot = load_pybullet(urdf_path)
    #dump_body(self.robot)
    #chassis_pose = get_link_pose(self.robot, link_from_name(self.robot, 'chassis_link'))
    #wheel_pose = get_link_pose(self.robot, link_from_name(self.robot, 'left_wheel_link'))
    #wait_for_user()
    set_point(self.robot, Point(z=stable_z(self.robot, self.floor)))
    self.set_initial_conf()
    self.gripper = create_gripper(self.robot)

    self.environment_bodies = {}
    if full_kitchen:
        self._initialize_environment()
    self._initialize_ik(urdf_path)
    self.initial_saver = WorldSaver()

    # Registries for named bodies, limits, and cameras.
    self.body_from_name = {}
    # self.path_from_name = {}
    self.names_from_type = {}
    self.custom_limits = {}
    self.base_limits_handles = []
    self.cameras = {}

    self.disabled_collisions = set()
    if self.robot_name == FRANKA_CARTER:
        self.disabled_collisions.update(
            tuple(link_from_name(self.robot, link) for link in pair)
            for pair in DISABLED_FRANKA_COLLISIONS)

    # Canonical arm/gripper/kitchen-joint configurations used during planning.
    self.carry_conf = FConf(self.robot, self.arm_joints, self.default_conf)
    #self.calibrate_conf = Conf(self.robot, self.arm_joints, load_calibrate_conf(side='left'))
    self.calibrate_conf = FConf(self.robot, self.arm_joints,
                                self.default_conf)  # Must differ from carry_conf
    self.special_confs = [self.carry_conf]  #, self.calibrate_conf]
    self.open_gq = FConf(self.robot, self.gripper_joints,
                         get_max_limits(self.robot, self.gripper_joints))
    self.closed_gq = FConf(self.robot, self.gripper_joints,
                           get_min_limits(self.robot, self.gripper_joints))
    self.gripper_confs = [self.open_gq, self.closed_gq]
    self.open_kitchen_confs = {
        joint: FConf(self.kitchen, [joint], [self.open_conf(joint)])
        for joint in self.kitchen_joints}
    self.closed_kitchen_confs = {
        joint: FConf(self.kitchen, [joint], [self.closed_conf(joint)])
        for joint in self.kitchen_joints}
    self._update_custom_limits()
    self._update_initial()
def load_pick_and_place(extrusion_name, scale=MILLIMETER, max_bricks=6):
    """Load the choreo brick pick-and-place demo scene from its JSON description.

    Returns (robot, brick_from_index, obstacle_from_name).
    """
    assert extrusion_name == 'choreo_brick_demo'
    root_directory = os.path.dirname(os.path.abspath(__file__))
    bricks_directory = os.path.join(root_directory, PICKNPLACE_DIRECTORY, 'bricks')
    print('Name: {}'.format(extrusion_name))
    with open(os.path.join(bricks_directory,
                           PICKNPLACE_FILENAMES[extrusion_name]), 'r') as f:
        json_data = json.loads(f.read())

    kuka_urdf = '../conrob_pybullet/models/kuka_kr6_r900/urdf/kuka_kr6_r900_mit_suction_gripper.urdf'
    obj_directory = os.path.join(bricks_directory, 'meshes', 'collision')
    with HideOutput():
        #world = load_pybullet(os.path.join(bricks_directory, 'urdf', 'brick_demo.urdf'))
        robot = load_pybullet(os.path.join(root_directory, kuka_urdf), fixed_base=True)
    #set_point(robot, (0.14, 0, 0))
    #dump_body(robot)

    pick_base_point = parse_point(json_data['pick_base_center_point'])
    draw_pose((pick_base_point, unit_quat()))
    place_base_point = parse_point(json_data['place_base_center_point'])
    draw_pose((place_base_point, unit_quat()))

    # workspace_geo_place_support_object_11 = pick_left_over_bricks_11
    obstacle_from_name = {}
    for filename in json_data['pick_support_surface_file_names']:
        obstacle_from_name[strip_extension(filename)] = \
            create_obj(os.path.join(obj_directory, filename),
                       scale=scale, color=(0.5, 0.5, 0.5, 1))
    for filename in json_data['place_support_surface_file_names']:
        obstacle_from_name[strip_extension(filename)] = \
            create_obj(os.path.join(obj_directory, filename),
                       scale=scale, color=(1., 0., 0., 1))

    brick_from_index = {}
    for json_element in json_data['sequenced_elements']:
        index = json_element['order_id']
        pick_body = create_obj(
            os.path.join(obj_directory, json_element['pick_element_geometry_file_name']),
            scale=scale, color=(0, 0, 1, 1))
        add_body_name(pick_body, index)
        #place_body = create_obj(os.path.join(obj_directory, json_element['place_element_geometry_file_name']),
        #                        scale=scale, color=(0, 1, 0, 1))
        #add_body_name(place_body, name)
        world_from_obj_pick = get_pose(pick_body)

        # [u'pick_grasp_plane', u'pick_grasp_retreat_plane', u'place_grasp_retreat_plane',
        #  u'pick_grasp_approach_plane', u'place_grasp_approach_plane', u'place_grasp_plane']
        ee_grasp_poses = [{name: pose_from_tform(parse_transform(approach))
                           for name, approach in json_grasp.items()}
                          for json_grasp in json_element['grasps']]

        # pick_grasp_plane is at the top of the object with z facing downwards
        grasp_index = 0
        world_from_ee_pick = ee_grasp_poses[grasp_index]['pick_grasp_plane']
        world_from_ee_place = ee_grasp_poses[grasp_index]['place_grasp_plane']
        #draw_pose(world_from_ee_pick, length=0.04)
        #draw_pose(world_from_ee_place, length=0.04)

        # Derive the placed pose by carrying the pick-frame grasp to the place frame.
        ee_from_obj = multiply(invert(world_from_ee_pick),
                               world_from_obj_pick)  # Using pick frame
        world_from_obj_place = multiply(world_from_ee_place, ee_from_obj)
        #set_pose(pick_body, world_from_obj_place)

        grasps = [Grasp(index, num,
                        *[multiply(invert(world_from_ee_pick[name]), world_from_obj_pick)
                          for name in GRASP_NAMES])
                  for num, world_from_ee_pick in enumerate(ee_grasp_poses)]
        brick_from_index[index] = Brick(
            index=index, body=pick_body,
            initial_pose=WorldPose(index, world_from_obj_pick),
            goal_pose=WorldPose(index, world_from_obj_place),
            grasps=grasps,
            goal_supports=json_element.get('place_contact_ngh_ids', []))
        # pick_contact_ngh_ids are movable element contact partial orders
        # pick_support_surface_file_names are fixed element contact partial orders
    return robot, brick_from_index, obstacle_from_name
def problem1(n_obstacles=10, wall_side=0.1, obst_width=0.25, obst_height=0.5):
    """Turtlebot navigation problem: walled square floor with random pillars.

    Returns (robot, base_limits, goal_conf, obstacles).
    """
    floor_extent = 5.0
    base_limits = (-floor_extent / 2. * np.ones(2),
                   +floor_extent / 2. * np.ones(2))

    floor_height = 0.001
    floor = create_box(floor_extent, floor_extent, floor_height, color=TAN)
    set_point(floor, Point(z=-floor_height / 2.))

    # Four perimeter walls plus one central pillar.
    wall1 = create_box(floor_extent + wall_side, wall_side, wall_side, color=GREY)
    set_point(wall1, Point(y=floor_extent / 2., z=wall_side / 2.))
    wall2 = create_box(floor_extent + wall_side, wall_side, wall_side, color=GREY)
    set_point(wall2, Point(y=-floor_extent / 2., z=wall_side / 2.))
    wall3 = create_box(wall_side, floor_extent + wall_side, wall_side, color=GREY)
    set_point(wall3, Point(x=floor_extent / 2., z=wall_side / 2.))
    wall4 = create_box(wall_side, floor_extent + wall_side, wall_side, color=GREY)
    set_point(wall4, Point(x=-floor_extent / 2., z=wall_side / 2.))
    wall5 = create_box(obst_width, obst_width, obst_height, color=GREY)
    set_point(wall5, Point(z=obst_height / 2.))
    walls = [wall1, wall2, wall3, wall4, wall5]

    initial_surfaces = OrderedDict()
    for _ in range(n_obstacles - 1):
        body = create_box(obst_width, obst_width, obst_height, color=GREY)
        initial_surfaces[body] = floor
    pillars = list(initial_surfaces)
    obstacles = walls + pillars

    initial_conf = np.array([+floor_extent / 3, -floor_extent / 3, 3 * PI / 4])
    goal_conf = -initial_conf  # mirrored start pose

    robot = load_pybullet(TURTLEBOT_URDF, fixed_base=True, merge=True, sat=False)
    base_joints = joints_from_names(robot, BASE_JOINTS)
    # base_link = child_link_from_joint(base_joints[-1])
    base_link = link_from_name(robot, BASE_LINK_NAME)
    set_all_color(robot, BLUE)
    dump_body(robot)
    set_point(robot, Point(z=stable_z(robot, floor)))
    #set_point(robot, Point(z=base_aligned_z(robot)))
    draw_pose(Pose(), parent=robot, parent_link=base_link, length=0.5)
    set_joint_positions(robot, base_joints, initial_conf)

    # Sample collision-free pillar placements, keeping the robot's goal
    # configuration clear via the BodySaver.
    sample_placements(initial_surfaces, obstacles=[robot] + walls,
                      savers=[BodySaver(robot, joints=base_joints,
                                        positions=goal_conf)],
                      min_distances=10e-2)

    # The first calls appear to be the slowest
    # times = []
    # for body1, body2 in combinations(pillars, r=2):
    #     start_time = time.time()
    #     colliding = pairwise_collision(body1, body2)
    #     runtime = elapsed_time(start_time)
    #     print(colliding, runtime)
    #     times.append(runtime)
    # print(times)
    return robot, base_limits, goal_conf, obstacles
def main():
    """Demo: drop simulated water droplets into a cup and let physics settle.

    Fixes a Python 3 incompatibility: zip() returns a lazy iterator and cannot
    be sliced, so the AABB limits are materialized with list() before indexing.
    """
    connect(use_gui=True)
    add_data_path()
    set_camera(0, -30, 1)
    plane = load_pybullet('plane.urdf', fixed_base=True)
    #plane = load_model('plane.urdf')
    cup = load_model('models/cup.urdf', fixed_base=True)
    #set_point(cup, Point(z=stable_z(cup, plane)))
    set_point(cup, Point(z=.2))
    set_color(cup, (1, 0, 0, .4))

    num_droplets = 100
    #radius = 0.025
    #radius = 0.005
    radius = 0.0025
    # TODO: more efficient ways to make all of these
    droplets = [create_sphere(radius, mass=0.01) for _ in range(num_droplets)]  # kg

    cup_thickness = 0.001
    lower, upper = get_lower_upper(cup)
    print(lower, upper)
    buffer = cup_thickness + radius
    # Shrink the cup AABB by the wall thickness plus the droplet radius so
    # droplets spawn strictly inside the cup.
    lower = np.array(lower) + buffer * np.ones(len(lower))
    upper = np.array(upper) - buffer * np.ones(len(upper))
    # BUG FIX: in Python 3, zip() is an iterator and does not support slicing;
    # materialize it as a list before taking the x/y ranges.
    limits = list(zip(lower, upper))
    x_range, y_range = limits[:2]
    z = upper[2] + 0.1
    #x_range = [-1, 1]
    #y_range = [-1, 1]
    #z = 1
    for droplet in droplets:
        x = np.random.uniform(*x_range)
        y = np.random.uniform(*y_range)
        set_point(droplet, Point(x, y, z))

    # Re-place the droplets as a near-vertical jittered column so they fall in
    # sequence rather than all at once (overrides the uniform scatter above).
    for i, droplet in enumerate(droplets):
        x, y = np.random.normal(0, 1e-3, 2)
        set_point(droplet, Point(x, y, z + i * (2 * radius + 1e-3)))

    #dump_world()
    wait_for_user()
    #user_input('Start?')

    enable_gravity()
    simulate_for_duration(5.0)
    # enable_real_time()
    # try:
    #     while True:
    #         enable_gravity() # enable_real_time requires a command
    #         #time.sleep(dt)
    # except KeyboardInterrupt:
    #     pass
    # print()
    #time.sleep(1.0)
    wait_for_user('Finish?')
    disconnect()
def add_body(self, name, fixed=True):
    """Load the URDF for `name`, cache the body handle, and return it."""
    body = load_pybullet(get_body_urdf(name), fixed_base=fixed)
    self.bodies[name] = body
    return body
def main(use_turtlebot=True):
    """Demo: move a robot (box or Turtlebot) through a door, kinematically or simulated."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-sim', action='store_true')
    parser.add_argument('-video', action='store_true')
    args = parser.parse_args()
    video = 'video.mp4' if args.video else None

    connect(use_gui=True, mp4=video)
    #set_renderer(enable=False)
    # print(list_pybullet_data())
    # print(list_pybullet_robots())

    draw_global_system()
    set_camera_pose(camera_point=Point(+1.5, -1.5, +1.5),
                    target_point=Point(-1.5, +1.5, 0))

    plane = load_plane()
    #door = load_pybullet('models/door.urdf', fixed_base=True) # From drake
    #set_point(door, Point(z=-.1))
    door = create_door()
    #set_position(door, z=base_aligned_z(door))
    set_point(door, base_aligned(door))
    #set_collision_margin(door, link=0, margin=0.)
    set_configuration(door, [math.radians(-5)])  # crack the door open slightly
    dump_body(door)

    door_joint = get_movable_joints(door)[0]
    door_link = child_link_from_joint(door_joint)
    #draw_pose(get_com_pose(door, door_link), parent=door, parent_link=door_link)
    draw_pose(Pose(), parent=door, parent_link=door_link)
    wait_if_gui()

    ##########

    start_x = +2
    target_x = -start_x
    if not use_turtlebot:
        side = 0.25
        robot = create_box(w=side, l=side, h=side, mass=5., color=BLUE)
        set_position(robot, x=start_x)
        #set_velocity(robot, linear=Point(x=-1))
    else:
        turtlebot_urdf = os.path.abspath(TURTLEBOT_URDF)
        print(turtlebot_urdf)
        #print(read(turtlebot_urdf))
        robot = load_pybullet(turtlebot_urdf, merge=True, fixed_base=True)
        robot_joints = get_movable_joints(robot)[:3]
        set_joint_positions(robot, robot_joints, [start_x, 0, PI])
    set_all_color(robot, BLUE)
    set_position(robot, z=base_aligned_z(robot))
    dump_body(robot)

    ##########

    set_renderer(enable=True)
    #test_door(door)
    if args.sim:
        test_simulation(robot, target_x, video)
    else:
        assert use_turtlebot  # TODO: extend to the block
        test_kinematic(robot, door, target_x)
    disconnect()
def main():
    """Demo: pose a WSG-50 gripper at a top grasp of a block on a table."""
    parser = argparse.ArgumentParser()  # Automatically includes help
    parser.add_argument('-viewer', action='store_true', help='enable viewer.')
    args = parser.parse_args()

    connect(use_gui=True)

    #ycb_path = os.path.join(DRAKE_PATH, DRAKE_YCB)
    #ycb_path = os.path.join(YCB_PATH, YCB_TEMPLATE.format('003_cracker_box'))
    #print(ycb_path)
    #load_pybullet(ycb_path)

    with LockRenderer():
        draw_pose(unit_pose(), length=1, width=1)
        floor = create_floor()
        set_point(floor, Point(z=-EPSILON))
        table = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH / 2,
                             height=TABLE_WIDTH / 2, top_color=TAN, cylinder=False)
        #set_euler(table, Euler(yaw=np.pi/2))
        with HideOutput(False):
            # data_path = add_data_path()
            # robot_path = os.path.join(data_path, WSG_GRIPPER)
            robot_path = get_model_path(WSG_50_URDF)  # WSG_50_URDF | PANDA_HAND_URDF
            #robot_path = get_file_path(__file__, 'mit_arch_suction_gripper/mit_arch_suction_gripper.urdf')
            robot = load_pybullet(robot_path, fixed_base=True)
            #dump_body(robot)
            #robot = create_cylinder(radius=0.5*BLOCK_SIDE, height=4*BLOCK_SIDE) # vacuum gripper

        block1 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=RED)
        block_z = stable_z(block1, table)
        set_point(block1, Point(z=block_z))
        block2 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=GREEN)
        set_point(block2, Point(x=+0.25, z=block_z))
        block3 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=BLUE)
        set_point(block3, Point(x=-0.15, z=block_z))
        blocks = [block1, block2, block3]

        # Reference line at the height of the table surface.
        add_line(Point(x=-TABLE_WIDTH / 2, z=block_z - BLOCK_SIDE / 2 + EPSILON),
                 Point(x=+TABLE_WIDTH / 2, z=block_z - BLOCK_SIDE / 2 + EPSILON),
                 color=RED)
        set_camera_pose(camera_point=Point(y=-1, z=block_z + 1),
                        target_point=Point(z=block_z))

    wait_for_user()
    block_pose = get_pose(block1)
    open_gripper(robot)
    tool_link = link_from_name(robot, 'tool_link')
    base_from_tool = get_relative_pose(robot, tool_link)
    #draw_pose(unit_pose(), parent=robot, parent_link=tool_link)
    y_grasp, x_grasp = get_top_grasps(block1, tool_pose=unit_pose(),
                                      grasp_length=0.03, under=False)
    grasp = y_grasp  # fingers won't collide
    # Solve for the gripper base pose that realizes this grasp on the block.
    gripper_pose = multiply(block_pose, invert(grasp))
    set_pose(robot, multiply(gripper_pose, invert(base_from_tool)))
    wait_for_user('Finish?')
    disconnect()