def load_json_problem(problem_filename):
    reset_simulation()
    json_path = os.path.join(get_json_directory(), problem_filename)
    with open(json_path, 'r') as f:
        problem_json = json.loads(f.read())
    set_camera_pose(point_from_pose(parse_pose(problem_json['camera'])))

    task_json = problem_json['task']
    important_bodies = []
    for field in BODY_FIELDS:
        important_bodies.extend(task_json[field])

    robots = {robot['name']: parse_robot(robot)
              for robot in problem_json['robots']}
    bodies = {body['name']: parse_body(body, (body['name'] in important_bodies))
              for body in problem_json['bodies']}
    regions = {region['name']: parse_region(region)
               for region in task_json['regions']}
    #print(get_image())
    return parse_task(task_json, robots, bodies, regions)
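# A minimal usage sketch for load_json_problem, assuming a pybullet client is
# already connected and that 'problem0.json' (a hypothetical name) exists in the
# JSON directory; demo_load_json_problem is an illustrative helper, not part of
# the original code:
def demo_load_json_problem(problem_filename='problem0.json'):
    connect(use_gui=True)
    task = load_json_problem(problem_filename)  # resets the simulation and rebuilds the scene
    print(task)
    wait_if_gui()
    disconnect()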
def main():
    parser = argparse.ArgumentParser()  # Automatically includes help
    parser.add_argument('-viewer', action='store_true', help='enable viewer.')
    args = parser.parse_args()

    connect(use_gui=True)
    with LockRenderer():
        draw_pose(unit_pose(), length=1, width=1)
        floor = create_floor()
        set_point(floor, Point(z=-EPSILON))
        table1 = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH/2, height=TABLE_WIDTH/2,
                              top_color=TAN, cylinder=False)
        set_point(table1, Point(y=+0.5))
        table2 = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH/2, height=TABLE_WIDTH/2,
                              top_color=TAN, cylinder=False)
        set_point(table2, Point(y=-0.5))
        tables = [table1, table2]
        #set_euler(table1, Euler(yaw=np.pi/2))
        with HideOutput():
            # data_path = add_data_path()
            # robot_path = os.path.join(data_path, WSG_GRIPPER)
            robot_path = get_model_path(WSG_50_URDF)  # WSG_50_URDF | PANDA_HAND_URDF
            robot = load_pybullet(robot_path, fixed_base=True)
            #dump_body(robot)

        block1 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=RED)
        block_z = stable_z(block1, table1)
        set_point(block1, Point(y=-0.5, z=block_z))
        block2 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=GREEN)
        set_point(block2, Point(x=-0.25, y=-0.5, z=block_z))
        block3 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=BLUE)
        set_point(block3, Point(x=-0.15, y=+0.5, z=block_z))
        blocks = [block1, block2, block3]
        set_camera_pose(camera_point=Point(x=-1, z=block_z+1), target_point=Point(z=block_z))

    block_pose = get_pose(block1)
    open_gripper(robot)
    tool_link = link_from_name(robot, 'tool_link')
    base_from_tool = get_relative_pose(robot, tool_link)
    #draw_pose(unit_pose(), parent=robot, parent_link=tool_link)
    grasps = get_side_grasps(block1, tool_pose=Pose(euler=Euler(yaw=np.pi/2)),
                             top_offset=0.02, grasp_length=0.03, under=False)[1:2]
    for grasp in grasps:
        gripper_pose = multiply(block_pose, invert(grasp))
        set_pose(robot, multiply(gripper_pose, invert(base_from_tool)))
        wait_for_user()
    wait_for_user('Finish?')
    disconnect()
def main(group=SE3):
    connect(use_gui=True)
    set_camera_pose(camera_point=SIZE*np.array([1., -1., 1.]))
    # TODO: can also create all links and fix some joints
    # TODO: SE(3) motion planner (like my SE(3) one) where some dimensions are fixed
    obstacle = create_box(w=SIZE, l=SIZE, h=SIZE, color=RED)
    #robot = create_cylinder(radius=0.025, height=0.1, color=BLUE)
    obstacles = [obstacle]

    collision_id, visual_id = create_shape(get_cylinder_geometry(radius=0.025, height=0.1), color=BLUE)
    robot = create_flying_body(group, collision_id, visual_id)

    body_link = get_links(robot)[-1]
    joints = get_movable_joints(robot)
    joint_from_group = dict(zip(group, joints))
    print(joint_from_group)
    #print(get_aabb(robot, body_link))
    dump_body(robot, fixed=False)
    custom_limits = {joint_from_group[j]: l for j, l in CUSTOM_LIMITS.items()}

    # sample_fn = get_sample_fn(robot, joints, custom_limits=custom_limits)
    # for i in range(10):
    #     conf = sample_fn()
    #     set_joint_positions(robot, joints, conf)
    #     wait_for_user('Iteration: {}'.format(i))
    # return

    initial_point = SIZE*np.array([-1., -1., 0])
    #initial_point = -1.*np.ones(3)
    final_point = -initial_point
    initial_euler = np.zeros(3)
    add_line(initial_point, final_point, color=GREEN)

    initial_conf = np.concatenate([initial_point, initial_euler])
    final_conf = np.concatenate([final_point, initial_euler])
    set_joint_positions(robot, joints, initial_conf)
    #print(initial_point, get_link_pose(robot, body_link))
    #set_pose(robot, Pose(point=-1.*np.ones(3)))

    path = plan_joint_motion(robot, joints, final_conf, obstacles=obstacles,
                             self_collisions=False, custom_limits=custom_limits)
    if path is None:
        disconnect()
        return
    for i, conf in enumerate(path):
        set_joint_positions(robot, joints, conf)
        point, quat = get_link_pose(robot, body_link)
        #euler = euler_from_quat(quat)
        euler = intrinsic_euler_from_quat(quat)
        print(conf)
        print(point, euler)
        wait_for_user('Step: {}/{}'.format(i, len(path)))
    wait_for_user('Finish?')
    disconnect()
def set_extrusion_camera(node_points, theta=-math.pi/4, distance=0.25, height=0.25):
    centroid = np.average(node_points, axis=0)
    x, y = distance*unit_from_theta(theta)
    camera_offset = np.array([x, y, height])
    set_camera_pose(camera_point=centroid + camera_offset, target_point=centroid)
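# Usage sketch for set_extrusion_camera; the node coordinates below are made up
# for illustration (normally they come from an extrusion problem's node list),
# and demo_extrusion_camera is a hypothetical helper:
def demo_extrusion_camera():
    example_nodes = np.array([
        [0.0, 0.0, 0.0],
        [0.1, 0.0, 0.1],
        [0.0, 0.1, 0.2],
    ])
    # Frames the centroid of the nodes from theta=-pi/4, offset 0.25m away and 0.25m up
    set_extrusion_camera(example_nodes)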
def create_world():
    world = PlanningWorld(task=None, use_robot=False, visualize=True,
                          color=BACKGROUND_COLOR, shadows=False)
    add_data_path()
    environment = [
        #load_pybullet('models/short_floor.urdf'),
        #load_pybullet('plane.urdf', fixed_base=True),
        create_plane(color=GROUND_COLOR),
    ]
    set_camera_pose(camera_point=0.4*np.array([0, -1.0, 0.5]))
    #draw_pose(unit_pose(), length=1)
    return world
def update_world(world, target_body, camera_point=VIEWER_POINT):
    pr2 = world.perception.pr2
    with ClientSaver(world.perception.client):
        open_arm(pr2, LEFT_ARM)
        open_arm(pr2, RIGHT_ARM)
        set_group_conf(pr2, 'torso', [TORSO_POSITION])
        set_group_conf(pr2, arm_from_arm('left'), WIDE_LEFT_ARM)
        set_group_conf(pr2, arm_from_arm('right'), rightarm_from_leftarm(WIDE_LEFT_ARM))
        target_point = get_point(target_body)
        head_conf = inverse_visibility(pr2, target_point, head_name=CAMERA_OPTICAL_FRAME)  # Must come after torso
        set_group_conf(pr2, 'head', head_conf)
        set_camera_pose(camera_point, target_point=target_point)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-shape', default='box', choices=['box', 'sphere', 'cylinder', 'capsule'])
    parser.add_argument('-video', action='store_true')
    args = parser.parse_args()
    video = 'video.mp4' if args.video else None

    connect(use_gui=True, mp4=video)
    if video is not None:
        set_renderer(enable=False)
    draw_global_system()
    set_camera_pose(camera_point=Point(+1.5, -1.5, +1.5),
                    target_point=Point(-1.5, +1.5, 0))

    add_data_path()
    plane = load_pybullet('plane.urdf', fixed_base=True)
    #plane = load_model('plane.urdf')
    set_point(plane, Point(z=-1e-3))

    ramp = create_ramp()
    dump_body(ramp)

    obj = create_object(args.shape)
    set_euler(obj, np.random.uniform(-np.math.radians(1), np.math.radians(1), 3))
    set_point(obj, np.random.uniform(-1e-3, +1e-3, 3))
    #set_velocity(obj, linear=Point(x=-1))
    set_position(obj, z=2.)
    #set_position(obj, z=base_aligned_z(obj))
    dump_body(obj)
    #add_pose_constraint(obj, pose=(target_point, target_quat), max_force=200)

    set_renderer(enable=True)
    if video is None:
        wait_if_gui('Begin?')
    simulate(controller=condition_controller(lambda step: (step != 0) and at_rest(obj)))
    if video is None:
        wait_if_gui('Finish?')
    disconnect()
def main():
    connect(use_gui=True)
    add_data_path()
    draw_pose(Pose(), length=1.)
    set_camera_pose(camera_point=[1, -1, 1])

    plane = p.loadURDF("plane.urdf")
    with LockRenderer():
        with HideOutput(True):
            robot = load_pybullet(FRANKA_URDF, fixed_base=True)
            assign_link_colors(robot, max_colors=3, s=0.5, v=1.)
            #set_all_color(robot, GREEN)
    obstacles = [plane]  # TODO: collisions with the ground

    dump_body(robot)
    print('Start?')
    wait_for_user()

    info = PANDA_INFO
    tool_link = link_from_name(robot, 'panda_hand')
    draw_pose(Pose(), parent=robot, parent_link=tool_link)
    joints = get_movable_joints(robot)
    print('Joints', [get_joint_name(robot, joint) for joint in joints])
    check_ik_solver(info)

    sample_fn = get_sample_fn(robot, joints)
    for i in range(10):
        print('Iteration:', i)
        conf = sample_fn()
        set_joint_positions(robot, joints, conf)
        wait_for_user()
        #test_ik(robot, info, tool_link, get_link_pose(robot, tool_link))
        test_retraction(robot, info, tool_link, use_pybullet=False,
                        max_distance=0.1, max_time=0.05, max_candidates=100)
    disconnect()
def main():
    benchmark = 'tmp-benchmark-data'
    problem = 'problem1'  # Hanoi
    #problem = 'problem2' # Blocksworld
    #problem = 'problem3' # Clutter
    #problem = 'problem4' # Nonmono

    root_directory = os.path.dirname(os.path.abspath(__file__))
    directory = os.path.join(root_directory, '..', 'problems', benchmark, problem)
    [mesh_directory] = list(filter(os.path.isdir, (os.path.join(directory, o)
                                                   for o in os.listdir(directory)
                                                   if o.endswith('meshes'))))
    [xml_path] = [os.path.join(directory, o)
                  for o in os.listdir(directory) if o.endswith('xml')]
    if os.path.isdir(xml_path):
        xml_path = glob.glob(os.path.join(xml_path, '*.xml'))[0]

    print(mesh_directory)
    print(xml_path)
    xml_data = etree.parse(xml_path)

    connect(use_gui=True)
    #add_data_path()
    #load_pybullet("plane.urdf")

    draw_global_system()
    set_camera_pose(camera_point=[+1, -1, 1])

    #root = xml_data.getroot()
    #print(root.items())
    for obj in xml_data.findall('/objects/obj'):
        parse_object(obj, mesh_directory)
    for robot in xml_data.findall('/robots/robot'):
        parse_robot(robot)
    wait_if_gui()
    disconnect()
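# The parsing loops above assume a benchmark XML layout roughly like the following
# (a hypothetical minimal file; the actual schema ships with the benchmark data):
#
# <problem>
#   <objects>
#     <obj> ... </obj>
#   </objects>
#   <robots>
#     <robot> ... </robot>
#   </robots>
# </problem>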
def main(use_turtlebot=True):
    parser = argparse.ArgumentParser()
    parser.add_argument('-sim', action='store_true')
    parser.add_argument('-video', action='store_true')
    args = parser.parse_args()
    video = 'video.mp4' if args.video else None

    connect(use_gui=True, mp4=video)
    #set_renderer(enable=False)
    # print(list_pybullet_data())
    # print(list_pybullet_robots())
    draw_global_system()
    set_camera_pose(camera_point=Point(+1.5, -1.5, +1.5),
                    target_point=Point(-1.5, +1.5, 0))

    plane = load_plane()
    #door = load_pybullet('models/door.urdf', fixed_base=True)  # From drake
    #set_point(door, Point(z=-.1))
    door = create_door()
    #set_position(door, z=base_aligned_z(door))
    set_point(door, base_aligned(door))
    #set_collision_margin(door, link=0, margin=0.)
    set_configuration(door, [math.radians(-5)])
    dump_body(door)

    door_joint = get_movable_joints(door)[0]
    door_link = child_link_from_joint(door_joint)
    #draw_pose(get_com_pose(door, door_link), parent=door, parent_link=door_link)
    draw_pose(Pose(), parent=door, parent_link=door_link)
    wait_if_gui()

    ##########

    start_x = +2
    target_x = -start_x
    if not use_turtlebot:
        side = 0.25
        robot = create_box(w=side, l=side, h=side, mass=5., color=BLUE)
        set_position(robot, x=start_x)
        #set_velocity(robot, linear=Point(x=-1))
    else:
        turtlebot_urdf = os.path.abspath(TURTLEBOT_URDF)
        print(turtlebot_urdf)
        #print(read(turtlebot_urdf))
        robot = load_pybullet(turtlebot_urdf, merge=True, fixed_base=True)
        robot_joints = get_movable_joints(robot)[:3]
        set_joint_positions(robot, robot_joints, [start_x, 0, PI])
        set_all_color(robot, BLUE)
    set_position(robot, z=base_aligned_z(robot))
    dump_body(robot)

    ##########

    set_renderer(enable=True)
    #test_door(door)
    if args.sim:
        test_simulation(robot, target_x, video)
    else:
        assert use_turtlebot  # TODO: extend to the block
        test_kinematic(robot, door, target_x)
    disconnect()
def __init__(self, robot_name=FRANKA_CARTER, use_gui=True, full_kitchen=False):
    self.task = None
    self.interface = None
    self.client = connect(use_gui=use_gui)
    set_real_time(False)
    #set_caching(False)  # Seems to make things worse
    disable_gravity()
    add_data_path()
    set_camera_pose(camera_point=[2, -1.5, 1])
    if DEBUG:
        draw_pose(Pose(), length=3)

    #self.kitchen_yaml = load_yaml(KITCHEN_YAML)
    with HideOutput(enable=True):
        self.kitchen = load_pybullet(KITCHEN_PATH, fixed_base=True, cylinder=True)

    self.floor = load_pybullet('plane.urdf', fixed_base=True)
    z = stable_z(self.kitchen, self.floor) - get_point(self.floor)[2]
    point = np.array(get_point(self.kitchen)) - np.array([0, 0, z])
    set_point(self.floor, point)

    self.robot_name = robot_name
    if self.robot_name == FRANKA_CARTER:
        urdf_path, yaml_path = FRANKA_CARTER_PATH, None
        #urdf_path, yaml_path = FRANKA_CARTER_PATH, FRANKA_YAML
    #elif self.robot_name == EVE:
    #    urdf_path, yaml_path = EVE_PATH, None
    else:
        raise ValueError(self.robot_name)
    self.robot_yaml = yaml_path if yaml_path is None else load_yaml(yaml_path)

    with HideOutput(enable=True):
        self.robot = load_pybullet(urdf_path)
    #dump_body(self.robot)
    #chassis_pose = get_link_pose(self.robot, link_from_name(self.robot, 'chassis_link'))
    #wheel_pose = get_link_pose(self.robot, link_from_name(self.robot, 'left_wheel_link'))
    #wait_for_user()
    set_point(self.robot, Point(z=stable_z(self.robot, self.floor)))
    self.set_initial_conf()
    self.gripper = create_gripper(self.robot)

    self.environment_bodies = {}
    if full_kitchen:
        self._initialize_environment()
    self._initialize_ik(urdf_path)
    self.initial_saver = WorldSaver()

    self.body_from_name = {}
    # self.path_from_name = {}
    self.names_from_type = {}
    self.custom_limits = {}
    self.base_limits_handles = []
    self.cameras = {}

    self.disabled_collisions = set()
    if self.robot_name == FRANKA_CARTER:
        self.disabled_collisions.update(
            tuple(link_from_name(self.robot, link) for link in pair)
            for pair in DISABLED_FRANKA_COLLISIONS)

    self.carry_conf = FConf(self.robot, self.arm_joints, self.default_conf)
    #self.calibrate_conf = Conf(self.robot, self.arm_joints, load_calibrate_conf(side='left'))
    self.calibrate_conf = FConf(self.robot, self.arm_joints, self.default_conf)  # Must differ from carry_conf
    self.special_confs = [self.carry_conf]  #, self.calibrate_conf]
    self.open_gq = FConf(self.robot, self.gripper_joints,
                         get_max_limits(self.robot, self.gripper_joints))
    self.closed_gq = FConf(self.robot, self.gripper_joints,
                           get_min_limits(self.robot, self.gripper_joints))
    self.gripper_confs = [self.open_gq, self.closed_gq]
    self.open_kitchen_confs = {joint: FConf(self.kitchen, [joint], [self.open_conf(joint)])
                               for joint in self.kitchen_joints}
    self.closed_kitchen_confs = {joint: FConf(self.kitchen, [joint], [self.closed_conf(joint)])
                                 for joint in self.kitchen_joints}
    self._update_custom_limits()
    self._update_initial()
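# Usage sketch for the constructor above, assuming it belongs to a World class (the
# enclosing class name is not shown in this excerpt) and that the KITCHEN_PATH and
# FRANKA_CARTER_PATH assets are available locally:
#
#   world = World(robot_name=FRANKA_CARTER, use_gui=True, full_kitchen=False)
#   world.initial_saver.restore()  # return to the saved initial state
#   disconnect()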
def main(): """ ./home/demo/catkin_percep/collect_scales.sh (includes scale offset) Make sure to start the scales with nothing on them (for calibration) """ assert (get_python_version() == 2) # ariadne has ROS with python2 parser = argparse.ArgumentParser() parser.add_argument('-a', '--active', action='store_true', help='Uses active learning queries.') parser.add_argument('-d', '--debug', action='store_true', help='Disables saving during debugging.') parser.add_argument( '-f', '--fn', type=str, default=TRAINING, # DESIGNED | TRAINING help='The name of or the path to the policy that generates parameters.' ) parser.add_argument('-m', '--material', required=True, choices=sorted(MATERIALS), help='The name of the material being used.') parser.add_argument('-p', '--problem', required=True, choices=sorted(REQUIREMENT_FNS.keys()), help='The name of the skill to learn.') parser.add_argument('-s', '--spoon', default=None, choices=SPOONS, help='The name of the spoon being used.') parser.add_argument('-r', '--train', action='store_true', help='When enabled, uses the training dataset.') parser.add_argument( '-v', '--visualize_planning', action='store_true', help= 'When enabled, visualizes planning rather than the world (for debugging).' ) args = parser.parse_args() # TODO: toggle material default based on task # TODO: label material on the image assert args.material in MODEL_MASSES print('Policy:', args.fn) assert implies(args.problem in ['scoop'], args.spoon is not None) assert implies(args.active, args.train) ros_world = ROSWorld(sim_only=False, visualize=not args.visualize_planning) classes_pub = rospy.Publisher('~collect_classes', String, queue_size=1) with ros_world: set_camera_pose(np.array([1.5, -0.5, 1.5]), target_point=np.array([0.75, 0, 0.75])) arm, is_open = ACTIVE_ARMS[args.problem] open_grippers = {arm: is_open} if args.problem == 'scoop': ros_world.controller.open_gripper(get_arm_prefix(arm), blocking=True) ros_world.controller.speak("{:.0f} seconds to attach {}".format( ATTACH_TIME, format_class(args.spoon))) rospy.sleep(ATTACH_TIME) # Sleep to have time to set the spoon move_to_initial_config(ros_world, open_grippers) #get_other_arm # TODO: cross validation for measuring performance across a few bowls launch = launch_kinect() video_time = time.time() if args.debug: ros_world.controller.speak("Warning! 
Data will not be saved.") time.sleep(1.0) data_path = None else: # TODO: only create directory if examples made data_path = get_data_path(args.problem, real=True) # TODO: log camera image after the pour policy = args.fn learner_path = None test_data = None if isinstance(policy, str) and os.path.isfile(policy): policy = read_pickle(policy) assert isinstance(policy, ActiveLearner) print(policy) print(policy.algorithm) #policy.transfer_weight = 0 # print(policy.xx.shape) # policy.results = policy.results[:-1] # policy.xx = policy.xx[:-1] # policy.yy = policy.yy[:-1] # policy.weights = policy.weights[:-1] # print(policy.xx.shape) # write_pickle(args.fn, policy) # print('Saved', args.fn) if args.active: # policy.retrain() test_domain = load_data(SCOOP_TEST_DATASETS, verbose=False) test_data = test_domain.create_dataset(include_none=False, binary=False) policy.query_type = STRADDLE # VARIANCE #policy.weights = 0.1*np.ones(policy.yy.shape) # TODO: make this multiplicative #policy.retrain() evaluate_confusions(test_data, policy) else: policy.query_type = BEST ensure_dir(LEARNER_DIRECTORY) date_name = datetime.datetime.now().strftime(DATE_FORMAT) filename = '{}_{}.pk{}'.format(get_label(policy.algorithm), date_name, get_python_version()) learner_path = os.path.join(LEARNER_DIRECTORY, filename) if ACTIVE_FEATURE and args.active: assert isinstance(policy, ActiveLearner) generator = create_active_generator(args, policy) else: generator = create_random_generator(args) pair = next(generator) print('Next pair:', pair) classes_pub.publish('{},{}'.format(*pair)) for phrase in map(format_class, pair): ros_world.controller.speak(phrase) wait_for_user('Press enter to begin') # TODO: change the name of the directory after additional samples results = [] num_trials = num_failures = num_scored = 0 while True: start_time = elapsed_time(video_time) result = run_loop(args, ros_world, policy) print('Result:', str_from_object(result)) print('{}\nTrials: {} | Successes: {} | Failures: {} | Time: {:.3f}'. format(SEPARATOR, num_trials, len(results), num_failures, elapsed_time(video_time))) num_trials += 1 if result is None: # TODO: result['execution'] num_failures += 1 print('Error! Trial resulted in an exception') move_to_initial_config(ros_world, open_grippers) continue end_time = elapsed_time(video_time) print('Elapsed time:', end_time - start_time) # TODO: record the type of failure (planning, execution, etc...) scored = result['score'] is not None num_scored += scored # TODO: print the score if isinstance(policy, ActiveLearner) and args.active: # and scored: update_learner(policy, learner_path, result) evaluate_confusions(test_data, policy) # TODO: how to handle failures that require bad annotations? pair = next(generator) print('Next pair:', pair) classes_pub.publish('{},{}'.format(*pair)) for phrase in map(format_class, pair): ros_world.controller.speak(phrase) annotation = wait_for_user( 'Enter annotation and press enter to continue: ') result.update({ # TODO: record the query_type 'policy': args.fn, 'active_feature': ACTIVE_FEATURE, 'trial': num_trials, 'start_time': start_time, 'end_time': end_time, 'annotation': annotation, }) results.append(result) if data_path is not None: write_results(data_path, results) #if annotation in ['q', 'quit']: # TODO: Ctrl-C to quit # break ros_world.controller.speak("Finished") if launch is not None: launch.shutdown() print('Total time:', elapsed_time(video_time))
def main(floor_width=2.0):
    # Creates a pybullet world and a visualizer for it
    connect(use_gui=True)
    set_camera_pose(camera_point=[1, -1, 1], target_point=unit_point())  # Sets the camera's position

    identity_pose = (unit_point(), unit_quat())
    origin_handles = draw_pose(identity_pose, length=1.0)  # Draws the origin coordinate system (x:RED, y:GREEN, z:BLUE)

    # Bodies are described by an integer index
    floor = create_box(w=floor_width, l=floor_width, h=0.001, color=TAN)  # Creates a tan box object for the floor
    set_point(floor, [0, 0, -0.001/2.])  # Sets the [x,y,z] translation of the floor

    obstacle = create_box(w=0.5, l=0.5, h=0.1, color=RED)  # Creates a red box obstacle
    set_point(obstacle, [0.5, 0.5, 0.1/2.])  # Sets the [x,y,z] position of the obstacle
    print('Position:', get_point(obstacle))
    set_euler(obstacle, [0, 0, np.pi/4])  # Sets the [roll,pitch,yaw] orientation of the obstacle
    print('Orientation:', get_euler(obstacle))

    with LockRenderer():  # Temporarily prevents the renderer from updating for improved loading efficiency
        with HideOutput():  # Temporarily suppresses pybullet output
            robot = load_model(TURTLEBOT_URDF)  # TURTLEBOT_URDF | ROOMBA_URDF  # Loads a robot from a *.urdf file
            assign_link_colors(robot)
        robot_z = stable_z(robot, floor)  # Returns the z offset required for robot to be placed on floor
        set_point(robot, [0, 0, robot_z])  # Sets the z position of the robot
    dump_body(robot)  # Prints joint and link information about robot
    set_all_static()

    # Joints are also described by an integer index
    # The turtlebot has explicit joints representing x, y, theta
    x_joint = joint_from_name(robot, 'x')  # Looks up the robot joint named 'x'
    y_joint = joint_from_name(robot, 'y')  # Looks up the robot joint named 'y'
    theta_joint = joint_from_name(robot, 'theta')  # Looks up the robot joint named 'theta'
    joints = [x_joint, y_joint, theta_joint]

    base_link = link_from_name(robot, 'base_link')  # Looks up the robot link named 'base_link'
    world_from_obstacle = get_pose(obstacle)  # Returns the pose of the origin of obstacle wrt the world frame
    obstacle_aabb = get_subtree_aabb(obstacle)
    draw_aabb(obstacle_aabb)

    random.seed(0)  # Sets the random number generator state
    handles = []
    for i in range(10):
        for handle in handles:
            remove_debug(handle)
        print('\nIteration: {}'.format(i))
        x = random.uniform(-floor_width/2., floor_width/2.)
        set_joint_position(robot, x_joint, x)  # Sets the current value of the x joint
        y = random.uniform(-floor_width/2., floor_width/2.)
        set_joint_position(robot, y_joint, y)  # Sets the current value of the y joint
        yaw = random.uniform(-np.pi, np.pi)
        set_joint_position(robot, theta_joint, yaw)  # Sets the current value of the theta joint
        values = get_joint_positions(robot, joints)  # Obtains the current values for the specified joints
        print('Joint values: [x={:.3f}, y={:.3f}, yaw={:.3f}]'.format(*values))

        world_from_robot = get_link_pose(robot, base_link)  # Returns the pose of base_link wrt the world frame
        position, quaternion = world_from_robot  # Decomposing pose into position and orientation (quaternion)
        x, y, z = position  # Decomposing position into x, y, z
        print('Base link position: [x={:.3f}, y={:.3f}, z={:.3f}]'.format(x, y, z))
        euler = euler_from_quat(quaternion)  # Converting from quaternion to euler angles
        roll, pitch, yaw = euler  # Decomposing orientation into roll, pitch, yaw
        print('Base link orientation: [roll={:.3f}, pitch={:.3f}, yaw={:.3f}]'.format(roll, pitch, yaw))
        handles.extend(draw_pose(world_from_robot, length=0.5))  # Draws the base coordinate system (x:RED, y:GREEN, z:BLUE)

        obstacle_from_robot = multiply(invert(world_from_obstacle), world_from_robot)  # Relative transformation from robot to obstacle

        robot_aabb = get_subtree_aabb(robot, base_link)  # Computes the robot's axis-aligned bounding box (AABB)
        lower, upper = robot_aabb  # Decomposing the AABB into the lower and upper extrema
        center = (lower + upper)/2.  # Computing the center of the AABB
        extent = upper - lower  # Computing the dimensions of the AABB
        handles.extend(draw_point(center))
        handles.extend(draw_aabb(robot_aabb))

        collision = pairwise_collision(robot, obstacle)  # Checks whether robot is currently colliding with obstacle
        print('Collision: {}'.format(collision))
        wait_for_duration(1.0)  # Like sleep() but also updates the viewer

    wait_if_gui()  # Like raw_input() but also updates the viewer
    disconnect()  # Destroys the pybullet world
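# Frame-composition note for the obstacle_from_robot computation above: poses
# compose as world_from_robot = multiply(world_from_obstacle, obstacle_from_robot),
# so inverting the obstacle pose isolates the relative transform. A round-trip
# sanity check (a sketch; check_relative_pose is a hypothetical helper):
def check_relative_pose(world_from_obstacle, world_from_robot):
    obstacle_from_robot = multiply(invert(world_from_obstacle), world_from_robot)
    reconstructed = multiply(world_from_obstacle, obstacle_from_robot)
    # The reconstructed pose should match the original up to numerical precision
    assert np.allclose(point_from_pose(reconstructed),
                       point_from_pose(world_from_robot), atol=1e-6)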
def main():
    parser = argparse.ArgumentParser()  # Automatically includes help
    parser.add_argument('-viewer', action='store_true', help='enable viewer.')
    args = parser.parse_args()

    connect(use_gui=True)
    #ycb_path = os.path.join(DRAKE_PATH, DRAKE_YCB)
    #ycb_path = os.path.join(YCB_PATH, YCB_TEMPLATE.format('003_cracker_box'))
    #print(ycb_path)
    #load_pybullet(ycb_path)

    with LockRenderer():
        draw_pose(unit_pose(), length=1, width=1)
        floor = create_floor()
        set_point(floor, Point(z=-EPSILON))
        table = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH/2, height=TABLE_WIDTH/2,
                             top_color=TAN, cylinder=False)
        #set_euler(table, Euler(yaw=np.pi/2))
        with HideOutput(False):
            # data_path = add_data_path()
            # robot_path = os.path.join(data_path, WSG_GRIPPER)
            robot_path = get_model_path(WSG_50_URDF)  # WSG_50_URDF | PANDA_HAND_URDF
            #robot_path = get_file_path(__file__, 'mit_arch_suction_gripper/mit_arch_suction_gripper.urdf')
            robot = load_pybullet(robot_path, fixed_base=True)
            #dump_body(robot)
            #robot = create_cylinder(radius=0.5*BLOCK_SIDE, height=4*BLOCK_SIDE)  # vacuum gripper

        block1 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=RED)
        block_z = stable_z(block1, table)
        set_point(block1, Point(z=block_z))
        block2 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=GREEN)
        set_point(block2, Point(x=+0.25, z=block_z))
        block3 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=BLUE)
        set_point(block3, Point(x=-0.15, z=block_z))
        blocks = [block1, block2, block3]
        add_line(Point(x=-TABLE_WIDTH/2, z=block_z - BLOCK_SIDE/2 + EPSILON),
                 Point(x=+TABLE_WIDTH/2, z=block_z - BLOCK_SIDE/2 + EPSILON), color=RED)
        set_camera_pose(camera_point=Point(y=-1, z=block_z + 1), target_point=Point(z=block_z))

    wait_for_user()
    block_pose = get_pose(block1)
    open_gripper(robot)
    tool_link = link_from_name(robot, 'tool_link')
    base_from_tool = get_relative_pose(robot, tool_link)
    #draw_pose(unit_pose(), parent=robot, parent_link=tool_link)
    y_grasp, x_grasp = get_top_grasps(block1, tool_pose=unit_pose(),
                                      grasp_length=0.03, under=False)
    grasp = y_grasp  # fingers won't collide
    gripper_pose = multiply(block_pose, invert(grasp))
    set_pose(robot, multiply(gripper_pose, invert(base_from_tool)))
    wait_for_user('Finish?')
    disconnect()
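# Note on the pose algebra above: under this grasp convention, each grasp acts as
# gripper_from_block (the block's pose expressed in the tool frame), so the world
# tool pose is world_from_block composed with its inverse, and the gripper body is
# then placed so that its tool_link coincides with that pose. A sketch
# (gripper_pose_from_grasp is a hypothetical helper):
def gripper_pose_from_grasp(world_from_block, gripper_from_block, base_from_tool):
    world_from_tool = multiply(world_from_block, invert(gripper_from_block))
    return multiply(world_from_tool, invert(base_from_tool))  # pose to assign to the gripper base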
def reset_robot(task, ros_world, **kwargs):
    with ClientSaver(ros_world.perception.client):
        set_camera_pose(np.array([1.5, -0.5, 1.5]), target_point=np.array([0.75, 0, 0.75]))
    open_grippers = {arm: arm in task.init_holding for arm in task.arms}
    move_to_initial_config(ros_world, open_grippers, **kwargs)