def add_scales(task, ros_world):
    scale_stackings = {}
    holding = {grasp.obj_name for grasp in task.init_holding.values()}
    with ClientSaver(ros_world.client):
        perception = ros_world.perception
        items = sorted(set(perception.get_items()) - holding,
                       key=lambda n: get_point(ros_world.get_body(n))[1],
                       reverse=False)  # Right to left
        for i, item in enumerate(items):
            if not POUR and (get_type(item) != SCOOP_CUP):
                continue
            item_body = ros_world.get_body(item)
            scale = create_name(SCALE_TYPE, i + 1)
            with HideOutput():
                scale_body = load_pybullet(get_body_urdf(get_type(scale)), fixed_base=True)
            ros_world.perception.sim_bodies[scale] = scale_body
            ros_world.perception.sim_items[scale] = None
            item_z = stable_z(item_body, scale_body)
            scale_pose_item = Pose(point=Point(z=-item_z))  # TODO: relies on origin in base
            set_pose(scale_body, multiply(get_pose(item_body), scale_pose_item))
            roll, pitch, _ = get_euler(scale_body)
            set_euler(scale_body, [roll, pitch, 0])
            scale_stackings[scale] = item
        #wait_for_user()
    return scale_stackings

def mirror_robot(robot1, node_points):
    # TODO: place robots side by side or diagonal across
    set_extrusion_camera(node_points, theta=-np.pi / 3)
    #draw_pose(Pose())
    centroid = np.average(node_points, axis=0)
    centroid_pose = Pose(point=centroid)
    #draw_pose(Pose(point=centroid))
    #print(centroid)
    scale = 0.  # 0.15
    vector = get_point(robot1) - centroid
    set_pose(robot1, Pose(point=Point(*+scale * vector[:2])))
    # Inner product of end-effector z with base->centroid or perpendicular to this line
    # Partition by sides
    robot2 = load_robot()
    set_pose(robot2, Pose(point=Point(*-(2 + scale) * vector[:2]),
                          euler=Euler(yaw=np.pi)))

    #robots = [robot1]
    robots = [robot1, robot2]
    for robot in robots:
        set_configuration(robot, DUAL_CONF)
        #joint1 = get_movable_joints(robot)[0]
        #set_joint_position(robot, joint1, np.pi / 8)
        draw_pose(get_pose(robot), length=0.25)
    return robots

def pour_beads(world, cup_name, beads, reset_contained=False, fix_outside=True,
               cup_thickness=0.01, bead_offset=0.01, drop_rate=0.02, **kwargs):
    if not beads:
        return set()
    start_time = time.time()
    # TODO: compute the radius of each bead
    bead_radius = np.average(approximate_as_prism(beads[0])) / 2.

    masses = list(map(get_mass, beads))
    savers = list(map(BodySaver, beads))
    for body in beads:
        set_mass(body, 0)

    cup = world.get_body(cup_name)
    local_center, (diameter, height) = approximate_as_cylinder(cup)
    center = get_point(cup) + local_center
    interior_radius = max(0.0, diameter / 2. - bead_radius - cup_thickness)
    # TODO: fill up to a certain threshold

    ty = get_type(cup_name)
    if ty in SPOON_DIAMETERS:
        # TODO: do this in a more principled way
        interior_radius = 0
        center[1] += (SPOON_LENGTHS[ty] - SPOON_DIAMETERS[ty]) / 2.

    # TODO: some sneak out through the bottom
    # TODO: could reduce gravity while filling
    world.controller.set_gravity()
    for i, bead in enumerate(beads):
        # TODO: halton sequence
        x, y = center[:2] + np.random.uniform(0, interior_radius) * unit_from_theta(
            np.random.uniform(-np.pi, np.pi))
        new_z = center[2] + height / 2. + bead_radius + bead_offset
        set_point(bead, [x, y, new_z])
        set_mass(bead, masses[i])
        world.controller.rest_for_duration(drop_rate)
    world.controller.rest_for_duration(BEADS_REST)
    print('Simulated {} beads in {:.3f} seconds'.format(
        len(beads), elapsed_time(start_time)))

    contained_beads = get_contained_beads(cup, beads, **kwargs)
    #wait_for_user()
    for body in beads:
        if fix_outside and (body not in contained_beads):
            set_mass(body, 0)
    for saver in savers:
        if reset_contained or (saver.body not in contained_beads):
            saver.restore()
    #wait_for_user()
    return contained_beads

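# Note on the sampling in pour_beads: the drop position draws a radius uniformly
# in [0, interior_radius], which concentrates samples near the cup axis. A minimal
# sketch of an area-uniform alternative is below; `sample_point_in_disk` is a
# hypothetical helper (not used by the code above) and assumes numpy (np) and
# unit_from_theta as already imported in this module.
def sample_point_in_disk(center_xy, radius):
    # The sqrt transform makes the samples uniform over the disk's area
    r = radius * np.sqrt(np.random.uniform(0., 1.))
    theta = np.random.uniform(-np.pi, np.pi)
    return np.array(center_xy) + r * unit_from_theta(theta)
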
def update_world(world, target_body, camera_point=VIEWER_POINT):
    pr2 = world.perception.pr2
    with ClientSaver(world.perception.client):
        open_arm(pr2, LEFT_ARM)
        open_arm(pr2, RIGHT_ARM)
        set_group_conf(pr2, 'torso', [TORSO_POSITION])
        set_group_conf(pr2, arm_from_arm('left'), WIDE_LEFT_ARM)
        set_group_conf(pr2, arm_from_arm('right'), rightarm_from_leftarm(WIDE_LEFT_ARM))
        target_point = get_point(target_body)
        head_conf = inverse_visibility(pr2, target_point,
                                       head_name=CAMERA_OPTICAL_FRAME)  # Must come after torso
        set_group_conf(pr2, 'head', head_conf)
        set_camera_pose(camera_point, target_point=target_point)

def draw_push_goal(world, block_name, pos2d):
    block_z = get_point(world.get_body(block_name))[2]
    handles = draw_point(np.append(pos2d, [block_z]), size=0.05, color=(1, 0, 0))
    lower, upper = POSE2D_RANGE
    handles.extend(add_segments([
        (lower[0], lower[1], block_z),
        (upper[0], lower[1], block_z),
        (upper[0], upper[1], block_z),
        (lower[0], upper[1], block_z),
    ], closed=True))
    return handles

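# Minimal usage sketch for draw_push_goal (hypothetical `world` and 'block' names,
# following the conventions above); the returned debug handles can be cleared
# later with remove_debug:
#
#   goal_handles = draw_push_goal(world, 'block', np.array([0.5, -0.2]))
#   ...
#   for handle in goal_handles:
#       remove_debug(handle)
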
def _update_custom_limits(self, min_extent=0.0):
    #robot_extent = get_aabb_extent(get_aabb(self.robot))
    # Scaling by 0.5 to prevent getting caught in corners
    #min_extent = 0.5 * min(robot_extent[:2]) * np.ones(2) / 2
    world_aabb = self.get_world_aabb()
    full_lower, full_upper = world_aabb
    base_limits = (full_lower[:2] - min_extent, full_upper[:2] + min_extent)
    base_limits[1][0] = COMPUTER_X - min_extent  # TODO: robot radius
    base_limits[0][1] += 0.1
    base_limits[1][1] -= 0.1
    for handle in self.base_limits_handles:
        remove_debug(handle)
    self.base_limits_handles = []
    #self.base_limits_handles.extend(draw_aabb(world_aabb))
    z = get_point(self.floor)[2] + 1e-2
    self.base_limits_handles.extend(draw_base_limits(base_limits, z=z))
    self.custom_limits = custom_limits_from_base_limits(self.robot, base_limits)
    return self.custom_limits

def _initialize_environment(self):
    # wall to fridge: 4cm
    # fridge to goal: 1.5cm
    # hitman to range: 3.5cm
    # range to indigo: 3.5cm
    self.environment_poses = read_json(POSES_PATH)
    root_from_world = get_link_pose(self.kitchen, self.world_link)
    for name, world_from_part in self.environment_poses.items():
        if name in ['range']:
            continue
        visual_path = os.path.join(KITCHEN_LEFT_PATH, '{}.obj'.format(name))
        collision_path = os.path.join(KITCHEN_LEFT_PATH, '{}_collision.obj'.format(name))
        mesh_path = None
        for path in [collision_path, visual_path]:
            if os.path.exists(path):
                mesh_path = path
                break
        if mesh_path is None:
            continue
        body = load_pybullet(mesh_path, fixed_base=True)
        root_from_part = multiply(root_from_world, world_from_part)
        if name in ['axe', 'dishwasher', 'echo', 'fox', 'golf']:
            (pos, quat) = root_from_part
            # TODO: still not totally aligned
            pos = np.array(pos) + np.array([0, -0.035, 0])  # , -0.005])
            root_from_part = (pos, quat)
        self.environment_bodies[name] = body
        set_pose(body, root_from_part)
    # TODO: release bounding box or convex hull
    # TODO: static object nonconvex collisions
    if TABLE_NAME in self.environment_bodies:
        body = self.environment_bodies[TABLE_NAME]
        _, (w, l, _) = approximate_as_prism(body)
        _, _, z = get_point(body)
        new_pose = Pose(Point(TABLE_X + l / 2, -TABLE_Y, z), Euler(yaw=np.pi / 2))
        set_pose(body, new_pose)

def __init__(self, robot_name=FRANKA_CARTER, use_gui=True, full_kitchen=False):
    self.task = None
    self.interface = None
    self.client = connect(use_gui=use_gui)
    set_real_time(False)
    #set_caching(False)  # Seems to make things worse
    disable_gravity()
    add_data_path()
    set_camera_pose(camera_point=[2, -1.5, 1])
    if DEBUG:
        draw_pose(Pose(), length=3)

    #self.kitchen_yaml = load_yaml(KITCHEN_YAML)
    with HideOutput(enable=True):
        self.kitchen = load_pybullet(KITCHEN_PATH, fixed_base=True, cylinder=True)

    self.floor = load_pybullet('plane.urdf', fixed_base=True)
    z = stable_z(self.kitchen, self.floor) - get_point(self.floor)[2]
    point = np.array(get_point(self.kitchen)) - np.array([0, 0, z])
    set_point(self.floor, point)

    self.robot_name = robot_name
    if self.robot_name == FRANKA_CARTER:
        urdf_path, yaml_path = FRANKA_CARTER_PATH, None
        #urdf_path, yaml_path = FRANKA_CARTER_PATH, FRANKA_YAML
    #elif self.robot_name == EVE:
    #    urdf_path, yaml_path = EVE_PATH, None
    else:
        raise ValueError(self.robot_name)
    self.robot_yaml = yaml_path if yaml_path is None else load_yaml(yaml_path)
    with HideOutput(enable=True):
        self.robot = load_pybullet(urdf_path)
    #dump_body(self.robot)
    #chassis_pose = get_link_pose(self.robot, link_from_name(self.robot, 'chassis_link'))
    #wheel_pose = get_link_pose(self.robot, link_from_name(self.robot, 'left_wheel_link'))
    #wait_for_user()
    set_point(self.robot, Point(z=stable_z(self.robot, self.floor)))
    self.set_initial_conf()
    self.gripper = create_gripper(self.robot)

    self.environment_bodies = {}
    if full_kitchen:
        self._initialize_environment()
    self._initialize_ik(urdf_path)
    self.initial_saver = WorldSaver()

    self.body_from_name = {}
    #self.path_from_name = {}
    self.names_from_type = {}
    self.custom_limits = {}
    self.base_limits_handles = []
    self.cameras = {}

    self.disabled_collisions = set()
    if self.robot_name == FRANKA_CARTER:
        self.disabled_collisions.update(
            tuple(link_from_name(self.robot, link) for link in pair)
            for pair in DISABLED_FRANKA_COLLISIONS)

    self.carry_conf = FConf(self.robot, self.arm_joints, self.default_conf)
    #self.calibrate_conf = Conf(self.robot, self.arm_joints, load_calibrate_conf(side='left'))
    self.calibrate_conf = FConf(self.robot, self.arm_joints,
                                self.default_conf)  # Must differ from carry_conf
    self.special_confs = [self.carry_conf]  #, self.calibrate_conf]
    self.open_gq = FConf(self.robot, self.gripper_joints,
                         get_max_limits(self.robot, self.gripper_joints))
    self.closed_gq = FConf(self.robot, self.gripper_joints,
                           get_min_limits(self.robot, self.gripper_joints))
    self.gripper_confs = [self.open_gq, self.closed_gq]
    self.open_kitchen_confs = {
        joint: FConf(self.kitchen, [joint], [self.open_conf(joint)])
        for joint in self.kitchen_joints}
    self.closed_kitchen_confs = {
        joint: FConf(self.kitchen, [joint], [self.closed_conf(joint)])
        for joint in self.kitchen_joints}
    self._update_custom_limits()
    self._update_initial()

def main(floor_width=2.0):
    # Creates a pybullet world and a visualizer for it
    connect(use_gui=True)
    identity_pose = (unit_point(), unit_quat())
    origin_handles = draw_pose(identity_pose, length=1.0)  # Draws the origin coordinate system (x:RED, y:GREEN, z:BLUE)

    # Bodies are described by an integer index
    floor = create_box(w=floor_width, l=floor_width, h=0.001, color=TAN)  # Creates a tan box object for the floor
    set_point(floor, [0, 0, -0.001 / 2.])  # Sets the [x,y,z] translation of the floor

    obstacle = create_box(w=0.5, l=0.5, h=0.1, color=RED)  # Creates a red box obstacle
    set_point(obstacle, [0.5, 0.5, 0.1 / 2.])  # Sets the [x,y,z] position of the obstacle
    print('Position:', get_point(obstacle))
    set_euler(obstacle, [0, 0, np.pi / 4])  # Sets the [roll,pitch,yaw] orientation of the obstacle
    print('Orientation:', get_euler(obstacle))

    with LockRenderer():  # Temporarily prevents the renderer from updating for improved loading efficiency
        with HideOutput():  # Temporarily suppresses pybullet output
            robot = load_model(ROOMBA_URDF)  # Loads a robot from a *.urdf file
        robot_z = stable_z(robot, floor)  # Returns the z offset required for robot to be placed on floor
        set_point(robot, [0, 0, robot_z])  # Sets the z position of the robot
    dump_body(robot)  # Prints joint and link information about robot
    set_all_static()

    # Joints are also described by an integer index
    # The turtlebot has explicit joints representing x, y, theta
    x_joint = joint_from_name(robot, 'x')  # Looks up the robot joint named 'x'
    y_joint = joint_from_name(robot, 'y')  # Looks up the robot joint named 'y'
    theta_joint = joint_from_name(robot, 'theta')  # Looks up the robot joint named 'theta'
    joints = [x_joint, y_joint, theta_joint]

    base_link = link_from_name(robot, 'base_link')  # Looks up the robot link named 'base_link'
    world_from_obstacle = get_pose(obstacle)  # Returns the pose of the origin of obstacle wrt the world frame
    obstacle_aabb = get_subtree_aabb(obstacle)
    draw_aabb(obstacle_aabb)

    random.seed(0)  # Sets the random number generator state
    handles = []
    for i in range(10):
        for handle in handles:
            remove_debug(handle)
        print('\nIteration: {}'.format(i))
        x = random.uniform(-floor_width / 2., floor_width / 2.)
        set_joint_position(robot, x_joint, x)  # Sets the current value of the x joint
        y = random.uniform(-floor_width / 2., floor_width / 2.)
        set_joint_position(robot, y_joint, y)  # Sets the current value of the y joint
        yaw = random.uniform(-np.pi, np.pi)
        set_joint_position(robot, theta_joint, yaw)  # Sets the current value of the theta joint
        values = get_joint_positions(robot, joints)  # Obtains the current values for the specified joints
        print('Joint values: [x={:.3f}, y={:.3f}, yaw={:.3f}]'.format(*values))

        world_from_robot = get_link_pose(robot, base_link)  # Returns the pose of base_link wrt the world frame
        position, quaternion = world_from_robot  # Decomposing pose into position and orientation (quaternion)
        x, y, z = position  # Decomposing position into x, y, z
        print('Base link position: [x={:.3f}, y={:.3f}, z={:.3f}]'.format(x, y, z))
        euler = euler_from_quat(quaternion)  # Converting from quaternion to euler angles
        roll, pitch, yaw = euler  # Decomposing orientation into roll, pitch, yaw
        print('Base link orientation: [roll={:.3f}, pitch={:.3f}, yaw={:.3f}]'.format(roll, pitch, yaw))
        handles.extend(draw_pose(world_from_robot, length=0.5))  # Draws the base coordinate system (x:RED, y:GREEN, z:BLUE)

        obstacle_from_robot = multiply(invert(world_from_obstacle),
                                       world_from_robot)  # Relative transformation from robot to obstacle

        robot_aabb = get_subtree_aabb(robot, base_link)  # Computes the robot's axis-aligned bounding box (AABB)
        lower, upper = robot_aabb  # Decomposing the AABB into the lower and upper extrema
        center = (lower + upper) / 2.  # Computing the center of the AABB
        extent = upper - lower  # Computing the dimensions of the AABB
        handles.extend(draw_aabb(robot_aabb))

        collision = pairwise_collision(robot, obstacle)  # Checks whether robot is currently colliding with obstacle
        print('Collision: {}'.format(collision))
        wait_for_duration(1.0)  # Like sleep() but also updates the viewer

    wait_for_user()  # Like raw_input() but also updates the viewer
    # Destroys the pybullet world
    disconnect()

def eval_task_with_seed(domain, seed, learner, sample_strategy=DIVERSELK,
                        task_lengthscale=None, obstacle=True):
    if task_lengthscale is not None:
        learner.task_lengthscale = task_lengthscale
    print('sample a new task')
    current_wd, trial_wd = start_task()

    ### fill in additional object
    item_ranges = {}
    ###
    sim_world, collector, task, feature, evalfunc, saver = sample_task_with_seed(
        domain.skill, seed=seed, visualize=False, item_ranges=item_ranges)
    context = domain.context_from_feature(feature)
    print('context=', *context)

    # create coffee machine
    if obstacle:
        bowl_name = 'bowl'
        cup_name = 'cup'
        if domain.skill == 'scoop':
            cup_name = 'spoon'
        for key in sim_world.perception.sim_bodies:
            if bowl_name in key:
                bowl_name = key
            if cup_name in key:
                cup_name = key
        cylinder_size = (.01, .03)
        dim_bowl = get_aabb_extent(p.getAABB(sim_world.perception.sim_bodies[bowl_name]))
        dim_cup = get_aabb_extent(p.getAABB(sim_world.perception.sim_bodies[cup_name]))
        bowl_point = np.array(get_point(sim_world.perception.sim_bodies[bowl_name]))

        cylinder_point = bowl_point.copy()
        cylinder_offset = 1.2
        if domain.skill == 'scoop':
            cylinder_offset = 1.6
        cylinder_point[2] += (dim_bowl[2] + dim_cup[2]) * (np.random.random_sample() * .4 + cylinder_offset) \
                             + cylinder_size[1] / 2.
        # TODO figure out the task space
        cylinder_name = create_name('cylinder', 1)
        obstacle = create_cylinder(*cylinder_size, color=(0, 0, 0, 1))
        INFO_FROM_BODY[(CLIENT, obstacle)] = ModelInfo(cylinder_name, cylinder_size, 1, 1)
        sim_world.perception.sim_bodies[cylinder_name] = obstacle
        sim_world.perception.sim_items[cylinder_name] = None
        set_pose(obstacle, (cylinder_point, unit_quat()))

        box_name = create_name('box', 1)
        box1_size = (max(.17, dim_bowl[0] / 2 + cylinder_size[0] * 2), 0.01, 0.01)
        if domain.skill == 'scoop':
            box1_size = (max(.23, dim_bowl[0] / 2 + cylinder_size[0] * 2 + 0.05), 0.01, 0.01)
        obstacle = create_box(*box1_size)
        INFO_FROM_BODY[(CLIENT, obstacle)] = ModelInfo(box_name, box1_size, 1, 1)
        sim_world.perception.sim_bodies[box_name] = obstacle
        sim_world.perception.sim_items[box_name] = None
        box1_point = cylinder_point.copy()
        box1_point[2] += cylinder_size[1] / 2 + box1_size[2] / 2
        box1_point[0] += box1_size[0] / 2 - cylinder_size[0] * 2
        set_pose(obstacle, (box1_point, unit_quat()))

        box_name = create_name('box', 2)
        box2_size = (0.01, .01, box1_point[2] - bowl_point[2] + box1_size[2] / 2)
        obstacle = create_box(*box2_size)
        INFO_FROM_BODY[(CLIENT, obstacle)] = ModelInfo(box_name, box2_size, 1, 1)
        sim_world.perception.sim_bodies[box_name] = obstacle
        sim_world.perception.sim_items[box_name] = None
        box2_point = bowl_point.copy()
        box2_point[2] += box2_size[2] / 2
        box2_point[0] = box1_point[0] + box1_size[0] / 2
        set_pose(obstacle, (box2_point, unit_quat()))

    result = get_sample_strategy_result(sample_strategy, learner, context, sim_world,
                                        collector, task, feature, evalfunc, saver)
    print('context=', *context)
    complete_task(sim_world, current_wd, trial_wd)
    return result

def get_floor_z(world, floor_z=0.005):
    return get_point(world.floor)[2] + floor_z

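# Minimal usage sketch for get_floor_z (assumes a `world` with a `floor` body as
# above; `block`, `x`, `y`, and `block_height` are hypothetical): rest a body on
# the floor by offsetting its center half its height above the floor plane.
#
#   z = get_floor_z(world)
#   set_point(block, [x, y, z + block_height / 2.])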