def test_grasps(robot, node_points, elements):
    element = elements[-1]
    [element_body] = create_elements(node_points, [element])

    phi = 0
    link = link_from_name(robot, TOOL_NAME)
    link_pose = get_link_pose(robot, link)
    draw_pose(link_pose)  #, parent=robot, parent_link=link)
    wait_for_interrupt()

    n1, n2 = element
    p1 = node_points[n1]
    p2 = node_points[n2]
    length = np.linalg.norm(p2 - p1)
    # Bottom of cylinder is p1, top is p2
    print(element, p1, p2)
    for phi in np.linspace(0, np.pi, 10, endpoint=True):
        theta = np.pi / 4
        for t in np.linspace(-length / 2, length / 2, 10):
            # Move backwards along the tool axis because +z points out of the end effector
            translation = Pose(point=Point(z=-t))
            direction = Pose(euler=Euler(roll=np.pi / 2 + theta, pitch=phi))
            angle = Pose(euler=Euler(yaw=1.2))
            grasp_pose = multiply(angle, direction, translation)
            element_pose = multiply(link_pose, grasp_pose)
            set_pose(element_body, element_pose)
            wait_for_interrupt()

def apply(self, state, **kwargs):
    # TODO: identify surface automatically
    with LockRenderer():
        cone = get_viewcone(color=(1, 0, 0, 0.5))
        set_pose(cone, get_link_pose(self.robot, self.link))
        wait_for_duration(1e-2)
    wait_for_duration(self._duration)  # TODO: don't sleep if no viewer?
    remove_body(cone)
    wait_for_duration(1e-2)

    if self.detect:
        # TODO: the collision geometries are being visualized
        # TODO: free the renderer
        detections = get_visual_detections(self.robot, camera_link=self.camera_frame)
        print('Detections:', detections)
        for body, dist in state.b_on.items():
            obs = (body in detections) and (is_center_stable(body, self.surface))
            if obs or (self.surface not in state.task.rooms):
                # TODO: make a command for scanning a room instead?
                dist.obsUpdate(get_observation_fn(self.surface), obs)
        #state.localized.update(detections)
    # TODO: pose for each object that can be real or fake
    yield

def fn(conf1, conf2, fluents=[]):
    #path = [conf1, conf2]
    obstacles = list(obstacle_from_name.values())
    attachments = []
    for fact in fluents:
        if fact[0] == 'atpose':
            index, pose = fact[1:]
            body = brick_from_index[index].body
            set_pose(body, pose.value)
            obstacles.append(body)
        elif fact[0] == 'atgrasp':
            index, grasp = fact[1:]
            body = brick_from_index[index].body
            attachments.append(Attachment(robot, tool_link, grasp.attach, body))
        else:
            raise NotImplementedError(fact[0])

    set_joint_positions(robot, movable_joints, conf1)
    path = plan_joint_motion(robot, movable_joints, conf2,
                             obstacles=obstacles, attachments=attachments,
                             self_collisions=True, disabled_collisions=disabled_collisions,
                             #weights=weights, resolutions=resolutions,
                             restarts=5, iterations=50, smooth=100)
    if path is None:
        return None
    traj = MotionTrajectory(robot, movable_joints, path)
    return traj,

def load_world():
    # TODO: store internal world info here to be reloaded
    with HideOutput():
        robot = load_model(DRAKE_IIWA_URDF)
        #add_data_path()
        #robot = load_pybullet(KUKA_IIWA_URDF)
        floor = load_model('models/short_floor.urdf')
        sink = load_model(SINK_URDF, pose=Pose(Point(x=-0.5)))
        stove = load_model(STOVE_URDF, pose=Pose(Point(x=+0.5)))
        celery = load_model(BLOCK_URDF, fixed_base=False)
        radish = load_model(SMALL_BLOCK_URDF, fixed_base=False)
        #cup = load_model('models/dinnerware/cup/cup_small.urdf',
        #                 Pose(Point(x=+0.5, y=+0.5, z=0.5)), fixed_base=False)

    body_names = {
        sink: 'sink',
        stove: 'stove',
        celery: 'celery',
        radish: 'radish',
    }
    movable_bodies = [celery, radish]

    set_pose(celery, Pose(Point(y=0.5, z=stable_z(celery, floor))))
    set_pose(radish, Pose(Point(y=-0.5, z=stable_z(radish, floor))))
    set_default_camera()

    return robot, body_names, movable_bodies

def load_world():
    # TODO: store internal world info here to be reloaded
    set_default_camera()
    draw_global_system()
    with HideOutput():
        #add_data_path()
        robot = load_model(DRAKE_IIWA_URDF, fixed_base=True)  # DRAKE_IIWA_URDF | KUKA_IIWA_URDF
        floor = load_model('models/short_floor.urdf')
        sink = load_model(SINK_URDF, pose=Pose(Point(x=-0.5)))
        stove = load_model(STOVE_URDF, pose=Pose(Point(x=+0.5)))
        celery = load_model(BLOCK_URDF, fixed_base=False)
        radish = load_model(SMALL_BLOCK_URDF, fixed_base=False)
        #cup = load_model('models/dinnerware/cup/cup_small.urdf',
        #                 Pose(Point(x=+0.5, y=+0.5, z=0.5)), fixed_base=False)

    draw_pose(Pose(), parent=robot, parent_link=get_tool_link(robot))  # TODO: not working
    # dump_body(robot)
    # wait_for_user()

    body_names = {
        sink: 'sink',
        stove: 'stove',
        celery: 'celery',
        radish: 'radish',
    }
    movable_bodies = [celery, radish]

    set_pose(celery, Pose(Point(y=0.5, z=stable_z(celery, floor))))
    set_pose(radish, Pose(Point(y=-0.5, z=stable_z(radish, floor))))

    return robot, body_names, movable_bodies

def gen(robot, body):
    link = link_from_name(robot, BASE_LINK)
    with BodySaver(robot):
        set_base_conf(robot, np.zeros(3))
        robot_pose = get_link_pose(robot, link)
        robot_aabb = get_turtle_aabb(robot)
        # _, upper = robot_aabb
        # radius = upper[0]
        extent = get_aabb_extent(robot_aabb)
        radius = max(extent[:2]) / 2.
        #draw_aabb(robot_aabb)

    center, (diameter, height) = approximate_as_cylinder(body)
    distance = radius + diameter / 2. + epsilon
    _, _, z = get_point(body)  # Assuming already placed stably

    for [scale] in unit_generator(d=1, use_halton=use_halton):
        #theta = PI  # 0 | PI
        theta = random.uniform(*theta_interval)
        position = np.append(distance * unit_from_theta(theta=theta), [z])  # TODO: halton
        yaw = scale * 2 * PI - PI
        quat = quat_from_euler(Euler(yaw=yaw))
        body_pose = (position, quat)
        robot_from_body = multiply(invert(robot_pose), body_pose)
        grasp = Pose(body, robot_from_body)  # TODO: grasp instead of pose
        if draw:
            world_pose = multiply(get_link_pose(robot, link), grasp.value)
            set_pose(body, world_pose)
            handles = draw_pose(get_pose(body), length=1)
            wait_for_user()
            remove_handles(handles)
            #continue
        yield (grasp,)

def gen(o, p):
    # default_conf = arm_conf(a, g.carry)
    # joints = get_arm_joints(robot, a)
    # TODO: check collisions with fixed links
    target_point = point_from_pose(p.value)
    base_generator = visible_base_generator(robot, target_point, base_range)
    while True:
        for _ in range(max_attempts):
            set_pose(o, p.value)
            base_conf = next(base_generator)
            #set_base_values(robot, base_conf)
            set_joint_positions(robot, base_joints, base_conf)
            if any(pairwise_collision(robot, b) for b in fixed):
                continue
            head_conf = inverse_visibility(robot, target_point)
            if head_conf is None:  # TODO: test if visible
                continue
            #bq = Pose(robot, get_pose(robot))
            bq = Conf(robot, base_joints, base_conf)
            hq = Conf(robot, head_joints, head_conf)
            yield (bq, hq)
            break
        else:
            yield None

def plan_commands(state, viewer=False, teleport=False, profile=False, verbose=True):
    # TODO: could make indices into set of bodies to ensure the same...
    # TODO: populate the bodies here from state and not the real world
    sim_world = connect(use_gui=viewer)
    #clone_world(client=sim_world)
    task = state.task
    robot_conf = get_configuration(task.robot)
    robot_pose = get_pose(task.robot)
    with ClientSaver(sim_world):
        with HideOutput():
            robot = create_pr2(use_drake=USE_DRAKE_PR2)
        set_pose(robot, robot_pose)
        set_configuration(robot, robot_conf)
    mapping = clone_world(client=sim_world, exclude=[task.robot])
    assert all(i1 == i2 for i1, i2 in mapping.items())
    set_client(sim_world)
    saved_world = WorldSaver()  # StateSaver()

    pddlstream_problem = pddlstream_from_state(state, teleport=teleport)
    _, _, _, stream_map, init, goal = pddlstream_problem
    print('Init:', sorted(init, key=lambda f: f[0]))
    if verbose:
        print('Goal:', goal)
        print('Streams:', stream_map.keys())

    stream_info = {
        'test-vis-base': StreamInfo(eager=True, p_success=0),
        'test-reg-base': StreamInfo(eager=True, p_success=0),
    }
    hierarchy = [
        ABSTRIPSLayer(pos_pre=['AtBConf']),
    ]

    pr = cProfile.Profile()
    pr.enable()
    solution = solve_focused(pddlstream_problem, stream_info=stream_info,
                             hierarchy=hierarchy, debug=False,
                             success_cost=MAX_COST, verbose=verbose)
    plan, cost, evaluations = solution
    if MAX_COST <= cost:
        plan = None
    print_solution(solution)
    print('Finite cost:', cost < MAX_COST)
    commands = post_process(state, plan)
    pr.disable()
    if profile:
        pstats.Stats(pr).sort_stats('cumtime').print_stats(10)
    saved_world.restore()
    disconnect()
    return commands

def place_movable(certified):
    placed = []
    for literal in certified:
        if literal[0] == 'not':
            fact = literal[1]
            if fact[0] == 'trajcollision':
                _, b, p = fact[1:]
                set_pose(b, p.pose)
                placed.append(b)
    return placed

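# Usage sketch (illustrative only): place_movable() can be fed the certified facts of a
# stream plan so that any body named in a negated TrajCollision literal is moved to its
# sampled pose before re-planning. The fact structure below is hypothetical and simply
# mirrors the ('not', ('trajcollision', ..., body, pose)) literals handled above.
# certified = [('not', ('trajcollision', some_traj, block, block_pose))]
# placed = place_movable(certified)  # moves `block` to `block_pose.pose` and returns [block]
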
def main():
    parser = argparse.ArgumentParser()  # Automatically includes help
    parser.add_argument('-viewer', action='store_true', help='enable viewer.')
    args = parser.parse_args()

    connect(use_gui=True)
    #ycb_path = os.path.join(DRAKE_PATH, DRAKE_YCB)
    #ycb_path = os.path.join(YCB_PATH, YCB_TEMPLATE.format('003_cracker_box'))
    #print(ycb_path)
    #load_pybullet(ycb_path)

    with LockRenderer():
        draw_pose(unit_pose(), length=1, width=1)
        floor = create_floor()
        set_point(floor, Point(z=-EPSILON))

        table = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH/2, height=TABLE_WIDTH/2,
                             top_color=TAN, cylinder=False)
        #set_euler(table, Euler(yaw=np.pi/2))

        with HideOutput(False):
            # data_path = add_data_path()
            # robot_path = os.path.join(data_path, WSG_GRIPPER)
            robot_path = get_model_path(WSG_50_URDF)  # WSG_50_URDF | PANDA_HAND_URDF
            #robot_path = get_file_path(__file__, 'mit_arch_suction_gripper/mit_arch_suction_gripper.urdf')
            robot = load_pybullet(robot_path, fixed_base=True)
            #dump_body(robot)
            #robot = create_cylinder(radius=0.5*BLOCK_SIDE, height=4*BLOCK_SIDE)  # vacuum gripper

        block1 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=RED)
        block_z = stable_z(block1, table)
        set_point(block1, Point(z=block_z))

        block2 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=GREEN)
        set_point(block2, Point(x=+0.25, z=block_z))

        block3 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=BLUE)
        set_point(block3, Point(x=-0.15, z=block_z))

        blocks = [block1, block2, block3]

        add_line(Point(x=-TABLE_WIDTH/2, z=block_z - BLOCK_SIDE/2 + EPSILON),
                 Point(x=+TABLE_WIDTH/2, z=block_z - BLOCK_SIDE/2 + EPSILON), color=RED)
        set_camera_pose(camera_point=Point(y=-1, z=block_z+1), target_point=Point(z=block_z))

    wait_for_user()

    block_pose = get_pose(block1)
    open_gripper(robot)
    tool_link = link_from_name(robot, 'tool_link')
    base_from_tool = get_relative_pose(robot, tool_link)
    #draw_pose(unit_pose(), parent=robot, parent_link=tool_link)
    y_grasp, x_grasp = get_top_grasps(block1, tool_pose=unit_pose(),
                                      grasp_length=0.03, under=False)
    grasp = y_grasp  # fingers won't collide
    gripper_pose = multiply(block_pose, invert(grasp))
    set_pose(robot, multiply(gripper_pose, invert(base_from_tool)))
    wait_for_user('Finish?')
    disconnect()

def gen(rover, objective):
    base_joints = get_base_joints(rover)
    target_point = get_point(objective)
    base_generator = visible_base_generator(rover, target_point, base_range)
    lower_limits, upper_limits = get_custom_limits(rover, base_joints, custom_limits)
    attempts = 0
    while True:
        if max_attempts <= attempts:
            attempts = 0
            yield None
        attempts += 1
        base_conf = next(base_generator)
        if not all_between(lower_limits, base_conf, upper_limits):
            continue
        bq = Conf(rover, base_joints, base_conf)
        bq.assign()
        if any(pairwise_collision(rover, b) for b in obstacles):
            continue

        link_pose = get_link_pose(rover, link_from_name(rover, KINECT_FRAME))
        if use_cone:
            mesh, _ = get_detection_cone(rover, objective, camera_link=KINECT_FRAME,
                                         depth=max_range)
            if mesh is None:
                continue
            cone = create_mesh(mesh, color=(0, 1, 0, 0.5))
            local_pose = Pose()
        else:
            distance = get_distance(point_from_pose(link_pose), target_point)
            if max_range < distance:
                continue
            cone = create_cylinder(radius=0.01, height=distance, color=(0, 0, 1, 0.5))
            local_pose = Pose(Point(z=distance / 2.))
        set_pose(cone, multiply(link_pose, local_pose))
        # TODO: colors corresponding to scanned object

        if any(pairwise_collision(cone, b) for b in obstacles
               if b != objective and not is_placement(objective, b)):
            # TODO: ensure that this works for the rover
            remove_body(cone)
            continue
        if not reachable_test(rover, bq):
            continue
        print('Visibility attempts:', attempts)
        y = Ray(cone, rover, objective)
        yield Output(bq, y)

def apply(self, state, **kwargs):
    mesh, _ = get_detection_cone(self.robot, self.body, depth=MAX_KINECT_DISTANCE)
    assert mesh is not None
    cone = create_mesh(mesh, color=(0, 1, 0, 0.5))
    set_pose(cone, get_link_pose(self.robot, self.link))
    wait_for_duration(self._duration)  # time.sleep(1.0)
    remove_body(cone)
    state.registered.add(self.body)

def test(b1, p1, g1, b2, p2):
    if not collisions or (b1 == b2):
        return True
    p2.assign()
    grasp_pose = multiply(p1.value, invert(g1.value))
    approach_pose = multiply(p1.value, invert(g1.approach), g1.value)
    for obj_pose in interpolate_poses(grasp_pose, approach_pose):
        set_pose(b1, obj_pose)
        if pairwise_collision(b1, b2):
            return False
    return True

def gen_fn(index, pose, grasp):
    body = brick_from_index[index].body
    set_pose(body, pose.value)

    obstacles = list(obstacle_from_name.values())  # + [body]
    collision_fn = get_collision_fn(robot, movable_joints, obstacles=obstacles,
                                    attachments=[], self_collisions=SELF_COLLISIONS,
                                    disabled_collisions=disabled_collisions,
                                    custom_limits=get_custom_limits(robot))
    attach_pose = multiply(pose.value, invert(grasp.attach))
    approach_pose = multiply(attach_pose, (approach_vector, unit_quat()))
    # approach_pose = multiply(pose.value, invert(grasp.approach))

    for _ in range(max_attempts):
        if IK_FAST:
            attach_conf = sample_tool_ik(robot, attach_pose)
        else:
            set_joint_positions(robot, movable_joints, sample_fn())  # Random seed
            attach_conf = inverse_kinematics(robot, tool_link, attach_pose)
        if (attach_conf is None) or collision_fn(attach_conf):
            continue
        set_joint_positions(robot, movable_joints, attach_conf)

        if IK_FAST:
            approach_conf = sample_tool_ik(robot, approach_pose, nearby_conf=attach_conf)
        else:
            approach_conf = inverse_kinematics(robot, tool_link, approach_pose)
        if (approach_conf is None) or collision_fn(approach_conf):
            continue
        set_joint_positions(robot, movable_joints, approach_conf)

        path = plan_direct_joint_motion(robot, movable_joints, attach_conf,
                                        obstacles=obstacles, self_collisions=SELF_COLLISIONS,
                                        disabled_collisions=disabled_collisions)
        if path is None:  # TODO: retreat
            continue
        #path = [approach_conf, attach_conf]
        attachment = Attachment(robot, tool_link, grasp.attach, body)
        traj = MotionTrajectory(robot, movable_joints, path, attachments=[attachment])
        yield approach_conf, traj
        break

def apply(self, state, **kwargs):
    print(self.visual_data)
    with LockRenderer():
        visual_id = visual_shape_from_data(self.visual_data[0])
        cone = create_body(visual_id=visual_id)
        #cone = create_mesh(mesh, color=(0, 1, 0, 0.5))
        set_pose(cone, self.pose)
    wait_for_duration(self._duration)
    with LockRenderer():
        remove_body(cone)
        wait_for_duration(1e-2)
    wait_for_duration(self._duration)
    # TODO: set to transparent before removing
    yield

def apply(self, state, **kwargs):
    # TODO: identify surface automatically
    cone = get_viewcone(color=(1, 0, 0, 0.5))
    set_pose(cone, get_link_pose(self.robot, self.link))
    wait_for_duration(self._duration)  # TODO: don't sleep if no viewer?
    remove_body(cone)

    detections = get_visual_detections(self.robot)
    print('Detections:', detections)
    for body, dist in state.b_on.items():
        obs = (body in detections) and (is_center_stable(body, self.surface))
        if obs or (self.surface not in state.task.rooms):
            # TODO: make a command for scanning a room instead?
            dist.obsUpdate(get_observation_fn(self.surface), obs)

def plan_commands(state, teleport=False, profile=False, verbose=True):
    # TODO: could make indices into set of bodies to ensure the same...
    # TODO: populate the bodies here from state
    task = state.task
    robot_conf = get_configuration(task.robot)
    robot_pose = get_pose(task.robot)
    sim_world = connect(use_gui=False)
    with ClientSaver(sim_world):
        with HideOutput():
            robot = create_pr2(use_drake=USE_DRAKE_PR2)
            #robot = load_model(DRAKE_PR2_URDF, fixed_base=True)
        set_pose(robot, robot_pose)
        set_configuration(robot, robot_conf)
    clone_world(client=sim_world, exclude=[task.robot])
    set_client(sim_world)
    saved_world = WorldSaver()  # StateSaver()

    pddlstream_problem = pddlstream_from_state(state, teleport=teleport)
    _, _, _, stream_map, init, goal = pddlstream_problem
    print('Init:', sorted(init, key=lambda f: f[0]))
    if verbose:
        print('Goal:', goal)
        print('Streams:', stream_map.keys())
    stream_info = {
        'test-vis-base': StreamInfo(eager=True, p_success=0),
        'test-reg-base': StreamInfo(eager=True, p_success=0),
    }

    pr = cProfile.Profile()
    pr.enable()
    solution = solve_focused(pddlstream_problem, stream_info=stream_info,
                             max_cost=MAX_COST, verbose=verbose)
    pr.disable()
    plan, cost, evaluations = solution
    if MAX_COST <= cost:
        plan = None
    print_solution(solution)
    print('Finite cost:', cost < MAX_COST)
    print('Real cost:', float(cost) / SCALE_COST)
    if profile:
        pstats.Stats(pr).sort_stats('tottime').print_stats(10)
    saved_world.restore()
    commands = post_process(state, plan)
    disconnect()
    return commands

def apply(self, state, **kwargs):
    mesh, _ = get_detection_cone(self.robot, self.body, camera_link=self.camera_frame,
                                 depth=self.max_depth)
    if mesh is None:
        wait_for_user()
    assert mesh is not None
    with LockRenderer():
        cone = create_mesh(mesh, color=(0, 1, 0, 0.5))
        set_pose(cone, get_link_pose(self.robot, self.link))
        wait_for_duration(1e-2)
    wait_for_duration(self._duration)
    remove_body(cone)
    wait_for_duration(1e-2)
    state.registered.add(self.body)
    yield

def parse_fluents(robot, brick_from_index, fluents, obstacles):
    tool_link = link_from_name(robot, TOOL_NAME)
    attachments = []
    for fact in fluents:
        if fact[0] == 'atpose':
            index, pose = fact[1:]
            body = brick_from_index[index].body
            set_pose(body, pose.value)
            obstacles.append(body)
        elif fact[0] == 'atgrasp':
            index, grasp = fact[1:]
            body = brick_from_index[index].body
            attachments.append(Attachment(robot, tool_link, grasp.attach, body))
        else:
            raise NotImplementedError(fact[0])
    return attachments

def gen(o, p):
    if o in task.rooms:  # TODO: predicate instead
        return
    # default_conf = arm_conf(a, g.carry)
    # joints = get_arm_joints(robot, a)
    # TODO: check collisions with fixed links
    target_point = point_from_pose(p.value)
    base_generator = visible_base_generator(robot, target_point, base_range)
    while True:
        set_pose(o, p.value)  # p.assign()
        bq = Conf(robot, base_joints, next(base_generator))
        # bq = Pose(robot, get_pose(robot))
        bq.assign()
        if any(pairwise_collision(robot, b) for b in fixed):
            yield None
        else:
            yield (bq,)

def apply(self, state, **kwargs):
    # TODO: check if actually can register
    mesh, _ = get_detection_cone(self.robot, self.body, camera_link=self.camera_frame,
                                 depth=self.max_depth)
    if mesh is None:
        wait_for_user()
    # TODO: is_visible_aabb(body_aabb, **kwargs)=False sometimes without collisions
    assert mesh is not None
    with LockRenderer():
        cone = create_mesh(mesh, color=apply_alpha(GREEN, 0.5))
        set_pose(cone, get_link_pose(self.robot, self.link))
        wait_for_duration(1e-2)
    wait_for_duration(self._duration)
    remove_body(cone)
    wait_for_duration(1e-2)
    state.registered.add(self.body)
    yield

def load_world():
    # TODO: store internal world info here to be reloaded
    robot = load_model(DRAKE_IIWA_URDF)
    # robot = load_model(KUKA_IIWA_URDF)
    floor = load_model('models/short_floor.urdf')
    sink = load_model(SINK_URDF, pose=Pose(Point(x=-0.5)))
    stove = load_model(STOVE_URDF, pose=Pose(Point(x=+0.5)))
    block = load_model(BLOCK_URDF, fixed_base=False)
    #cup = load_model('models/dinnerware/cup/cup_small.urdf',
    #                 Pose(Point(x=+0.5, y=+0.5, z=0.5)), fixed_base=False)

    body_names = {
        sink: 'sink',
        stove: 'stove',
        block: 'celery',
    }
    movable_bodies = [block]

    set_pose(block, Pose(Point(y=0.5, z=stable_z(block, floor))))
    set_default_camera()

    return robot, body_names, movable_bodies

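# Minimal usage sketch for the load_world() variants above, assuming the pybullet_tools
# helpers connect/disconnect and wait_for_user seen elsewhere in this listing are imported;
# illustrative only, not part of the original example:
# connect(use_gui=True)
# robot, body_names, movable_bodies = load_world()
# wait_for_user()
# disconnect()
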
def fn(o, p, bq):
    set_pose(o, p.value)  # p.assign()
    bq.assign()
    if o in task.rooms:
        waypoints = plan_scan_path(task.robot, tilt=ROOM_SCAN_TILT)
        set_group_conf(robot, 'head', waypoints[0])
        path = plan_waypoints_joint_motion(robot, head_joints, waypoints[1:],
                                           obstacles=None, self_collisions=False)
        if path is None:
            return None
        ht = create_trajectory(robot, head_joints, path)
        hq = ht.path[0]
    else:
        target_point = point_from_pose(p.value)
        head_conf = inverse_visibility(robot, target_point)
        if head_conf is None:  # TODO: test if visible
            return None
        hq = Conf(robot, head_joints, head_conf)
        ht = Trajectory([hq])
    return (hq, ht)

def gen_fn(index, pose, grasp):
    body = brick_from_index[index].body
    #world_pose = get_link_pose(robot, tool_link)
    #draw_pose(world_pose, length=0.04)
    #set_pose(body, multiply(world_pose, grasp.attach))
    #draw_pose(multiply(pose.value, invert(grasp.attach)), length=0.04)
    #wait_for_interrupt()
    set_pose(body, pose.value)
    for _ in range(max_attempts):
        set_joint_positions(robot, movable_joints, sample_fn())  # Random seed
        attach_pose = multiply(pose.value, invert(grasp.attach))
        attach_conf = inverse_kinematics(robot, tool_link, attach_pose)
        if attach_conf is None:
            continue
        approach_pose = multiply(attach_pose, ([0, 0, -0.1], unit_quat()))
        #approach_pose = multiply(pose.value, invert(grasp.approach))
        approach_conf = inverse_kinematics(robot, tool_link, approach_pose)
        if approach_conf is None:
            continue
        # TODO: retreat
        path = plan_waypoints_joint_motion(robot, movable_joints,
                                           [approach_conf, attach_conf],
                                           obstacles=obstacle_from_name.values(),
                                           self_collisions=True,
                                           disabled_collisions=disabled_collisions)
        if path is None:
            continue
        #path = [approach_conf, attach_conf]
        attachment = Attachment(robot, tool_link, grasp.attach, body)
        traj = MotionTrajectory(robot, movable_joints, path, attachments=[attachment])
        yield approach_conf, traj
        break

def main():
    parser = argparse.ArgumentParser()  # Automatically includes help
    parser.add_argument('-viewer', action='store_true', help='enable viewer.')
    args = parser.parse_args()

    connect(use_gui=True)
    with LockRenderer():
        draw_pose(unit_pose(), length=1, width=1)
        floor = create_floor()
        set_point(floor, Point(z=-EPSILON))

        table1 = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH/2, height=TABLE_WIDTH/2,
                              top_color=TAN, cylinder=False)
        set_point(table1, Point(y=+0.5))

        table2 = create_table(width=TABLE_WIDTH, length=TABLE_WIDTH/2, height=TABLE_WIDTH/2,
                              top_color=TAN, cylinder=False)
        set_point(table2, Point(y=-0.5))

        tables = [table1, table2]
        #set_euler(table1, Euler(yaw=np.pi/2))

        with HideOutput():
            # data_path = add_data_path()
            # robot_path = os.path.join(data_path, WSG_GRIPPER)
            robot_path = get_model_path(WSG_50_URDF)  # WSG_50_URDF | PANDA_HAND_URDF
            robot = load_pybullet(robot_path, fixed_base=True)
            #dump_body(robot)

        block1 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=RED)
        block_z = stable_z(block1, table1)
        set_point(block1, Point(y=-0.5, z=block_z))

        block2 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=GREEN)
        set_point(block2, Point(x=-0.25, y=-0.5, z=block_z))

        block3 = create_box(w=BLOCK_SIDE, l=BLOCK_SIDE, h=BLOCK_SIDE, color=BLUE)
        set_point(block3, Point(x=-0.15, y=+0.5, z=block_z))

        blocks = [block1, block2, block3]

        set_camera_pose(camera_point=Point(x=-1, z=block_z + 1), target_point=Point(z=block_z))

    block_pose = get_pose(block1)
    open_gripper(robot)
    tool_link = link_from_name(robot, 'tool_link')
    base_from_tool = get_relative_pose(robot, tool_link)
    #draw_pose(unit_pose(), parent=robot, parent_link=tool_link)
    grasps = get_side_grasps(block1, tool_pose=Pose(euler=Euler(yaw=np.pi/2)),
                             top_offset=0.02, grasp_length=0.03, under=False)[1:2]
    for grasp in grasps:
        gripper_pose = multiply(block_pose, invert(grasp))
        set_pose(robot, multiply(gripper_pose, invert(base_from_tool)))
        wait_for_user()
    wait_for_user('Finish?')
    disconnect()

def load_world():
    # TODO: store internal world info here to be reloaded
    with HideOutput():
        robot = load_model(DRAKE_IIWA_URDF)
        # robot = load_model(KUKA_IIWA_URDF)
        floor = load_model('models/short_floor.urdf')
        sink = load_model(SINK_URDF, pose=Pose(Point(x=-0.5)))
        stove = load_model(STOVE_URDF, pose=Pose(Point(x=+0.5)))
        block = load_model(BLOCK_URDF, fixed_base=False)
        block1 = load_model(BLOCK_URDF, fixed_base=False)
        block2 = load_model(BLOCK_URDF, fixed_base=False)
        block3 = load_model(BLOCK_URDF, fixed_base=False)
        block4 = load_model(BLOCK_URDF, fixed_base=False)
        block5 = load_model(BLOCK_URDF, fixed_base=False)
        block6 = load_model(BLOCK_URDF, fixed_base=False)
        block7 = load_model(BLOCK_URDF, fixed_base=False)
        block8 = load_model(BLOCK_URDF, fixed_base=False)
        cup = load_model('models/cup.urdf',  #'models/dinnerware/cup/cup_small.urdf'
                         fixed_base=False)

    body_names = {
        sink: 'sink',
        stove: 'stove',
        block: 'celery',
        cup: 'cup',
    }
    movable_bodies = [block, cup, block1, block2, block3, block4,
                      block5, block6, block7, block8]

    set_pose(block, Pose(Point(x=0.1, y=0.5, z=stable_z(block, floor))))
    set_pose(block1, Pose(Point(x=-0.1, y=0.5, z=stable_z(block1, floor))))
    set_pose(block2, Pose(Point(y=0.35, z=stable_z(block2, floor))))
    set_pose(block3, Pose(Point(y=0.65, z=stable_z(block3, floor))))
    set_pose(block4, Pose(Point(x=0.1, y=0.65, z=stable_z(block4, floor))))
    set_pose(block5, Pose(Point(x=-0.1, y=0.65, z=stable_z(block5, floor))))
    set_pose(block6, Pose(Point(x=0.1, y=0.35, z=stable_z(block6, floor))))
    set_pose(block7, Pose(Point(x=-0.1, y=0.35, z=stable_z(block7, floor))))
    set_pose(block8, Pose(Point(x=0, y=0.5, z=0.45)))
    set_pose(cup, Pose(Point(y=0.5, z=stable_z(cup, floor))))
    set_default_camera()

    return robot, body_names, movable_bodies