def test_drake_base_motion(pr2, base_start, base_goal, obstacles=[]):
    # TODO: combine this with test_arm_motion
    """Drake's PR2 URDF has explicit base joints."""
    disabled_collisions = get_disabled_collisions(pr2)
    base_joints = [joint_from_name(pr2, name) for name in PR2_GROUPS['base']]
    set_joint_positions(pr2, base_joints, base_start)
    base_joints = base_joints[:2]
    base_goal = base_goal[:len(base_joints)]
    wait_if_gui('Plan Base?')
    with LockRenderer(lock=False):
        base_path = plan_joint_motion(pr2, base_joints, base_goal, obstacles=obstacles,
                                      disabled_collisions=disabled_collisions)
    if base_path is None:
        print('Unable to find a base path')
        return
    print(len(base_path))
    for bq in base_path:
        set_joint_positions(pr2, base_joints, bq)
        if SLEEP is None:
            wait_if_gui('Continue?')
        else:
            wait_for_duration(SLEEP)
def iterate_commands(state, commands, time_step=DEFAULT_TIME_STEP, pause=False):
    if commands is None:
        return False
    for i, command in enumerate(commands):
        print('\nCommand {:2}/{:2}: {}'.format(i + 1, len(commands), command))
        # TODO: skip to end
        # TODO: downsample
        for j, _ in enumerate(command.iterate(state)):
            state.derive()
            if j == 0:
                continue
            if time_step is None:
                wait_for_duration(1e-2)
                wait_for_user('Command {:2}/{:2} | step {:2} | Next?'.format(
                    i + 1, len(commands), j))
            elif time_step == 0:
                pass
            else:
                wait_for_duration(time_step)
        if pause:
            wait_for_user('Continue?')
    return True
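# A minimal usage sketch (an assumption, not from the source): replay a planned
# command sequence on a State, pausing for user input between steps. `State` and
# `plan_commands` stand in for the surrounding task-level planning code.
def demo_iterate_commands(problem):
    state = State()
    commands = plan_commands(problem)
    if not iterate_commands(state, commands, time_step=None, pause=True):
        print('No commands to execute')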
def simulate_parallel(robots, plan, time_step=0.1, speed_up=10., record=None):  # None | video.mp4
    # TODO: ensure the step size is appropriate
    makespan = compute_duration(plan)
    print('\nMakespan: {:.3f}'.format(makespan))
    trajectories = extract_parallel_trajectories(plan)
    if trajectories is None:
        return
    wait_if_gui('Begin?')
    num_motion = sum(action.name == 'move' for action in plan)
    with VideoSaver(record):
        for t in inclusive_range(0, makespan, time_step):
            # if action.start <= t <= get_end(action):
            executing = Counter(traj.robot for traj in trajectories
                                if traj.at(t) is not None)
            print('t={:.3f}/{:.3f} | executing={}'.format(t, makespan, executing))
            for robot in robots:
                num = executing.get(robot, 0)
                if 2 <= num:
                    raise RuntimeError('Robot {} simultaneously executing {} trajectories'.format(robot, num))
                if (num_motion == 0) and (num == 0):
                    set_configuration(robot, DUAL_CONF)
            #step_simulation()
            wait_for_duration(time_step / speed_up)
    wait_if_gui('Finish?')
def are_visible(world):
    ray_names = []
    rays = []
    for name in world.movable:
        for camera, info in world.cameras.items():
            camera_pose = get_pose(info.body)
            camera_point = point_from_pose(camera_pose)
            point = point_from_pose(get_pose(world.get_body(name)))
            if is_visible_point(CAMERA_MATRIX, KINECT_DEPTH, point, camera_pose=camera_pose):
                ray_names.append(name)
                rays.append(Ray(camera_point, point))
    ray_results = batch_ray_collision(rays)
    visible_indices = [idx for idx, (name, result) in enumerate(zip(ray_names, ray_results))
                       if result.objectUniqueId == world.get_body(name)]
    visible_names = {ray_names[idx] for idx in visible_indices}
    print('Detected:', sorted(visible_names))
    if has_gui():
        handles = [add_line(rays[idx].start, rays[idx].end, color=BLUE)
                   for idx in visible_indices]
        wait_for_duration(1.0)
        remove_handles(handles)
    # TODO: the object drop seems to happen around here
    return visible_names
def step_trajectory(trajectory, attachments={}, time_step=np.inf):
    for _ in trajectory.iterate():
        for attachment in attachments.values():
            attachment.assign()
        if time_step == np.inf:
            wait_for_interrupt(time_step)
        else:
            wait_for_duration(time_step)
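# A hedged usage sketch (assumption): step a trajectory while a grasped body
# stays rigidly attached to the gripper. create_attachment is the
# pybullet_tools.utils-style helper assumed here.
def demo_step_trajectory(robot, tool_link, grasped_body, trajectory):
    attachment = create_attachment(robot, tool_link, grasped_body)
    step_trajectory(trajectory, attachments={grasped_body: attachment}, time_step=0.05)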
def step_curve(robot, joints, path, step_size=None):
    wait_if_gui(message='Begin?')
    for i, conf in enumerate(path):
        set_joint_positions(robot, joints, conf)
        if step_size is None:
            wait_if_gui(message='{}/{} Continue?'.format(i, len(path)))
        else:
            wait_for_duration(duration=step_size)
    wait_if_gui(message='Finish?')
def main(execute='apply'):
    #parser = argparse.ArgumentParser()  # Automatically includes help
    #parser.add_argument('-display', action='store_true', help='enable viewer.')
    #args = parser.parse_args()
    #display = args.display
    display = True
    #display = False

    #filename = 'transporter.json'  # simple | simple2 | cook | invert | kitchen | nonmonotonic_4_1
    #problem_fn = lambda: load_json_problem(filename)
    problem_fn = cooking_problem
    # holding_problem | stacking_problem | cleaning_problem | cooking_problem
    # cleaning_button_problem | cooking_button_problem

    with HideOutput():
        sim_world = connect(use_gui=False)
    set_client(sim_world)
    add_data_path()
    problem = problem_fn()
    print(problem)
    #state_id = save_state()

    if display:
        with HideOutput():
            real_world = connect(use_gui=True)
        add_data_path()
        with ClientSaver(real_world):
            problem_fn()  # TODO: way of doing this without reloading?
            update_state()
            wait_for_duration(0.1)
            #wait_for_interrupt()

    commands = plan_commands(problem)
    if (commands is None) or not display:
        with HideOutput():
            disconnect()
        return

    time_step = 0.01
    set_client(real_world)
    if execute == 'control':
        enable_gravity()
        control_commands(commands)
    elif execute == 'execute':
        step_commands(commands, time_step=time_step)
    elif execute == 'step':
        step_commands(commands)
    elif execute == 'apply':
        state = State()
        apply_commands(state, commands, time_step=time_step)
    else:
        raise ValueError(execute)

    wait_for_interrupt()
    with HideOutput():
        disconnect()
def step_command(world, command, attachments, time_step=None):
    # TODO: end_only
    # TODO: more generally, downsample
    for i, _ in enumerate(command.iterate(world, attachments)):
        for attachment in attachments.values():
            attachment.assign()
        if i == 0:
            continue
        if time_step is None:
            wait_for_duration(1e-2)
            wait_for_user('Step {}) Next?'.format(i))
        else:
            wait_for_duration(time_step)
def test_arm_motion(pr2, left_joints, arm_goal):
    disabled_collisions = get_disabled_collisions(pr2)
    wait_if_gui('Plan Arm?')
    with LockRenderer(lock=False):
        arm_path = plan_joint_motion(pr2, left_joints, arm_goal,
                                     disabled_collisions=disabled_collisions)
    if arm_path is None:
        print('Unable to find an arm path')
        return
    print(len(arm_path))
    for q in arm_path:
        set_joint_positions(pr2, left_joints, q)
        #wait_if_gui('Continue?')
        wait_for_duration(0.01)
def test_kinematic(robot, door, target_x):
    wait_if_gui('Begin?')
    robot_joints = get_movable_joints(robot)[:3]
    joint = robot_joints[0]
    start_x = get_joint_position(robot, joint)
    num_steps = int(math.ceil(abs(target_x - start_x) / 1e-2))
    for x in interpolate(start_x, target_x, num_steps=num_steps):
        set_joint_position(robot, joint=joint, value=x)
        #with LockRenderer():
        solve_collision_free(door, robot, draw=False)
        wait_for_duration(duration=1e-2)
        #wait_if_gui()
    wait_if_gui('Finish?')
def iterate_path(robot, joints, path, step_size=None):  # 1e-2 | None
    if path is None:
        return
    path = adjust_path(robot, joints, path)
    with LockRenderer():
        handles = draw_path(path)
    wait_if_gui(message='Begin?')
    for i, conf in enumerate(path):
        set_joint_positions(robot, joints, conf)
        if step_size is None:
            wait_if_gui(message='{}/{} Continue?'.format(i, len(path)))
        else:
            wait_for_duration(duration=step_size)
    wait_if_gui(message='Finish?')
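# A minimal sketch (assumption) of the plan-then-replay pattern used throughout
# this file: plan with plan_joint_motion, then visualize with iterate_path.
def demo_plan_and_iterate(robot, joints, goal_conf, obstacles=[]):
    with LockRenderer(lock=True):
        path = plan_joint_motion(robot, joints, goal_conf, obstacles=obstacles)
    if path is None:
        print('Unable to find a path')
        return
    iterate_path(robot, joints, path, step_size=1e-2)  # step_size=None steps on user input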
def test_base_motion(pr2, base_start, base_goal, obstacles=[]):
    #disabled_collisions = get_disabled_collisions(pr2)
    set_base_values(pr2, base_start)
    wait_if_gui('Plan Base-pr2?')
    base_limits = ((-2.5, -2.5), (2.5, 2.5))
    with LockRenderer(lock=False):
        base_path = plan_base_motion(pr2, base_goal, base_limits, obstacles=obstacles)
    if base_path is None:
        print('Unable to find a base path')
        return
    print(len(base_path))
    for bq in base_path:
        set_base_values(pr2, bq)
        if SLEEP is None:
            wait_if_gui('Continue?')
        else:
            wait_for_duration(SLEEP)
def simulate_parallel(robots, plan, time_step=0.1, speed_up=10., record=None):  # None | video.mp4
    # TODO: ensure the step size is appropriate
    if plan is None:  # Check before computing the makespan
        return
    makespan = compute_duration(plan)
    print('\nMakespan: {:.3f}'.format(makespan))
    trajectories = []
    for action in plan:
        command = action.args[-1]
        if (action.name == 'move') and (command.start_conf is action.args[-2].positions):
            command = command.reverse()
        command.retime(start_time=action.start)
        #print(action)
        #print(action.start, get_end(action), action.duration)
        #print(command.start_time, command.end_time, command.duration)
        #for traj in command.trajectories:
        #    print(traj, traj.start_time, traj.end_time, traj.duration)
        trajectories.extend(command.trajectories)
    #print(sum(traj.duration for traj in trajectories))
    num_motion = sum(action.name == 'move' for action in plan)
    wait_if_gui('Begin?')
    with VideoSaver(record):
        for t in inclusive_range(0, makespan, time_step):
            # if action.start <= t <= get_end(action):
            executing = Counter(traj.robot for traj in trajectories
                                if traj.at(t) is not None)
            print('t={:.3f}/{:.3f} | executing={}'.format(t, makespan, executing))
            for robot in robots:
                num = executing.get(robot, 0)
                if 2 <= num:
                    raise RuntimeError('Robot {} simultaneously executing {} trajectories'.format(robot, num))
                if (num_motion == 0) and (num == 0):
                    set_configuration(robot, DUAL_CONF)
            #step_simulation()
            wait_for_duration(time_step / speed_up)
    wait_if_gui('Finish?')
def simulate(self, state, real_per_sim=1, time_step=1./60, **kwargs):
    path = list(self.path)
    path = adjust_path(self.robot, self.joints, path)
    path = waypoints_from_path(path)
    if len(path) <= 1:
        return True
    for joints, path in decompose_into_paths(self.joints, path):
        positions_curve = interpolate_path(self.robot, joints, path)
        print('Following {} {}-DOF waypoints in {:.3f} seconds'.format(
            len(path), len(joints), positions_curve.x[-1]))
        for t in np.arange(positions_curve.x[0], positions_curve.x[-1], step=time_step):
            positions = positions_curve(t)
            set_joint_positions(self.robot, joints, positions)
            state.derive()
            wait_for_duration(real_per_sim * time_step)
    return True
def simulate_printing(node_points, trajectories, time_step=0.1, speed_up=10.):
    # TODO: deprecate
    print_trajectories = [traj for traj in trajectories
                          if isinstance(traj, PrintTrajectory)]
    handles = []
    current_time = 0.
    current_traj = print_trajectories.pop(0)
    current_curve = current_traj.interpolate_tool(node_points, start_time=current_time)
    current_position = current_curve.y[0]
    while True:
        print('Time: {:.3f} | Remaining: {} | Segments: {}'.format(
            current_time, len(print_trajectories), len(handles)))
        end_time = current_curve.x[-1]
        if end_time < current_time + time_step:
            handles.append(add_line(current_position, current_curve.y[-1], color=RED))
            if not print_trajectories:
                break
            current_traj = print_trajectories.pop(0)
            current_curve = current_traj.interpolate_tool(node_points, start_time=end_time)
            current_position = current_curve.y[0]
            print('New trajectory | Start time: {:.3f} | End time: {:.3f} | Duration: {:.3f}'.format(
                current_curve.x[0], current_curve.x[-1], current_curve.x[-1] - current_curve.x[0]))
        else:
            current_time += time_step
            new_position = current_curve(current_time)
            handles.append(add_line(current_position, new_position, color=RED))
            current_position = new_position
        # TODO: longer wait for recording videos
        wait_for_duration(time_step / speed_up)
        #wait_if_gui()
    wait_if_gui()
    return handles
def check_plan(extrusion_path, planned_elements, verbose=False):
    element_from_id, node_points, ground_nodes = load_extrusion(extrusion_path)
    #checker = create_stiffness_checker(extrusion_name)
    connected_nodes = set(ground_nodes)
    handles = []
    all_connected = True
    all_stiff = True
    extruded_elements = set()
    for element in planned_elements:
        extruded_elements.add(element)
        n1, n2 = element
        #is_connected &= any(n in connected_nodes for n in element)
        is_connected = (n1 in connected_nodes)
        connected_nodes.update(element)
        #is_connected = check_connected(ground_nodes, extruded_elements)
        all_connected &= is_connected  # Previously commented out, leaving all_connected always True
        is_stiff = test_stiffness(extrusion_path, element_from_id, extruded_elements, verbose=verbose)
        all_stiff &= is_stiff
        if verbose:
            structures = get_connected_structures(extruded_elements)
            print('Elements: {} | Structures: {} | Connected: {} | Stiff: {}'.format(
                len(extruded_elements), len(structures), is_connected, is_stiff))
        if has_gui():
            is_stable = is_connected and is_stiff
            color = BLACK if is_stable else RED
            handles.append(draw_element(node_points, element, color))
            wait_for_duration(0.1)
            if not is_stable:
                wait_for_user()
    # TODO: make these counts instead
    print('Connected: {} | Stiff: {}'.format(all_connected, all_stiff))
    return all_connected and all_stiff
def solve_pddlstream(belief, problem, args, skeleton=None, replan_actions=set(),
                     max_time=INF, max_memory=MAX_MEMORY, max_cost=INF):
    set_cost_scale(COST_SCALE)
    reset_globals()
    stream_info = get_stream_info()
    #print(set(stream_map) - set(stream_info))
    skeletons = create_ordered_skeleton(skeleton)
    max_cost = min(max_cost, COST_BOUND)
    print('Max cost: {:.3f} | Max runtime: {:.3f}'.format(max_cost, max_time))
    constraints = PlanConstraints(skeletons=skeletons, max_cost=max_cost, exact=True)

    success_cost = 0 if args.anytime else INF
    planner = 'ff-astar' if args.anytime else 'ff-wastar2'
    search_sample_ratio = 0.5  # 0.5
    max_planner_time = 10
    # TODO: max number of samples per iteration flag
    # TODO: don't greedily expand samples with too high of a complexity if out of time

    pr = cProfile.Profile()
    pr.enable()
    saver = WorldSaver()
    sim_state = belief.sample_state()
    sim_state.assign()
    wait_for_duration(0.1)
    with LockRenderer(lock=not args.visualize):
        # TODO: option to only consider costs during local optimization
        #effort_weight = 0 if args.anytime else 1
        effort_weight = 1e-3 if args.anytime else 1
        #effort_weight = 0
        #effort_weight = None
        solution = solve_focused(problem, constraints=constraints,
                                 stream_info=stream_info, replan_actions=replan_actions,
                                 initial_complexity=5, planner=planner,
                                 max_planner_time=max_planner_time,
                                 unit_costs=args.unit, success_cost=success_cost,
                                 max_time=max_time, max_memory=max_memory,
                                 verbose=True, debug=False,
                                 unit_efforts=True, effort_weight=effort_weight, max_effort=INF,
                                 #bind=True, max_skeletons=None,
                                 search_sample_ratio=search_sample_ratio)
        saver.restore()
    #print([(s.cost, s.time) for s in SOLUTIONS])
    #print(SOLUTIONS)
    print_solution(solution)
    pr.disable()
    pstats.Stats(pr).sort_stats('tottime').print_stats(25)  # cumtime | tottime
    return solution
def simulate(self, state, **kwargs):
    wait_for_duration(self.duration)
def simulate(self, state, time_per_step=DEFAULT_TIME_STEP, **kwargs):
    for j, _ in enumerate(self.iterate(state)):
        state.derive()
        if j != 0:
            wait_for_duration(time_per_step)
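# The simulate() methods above assume a Command-like object whose iterate(state)
# generator yields once per simulation step and which exposes a duration. This
# class is an illustrative assumption (not the source's class hierarchy) showing
# the minimal contract.
class ExampleWaitCommand(object):
    def __init__(self, duration, num_steps=10):
        self.duration = duration  # Used by the duration-only simulate()
        self.num_steps = num_steps
    def iterate(self, state):
        for step in range(self.num_steps):
            # Mutate the simulator here (e.g. set_joint_positions), then yield
            yield step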
def display_trajectories(node_points, ground_nodes, trajectories,
                         animate=True, time_step=0.02, video=False):
    if trajectories is None:
        return
    set_extrusion_camera(node_points)
    planned_elements = recover_sequence(trajectories)
    colors = sample_colors(len(planned_elements))
    #if not animate:
    #    draw_ordered(planned_elements, node_points)
    #    wait_for_user()
    #    disconnect()
    #    return

    video_saver = None
    if video:
        handles = draw_model(planned_elements, node_points, ground_nodes)  # Allows user to adjust the camera
        wait_for_user()
        remove_all_debug()
        wait_for_duration(0.1)
        video_saver = VideoSaver('video.mp4')  # has_gui()
        time_step = 0.001
    else:
        wait_for_user()

    #element_bodies = dict(zip(planned_elements, create_elements(node_points, planned_elements)))
    #for body in element_bodies.values():
    #    set_color(body, (1, 0, 0, 0))
    connected_nodes = set(ground_nodes)
    printed_elements = []
    print('Trajectories:', len(trajectories))
    for i, trajectory in enumerate(trajectories):
        #wait_for_user()
        #set_color(element_bodies[element], (1, 0, 0, 1))
        last_point = None
        handles = []
        for _ in trajectory.iterate():
            # TODO: the robot body could be different
            if isinstance(trajectory, PrintTrajectory):
                current_point = point_from_pose(trajectory.end_effector.get_tool_pose())
                if last_point is not None:
                    #color = BLUE if is_ground(trajectory.element, ground_nodes) else RED
                    color = colors[len(printed_elements)]
                    handles.append(add_line(last_point, current_point, color=color, width=LINE_WIDTH))
                last_point = current_point
            if time_step is None:
                wait_for_user()
            else:
                wait_for_duration(time_step)

        if isinstance(trajectory, PrintTrajectory):
            if not trajectory.path:
                color = colors[len(printed_elements)]
                handles.append(draw_element(node_points, trajectory.element, color=color))
                #wait_for_user()
            is_connected = (trajectory.n1 in connected_nodes)  # and (trajectory.n2 in connected_nodes)
            print('{}) {:9} | Connected: {} | Ground: {} | Length: {}'.format(
                i, str(trajectory), is_connected,
                is_ground(trajectory.element, ground_nodes), len(trajectory.path)))
            if not is_connected:
                wait_for_user()
            connected_nodes.add(trajectory.n2)
            printed_elements.append(trajectory.element)

    if video_saver is not None:
        video_saver.restore()
    wait_for_user()
def main(floor_width=2.0):
    # Creates a pybullet world and a visualizer for it
    connect(use_gui=True)
    identity_pose = (unit_point(), unit_quat())
    origin_handles = draw_pose(identity_pose, length=1.0)  # Draws the origin coordinate system (x:RED, y:GREEN, z:BLUE)

    # Bodies are described by an integer index
    floor = create_box(w=floor_width, l=floor_width, h=0.001, color=TAN)  # Creates a tan box object for the floor
    set_point(floor, [0, 0, -0.001 / 2.])  # Sets the [x,y,z] translation of the floor
    obstacle = create_box(w=0.5, l=0.5, h=0.1, color=RED)  # Creates a red box obstacle
    set_point(obstacle, [0.5, 0.5, 0.1 / 2.])  # Sets the [x,y,z] position of the obstacle
    print('Position:', get_point(obstacle))
    set_euler(obstacle, [0, 0, np.pi / 4])  # Sets the [roll,pitch,yaw] orientation of the obstacle
    print('Orientation:', get_euler(obstacle))

    with LockRenderer():  # Temporarily prevents the renderer from updating for improved loading efficiency
        with HideOutput():  # Temporarily suppresses pybullet output
            robot = load_model(ROOMBA_URDF)  # Loads a robot from a *.urdf file
        robot_z = stable_z(robot, floor)  # Returns the z offset required for robot to be placed on floor
        set_point(robot, [0, 0, robot_z])  # Sets the z position of the robot
    dump_body(robot)  # Prints joint and link information about robot
    set_all_static()

    # Joints are also described by an integer index
    # The turtlebot has explicit joints representing x, y, theta
    x_joint = joint_from_name(robot, 'x')  # Looks up the robot joint named 'x'
    y_joint = joint_from_name(robot, 'y')  # Looks up the robot joint named 'y'
    theta_joint = joint_from_name(robot, 'theta')  # Looks up the robot joint named 'theta'
    joints = [x_joint, y_joint, theta_joint]

    base_link = link_from_name(robot, 'base_link')  # Looks up the robot link named 'base_link'
    world_from_obstacle = get_pose(obstacle)  # Returns the pose of the origin of obstacle wrt the world frame
    obstacle_aabb = get_subtree_aabb(obstacle)
    draw_aabb(obstacle_aabb)

    random.seed(0)  # Sets the random number generator state
    handles = []
    for i in range(10):
        for handle in handles:
            remove_debug(handle)
        print('\nIteration: {}'.format(i))
        x = random.uniform(-floor_width / 2., floor_width / 2.)
        set_joint_position(robot, x_joint, x)  # Sets the current value of the x joint
        y = random.uniform(-floor_width / 2., floor_width / 2.)
        set_joint_position(robot, y_joint, y)  # Sets the current value of the y joint
        yaw = random.uniform(-np.pi, np.pi)
        set_joint_position(robot, theta_joint, yaw)  # Sets the current value of the theta joint
        values = get_joint_positions(robot, joints)  # Obtains the current values for the specified joints
        print('Joint values: [x={:.3f}, y={:.3f}, yaw={:.3f}]'.format(*values))

        world_from_robot = get_link_pose(robot, base_link)  # Returns the pose of base_link wrt the world frame
        position, quaternion = world_from_robot  # Decomposing pose into position and orientation (quaternion)
        x, y, z = position  # Decomposing position into x, y, z
        print('Base link position: [x={:.3f}, y={:.3f}, z={:.3f}]'.format(x, y, z))
        euler = euler_from_quat(quaternion)  # Converting from quaternion to euler angles
        roll, pitch, yaw = euler  # Decomposing orientation into roll, pitch, yaw
        print('Base link orientation: [roll={:.3f}, pitch={:.3f}, yaw={:.3f}]'.format(roll, pitch, yaw))
        handles.extend(draw_pose(world_from_robot, length=0.5))  # Draws the base coordinate system (x:RED, y:GREEN, z:BLUE)

        obstacle_from_robot = multiply(invert(world_from_obstacle), world_from_robot)  # Pose of the robot expressed in the obstacle's frame

        robot_aabb = get_subtree_aabb(robot, base_link)  # Computes the robot's axis-aligned bounding box (AABB)
        lower, upper = robot_aabb  # Decomposing the AABB into the lower and upper extrema
        center = (lower + upper) / 2.  # Computing the center of the AABB
        extent = upper - lower  # Computing the dimensions of the AABB
        handles.extend(draw_aabb(robot_aabb))

        collision = pairwise_collision(robot, obstacle)  # Checks whether robot is currently colliding with obstacle
        print('Collision: {}'.format(collision))
        wait_for_duration(1.0)  # Like sleep() but also updates the viewer
    wait_for_user()  # Like raw_input() but also updates the viewer

    # Destroys the pybullet world
    disconnect()
def main():
    np.set_printoptions(precision=N_DIGITS, suppress=True, threshold=3)  # , edgeitems=1) #, linewidth=1000)
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--algorithm', default='rrt_connect',  # choices=[],
                        help='The motion planning algorithm to use.')
    parser.add_argument('-c', '--cfree', action='store_true',
                        help='When enabled, disables collision checking.')
    #parser.add_argument('-p', '--problem', default='test_pour', choices=sorted(problem_fn_from_name),
    #                    help='The name of the problem to solve.')
    parser.add_argument('--holonomic', action='store_true',  # '-h',
                        help='')
    parser.add_argument('-l', '--lock', action='store_false',
                        help='')
    parser.add_argument('-r', '--reversible', action='store_true',
                        help='')
    parser.add_argument('-s', '--seed', default=None, type=int,  # None | 1
                        help='The random seed to use.')
    parser.add_argument('-n', '--num', default=10, type=int,
                        help='The number of obstacles.')
    parser.add_argument('-o', '--orientation', action='store_true',
                        help='')
    parser.add_argument('-v', '--viewer', action='store_false',
                        help='')
    args = parser.parse_args()
    connect(use_gui=args.viewer)
    #set_aabb_buffer(buffer=1e-3)
    #set_separating_axis_collisions()

    #seed = 0
    #seed = time.time()
    seed = args.seed
    if seed is None:
        seed = random.randint(0, 10**3 - 1)
    print('Seed:', seed)
    set_random_seed(seed=seed)  # None: 2147483648 = 2**31
    set_numpy_seed(seed=seed)
    #print('Random seed:', get_random_seed(), random.random())
    #print('Numpy seed:', get_numpy_seed(), np.random.random())

    #########################

    robot, base_limits, goal_conf, obstacles = problem1(n_obstacles=args.num)
    custom_limits = create_custom_base_limits(robot, base_limits)
    base_joints = joints_from_names(robot, BASE_JOINTS)

    draw_base_limits(base_limits)
    #draw_pose(get_link_pose(robot, base_link), length=0.5)
    start_conf = get_joint_positions(robot, base_joints)
    for conf in [start_conf, goal_conf]:
        draw_waypoint(conf)

    #resolutions = None
    #resolutions = np.array([0.05, 0.05, math.radians(10)])
    plan_joints = base_joints[:2] if not args.orientation else base_joints
    d = len(plan_joints)
    holonomic = args.holonomic or (d != 3)
    resolutions = 1. * DEFAULT_RESOLUTION * np.ones(d)  # TODO: default resolutions, velocities, accelerations fns
    #weights = np.reciprocal(resolutions)
    weights = np.array([1, 1, 1e-3])[:d]
    cost_fn = get_acceleration_fn(robot, plan_joints,
                                  max_velocities=MAX_VELOCITIES[:d],
                                  max_accelerations=MAX_ACCELERATIONS[:d])

    # TODO: check that taking shortest turning direction (reversible affecting?)
    if args.cfree:
        obstacles = []
    #for obstacle in obstacles:
    #    draw_aabb(get_aabb(obstacle))  # Updates automatically
    #set_all_static()  # Doesn't seem to affect

    #test_aabb(robot)
    #test_caching(robot, obstacles)
    #return

    #########################

    # TODO: filter if straight-line path is feasible
    saver = WorldSaver()
    wait_for_duration(duration=1e-2)
    start_time = time.time()
    with LockRenderer(lock=args.lock):
        with Profiler(field='cumtime', num=25):  # tottime | cumtime | None
            # TODO: draw the search tree
            path = plan_base_joint_motion(
                robot, plan_joints, goal_conf[:d],
                holonomic=holonomic, reversible=args.reversible,
                obstacles=obstacles, self_collisions=False,
                custom_limits=custom_limits,
                use_aabb=True, cache=True, max_distance=MIN_PROXIMITY,
                resolutions=resolutions, weights=weights,  # TODO: use KDTrees
                circular={2: UNBOUNDED_LIMITS if holonomic else CIRCULAR_LIMITS},
                cost_fn=cost_fn, algorithm=args.algorithm, verbose=True,
                restarts=5, max_iterations=50,
                smooth=None if holonomic else 100, smooth_time=1,  # None | 100 | INF
            )
        saver.restore()
    # TODO: draw ROADMAPS
    #wait_for_duration(duration=1e-3)

    #########################

    solved = path is not None
    length = INF if path is None else len(path)
    cost = compute_cost(robot, base_joints, path, resolutions=resolutions[:len(plan_joints)])
    print('Solved: {} | Length: {} | Cost: {:.3f} | Runtime: {:.3f} sec'.format(
        solved, length, cost, elapsed_time(start_time)))
    if path is None:
        wait_if_gui()
        disconnect()
        return

    #for i, conf in enumerate(path):
    #    set_joint_positions(robot, plan_joints, conf)
    #    wait_if_gui('{}/{}) Continue?'.format(i + 1, len(path)))
    path = extract_full_path(robot, plan_joints, path, base_joints)
    with LockRenderer():
        draw_last_roadmap(robot, base_joints)
    #for i, conf in enumerate(path):
    #    set_joint_positions(robot, base_joints, conf)
    #    wait_if_gui('{}/{}) Continue?'.format(i + 1, len(path)))

    iterate_path(robot, base_joints, path, step_size=2e-2, smooth=holonomic,
                 custom_limits=custom_limits,
                 resolutions=DEFAULT_RESOLUTION * np.ones(3),  # resolutions
                 obstacles=obstacles, self_collisions=False, max_distance=MIN_PROXIMITY)
    disconnect()
def main():
    task_names = [fn.__name__ for fn in TASKS_FNS]
    print('Tasks:', task_names)
    parser = create_parser()
    parser.add_argument('-problem', default=task_names[-1], choices=task_names,
                        help='The name of the problem to solve.')
    parser.add_argument('-record', action='store_true',
                        help='When enabled, records and saves a video at {}'.format(
                            VIDEO_TEMPLATE.format('<problem>')))
    args = parser.parse_args()
    #if args.seed is not None:
    #    set_seed(args.seed)
    #set_random_seed(0)  # Doesn't ensure deterministic
    #set_numpy_seed(1)
    print('Random seed:', get_random_seed())
    print('Numpy seed:', get_numpy_seed())

    np.set_printoptions(precision=3, suppress=True)
    world = World(use_gui=True)
    task_fn_from_name = {fn.__name__: fn for fn in TASKS_FNS}
    task_fn = task_fn_from_name[args.problem]
    task = task_fn(world, num=args.num, fixed=args.fixed)
    wait_for_duration(0.1)
    world._update_initial()
    print('Objects:', task.objects)
    #target_point = get_point(world.get_body(task.objects[0]))
    #set_camera_pose(camera_point=target_point + np.array([-1, 0, 1]), target_point=target_point)

    #if not args.record:
    #    with LockRenderer():
    #        add_markers(task, inverse_place=False)
    #wait_for_user()
    # TODO: FD instantiation is slightly slow to a deepcopy
    # 4650801/25658    2.695    0.000    8.169    0.000 /home/caelan/Programs/srlstream/pddlstream/pddlstream/algorithms/skeleton.py:114(do_evaluate_helper)
    #test_observation(world, entity_name='big_red_block0')
    #return

    # TODO: mechanism that pickles the state of the world
    real_state = create_state(world)
    video = None
    if args.record:
        wait_for_user('Start?')
        video_path = VIDEO_TEMPLATE.format(args.problem)
        video = VideoSaver(video_path)
    time_step = None if args.teleport else DEFAULT_TIME_STEP

    def observation_fn(belief):
        return observe_pybullet(world)

    def transition_fn(belief, commands):
        #if not args.record:  # Video doesn't include planning time
        #    wait_for_user()
        # TODO: restore real_state just in case?
        #wait_for_user()
        #if args.fixed:  # args.simulate
        return simulate_commands(real_state, commands)
        #return iterate_commands(real_state, commands, time_step=time_step, pause=False)

    run_policy(task, args, observation_fn, transition_fn)

    if video:
        print('Saved', video_path)
        video.restore()
    world.destroy()
def visualize_vector_field(learner, bowl_type='blue_bowl', cup_type='red_cup',
                           num=500, delta=False, alpha=0.5):
    print(learner, len(learner.results))
    xx, yy, ww = learner.xx, learner.yy, learner.weights
    learner.hyperparameters = get_parameters(learner.model)
    print(learner.hyperparameters)
    learner.query_type = REJECTION  # BEST | REJECTION | STRADDLE

    world = create_world()
    world.bodies[bowl_type] = load_cup_bowl(bowl_type)
    world.bodies[cup_type] = load_cup_bowl(cup_type)
    feature = get_pour_feature(world, bowl_type, cup_type)
    set_point(world.bodies[cup_type], np.array([0.2, 0, 0]))

    # TODO: visualize training data as well
    # TODO: artificially limit training data to make a low-confidence region
    # TODO: visualize mean and var at the same time using intensity and hue
    print('Feature:', feature)
    with LockRenderer():
        #for parameter in islice(learner.parameter_generator(world, feature, valid=True), num):
        parameters = []
        while len(parameters) < num:
            parameter = learner.sample_parameter()
            # TODO: special color if invalid
            if is_valid_pour(world, feature, parameter):
                parameters.append(parameter)

    center, _ = approximate_as_prism(world.get_body(cup_type))
    bodies = []
    with LockRenderer():
        for parameter in parameters:
            path, _ = pour_path_from_parameter(world, feature, parameter)
            pose = path[0]
            body = create_cylinder(radius=0.001, height=0.02, color=apply_alpha(GREY, alpha))
            set_pose(body, multiply(pose, Pose(point=center)))
            bodies.append(body)

    #train_sizes = inclusive_range(10, 100, 1)
    #train_sizes = inclusive_range(10, 100, 10)
    #train_sizes = inclusive_range(100, 1000, 100)
    #train_sizes = [1000]
    train_sizes = [None]

    #training_poses = []
    #for result in learner.results[:train_sizes[-1]]:
    #    path, _ = pour_path_from_parameter(world, feature, result['parameter'])
    #    pose = path[0]
    #    training_poses.append(pose)

    # TODO: draw the example as well
    scores_per_size = []
    for train_size in train_sizes:
        if train_size is not None:
            learner.xx, learner.yy, learner.weights = xx[:train_size], yy[:train_size], ww[:train_size]
            learner.retrain()
        X = np.array([learner.func.x_from_feature_parameter(feature, parameter)
                      for parameter in parameters])
        predictions = learner.predict_x(X, noise=False)  # Noise is just a constant
        scores_per_size.append([prediction['mean'] for prediction in predictions])  # mean | variance
        #scores_per_size.append([1. / np.sqrt(prediction['variance']) for prediction in predictions])
        #scores_per_size.append([prediction['mean'] / np.sqrt(prediction['variance']) for prediction in predictions])
        #plt.hist(scores_per_size[-1])
        #plt.show()
        #scores_per_size.append([scipy.stats.norm.interval(alpha=0.95, loc=prediction['mean'],
        #                                                  scale=np.sqrt(prediction['variance']))[0]
        #                        for prediction in predictions])  # mean | variance
        #score = learner.score(feature, parameter)

    if delta:
        scores_per_size = [np.array(s2) - np.array(s1)
                           for s1, s2 in zip(scores_per_size, scores_per_size[1:])]
        train_sizes = train_sizes[1:]

    all_scores = [score for scores in scores_per_size for score in scores]
    #interval = (min(all_scores), max(all_scores))
    interval = scipy.stats.norm.interval(alpha=0.95, loc=np.mean(all_scores), scale=np.std(all_scores))
    #interval = DEFAULT_INTERVAL
    print('Interval:', interval)
    print('Mean: {:.3f} | Stdev: {:.3f}'.format(np.mean(all_scores), np.std(all_scores)))

    #train_body = create_cylinder(radius=0.002, height=0.02, color=apply_alpha(BLACK, 1.0))
    for i, (train_size, scores) in enumerate(zip(train_sizes, scores_per_size)):
        print('Train size: {} | Average: {:.3f} | Min: {:.3f} | Max: {:.3f}'.format(
            train_size, np.mean(scores), min(scores), max(scores)))
        handles = []
        # TODO: visualize delta
        with LockRenderer():
            #if train_size is None:
            #    train_size = len(learner.results)
            #set_pose(train_body, multiply(training_poses[train_size-1], Pose(point=center)))
            #set_pose(world.bodies[cup_type], training_poses[train_size-1])
            for score, body in zip(scores, bodies):
                score = clip(score, *interval)
                fraction = rescale(score, interval, new_interval=(0, 1))
                color = interpolate_hue(fraction)
                #color = (1 - fraction) * np.array(RED) + fraction * np.array(GREEN)  # TODO: convex combination
                set_color(body, apply_alpha(color, alpha))
                #set_pose(world.bodies[cup_type], pose)
                #draw_pose(pose, length=0.05)
                #handles.extend(draw_point(tform_point(pose, center), color=color))
                #extent = np.array([0, 0, 0.01])
                #start = tform_point(pose, center - extent / 2)
                #end = tform_point(pose, center + extent / 2)
                #handles.append(add_line(start, end, color=color, width=1))
        wait_for_duration(0.5)
        # TODO: test on boundaries
    wait_for_user()