def main(deterministic=False, observable=False, collisions=True, focused=True, factor=True):
    # TODO: global search over the state
    belief_problem = get_belief_problem(deterministic, observable)
    pddlstream_problem = to_pddlstream(belief_problem, collisions)

    set_cost_scale(SCALE_COST)
    print('Cost scale:', get_cost_scale())
    pr = cProfile.Profile()
    pr.enable()
    planner = 'ff-wastar1'
    if focused:
        stream_info = {
            'GE': StreamInfo(from_test(ge_fn), eager=False),
            'prob-after-move': StreamInfo(from_fn(get_opt_move_fn(factor=factor))),
            'MoveCost': FunctionInfo(move_cost_fn),
            'prob-after-look': StreamInfo(from_fn(get_opt_obs_fn(factor=factor))),
            'LookCost': FunctionInfo(get_look_cost_fn(p_look_fp=0, p_look_fn=0)),
        }
        solution = solve_focused(pddlstream_problem, stream_info=stream_info,
                                 planner=planner, debug=False,
                                 success_cost=0, unit_costs=False, max_time=30)
    else:
        solution = solve_incremental(pddlstream_problem, planner=planner, debug=True,
                                     success_cost=MAX_COST, unit_costs=False, max_time=30)
    pr.disable()
    pstats.Stats(pr).sort_stats('tottime').print_stats(10)
    print_solution(solution)
    plan, cost, init = solution
    print('Real cost:', cost)
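
# Hypothetical entry point (a sketch, not part of the original file): exposes the
# keyword arguments of main() above on the command line using argparse. The flag
# names and help strings are assumptions; only the main() signature is taken from
# the source.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--deterministic', action='store_true',
                        help='use a deterministic belief problem')
    parser.add_argument('-o', '--observable', action='store_true',
                        help='make the state fully observable')
    parser.add_argument('--no-collisions', action='store_true',
                        help='disable collision constraints')
    parser.add_argument('-i', '--incremental', action='store_true',
                        help='use the incremental algorithm instead of the focused one')
    args = parser.parse_args()
    main(deterministic=args.deterministic, observable=args.observable,
         collisions=not args.no_collisions, focused=not args.incremental)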
def set_unit_costs(domain):
    # Cost of None becomes zero if metric = True
    set_cost_scale(1)
    for action in domain.actions:
        action.cost = make_cost(1)
def solve_pddlstream(problem, node_points, element_bodies, planner=GREEDY_PLANNER, max_time=60):
    # TODO: try search at different cost levels (i.e. w/ and w/o abstract)
    # TODO: only consider axioms that could be relevant
    # TODO: iterated search using random restarts
    # TODO: most of the time seems to be spent extracting the stream plan
    # TODO: NEGATIVE_SUFFIX to make axioms easier
    # TODO: sort by action cost heuristic
    # http://www.fast-downward.org/Doc/Evaluator#Max_evaluator
    temporal = DURATIVE_ACTIONS in problem.domain_pddl
    print('Init:', problem.init)
    print('Goal:', problem.goal)
    print('Max time:', max_time)
    print('Temporal:', temporal)

    stream_info = {
        # TODO: stream effort
        'sample-print': StreamInfo(PartialInputs(unique=True)),
        'sample-move': StreamInfo(PartialInputs(unique=True)),
        'test-cfree-traj-conf': StreamInfo(p_success=1e-2, negate=True),  # , verbose=False),
        'test-cfree-traj-traj': StreamInfo(p_success=1e-2, negate=True),
        'TrajConfCollision': FunctionInfo(p_success=1e-1, overhead=1),  # TODO: verbose
        'TrajTrajCollision': FunctionInfo(p_success=1e-1, overhead=1),  # TODO: verbose
        'Distance': FunctionInfo(opt_fn=get_opt_distance_fn(element_bodies, node_points), eager=True),
        # 'Length': FunctionInfo(eager=True),  # Need to eagerly evaluate otherwise 0 makespan (failure)
        # 'Duration': FunctionInfo(opt_fn=lambda r, t: opt_distance / TOOL_VELOCITY, eager=True),
        # 'Euclidean': FunctionInfo(eager=True),
    }

    # TODO: goal serialization
    # TODO: could revert back to goal count now that no deadends
    # TODO: limit the branching factor if necessary
    # TODO: ensure that function costs aren't pruning plans
    if not temporal:
        # Reachability heuristics good for detecting dead-ends
        # Infeasibility from the start means disconnected or collision
        set_cost_scale(1)
        # planner = 'ff-ehc'
        # planner = 'ff-lazy-tiebreak'  # Branching factor becomes large. Rely on preferred. Preferred should also be cheaper
        planner = 'ff-eager-tiebreak'  # Need to use an eager search, otherwise doesn't incorporate child cost
        # planner = 'max-astar'

    # TODO: assert (instance.value == value)
    with LockRenderer(lock=False):
        # solution = solve_incremental(problem, planner='add-random-lazy', max_time=600,
        #                              max_planner_time=300, debug=True)
        # TODO: allow some types of failures
        solution = solve_focused(
            problem, stream_info=stream_info, max_time=max_time,
            effort_weight=None, unit_efforts=True, unit_costs=False,  # TODO: effort_weight=None vs 0
            max_skeletons=None, bind=True, max_failures=INF,  # 0 | INF
            planner=planner, max_planner_time=60, debug=False, reorder=False,
            initial_complexity=1)

    print_solution(solution)
    plan, _, certificate = solution
    # TODO: post-process by calling planner again
    # TODO: could solve for trajectories conditioned on the sequence
    return plan, certificate
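
# Hypothetical usage sketch: how the extrusion planner above might be invoked by its
# caller. `extrusion_problem`, `node_points`, and `element_bodies` are assumed to be
# provided by the surrounding construction code; the (name, args) action fields follow
# the plan convention used with print_solution above.
def report_extrusion_plan(extrusion_problem, node_points, element_bodies):
    plan, certificate = solve_pddlstream(extrusion_problem, node_points, element_bodies, max_time=120)
    if plan is None:
        print('No plan found')
        return None
    for i, action in enumerate(plan):
        print('{}) {} {}'.format(i, action.name, action.args))
    return plan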
def solve_pddlstream(belief, problem, args, skeleton=None, replan_actions=set(),
                     max_time=INF, max_memory=MAX_MEMORY, max_cost=INF):
    set_cost_scale(COST_SCALE)
    reset_globals()
    stream_info = get_stream_info()
    #print(set(stream_map) - set(stream_info))
    skeletons = create_ordered_skeleton(skeleton)
    max_cost = min(max_cost, COST_BOUND)
    print('Max cost: {:.3f} | Max runtime: {:.3f}'.format(max_cost, max_time))
    constraints = PlanConstraints(skeletons=skeletons, max_cost=max_cost, exact=True)

    success_cost = 0 if args.anytime else INF
    planner = 'ff-astar' if args.anytime else 'ff-wastar2'
    search_sample_ratio = 0.5  # 0.5
    max_planner_time = 10
    # TODO: max number of samples per iteration flag
    # TODO: don't greedily expand samples with too high of a complexity if out of time

    pr = cProfile.Profile()
    pr.enable()
    saver = WorldSaver()
    sim_state = belief.sample_state()
    sim_state.assign()
    wait_for_duration(0.1)
    with LockRenderer(lock=not args.visualize):
        # TODO: option to only consider costs during local optimization
        # effort_weight = 0 if args.anytime else 1
        effort_weight = 1e-3 if args.anytime else 1
        #effort_weight = 0
        #effort_weight = None
        solution = solve_focused(
            problem, constraints=constraints, stream_info=stream_info,
            replan_actions=replan_actions, initial_complexity=5,
            planner=planner, max_planner_time=max_planner_time,
            unit_costs=args.unit, success_cost=success_cost,
            max_time=max_time, max_memory=max_memory,
            verbose=True, debug=False,
            unit_efforts=True, effort_weight=effort_weight, max_effort=INF,
            # bind=True, max_skeletons=None,
            search_sample_ratio=search_sample_ratio)
        saver.restore()

    # print([(s.cost, s.time) for s in SOLUTIONS])
    # print(SOLUTIONS)
    print_solution(solution)
    pr.disable()
    pstats.Stats(pr).sort_stats('tottime').print_stats(25)  # cumtime | tottime
    return solution
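
# Hypothetical usage sketch: unpacking the solution tuple returned above, following
# the (plan, cost, certificate) convention used elsewhere in this file. `belief`,
# `problem`, and `args` are assumed to come from the surrounding task setup; the
# helper name is an assumption, not part of the original code.
def plan_once(belief, problem, args):
    solution = solve_pddlstream(belief, problem, args, max_time=60)
    plan, cost, certificate = solution
    if plan is None:
        print('Planning failed')
    else:
        print('Found a plan with {} actions and cost {:.3f}'.format(len(plan), cost))
    return plan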