def v15_plan(problem, agent):
    """Plan for `agent` with the v13 seeker in A* + randomized mode.

    Identical to v14 except that `rand` is enabled on the problem.
    Returns the raw `solutions` list when no plan was found (first entry
    is False), otherwise a solution object built by Planner.make_sol_obj.
    """
    problem.a_star = True
    problem.rand = True  # Only difference from v14
    # Normalize the verbosity flag without clobbering an existing value.
    verbosity = getattr(problem, 'verbose', 0)
    problem.verbose = verbosity
    if verbosity == 1:
        print("Problem State: ")
        pyhop.print_state(problem)
    solutions = pyhop.seek_plan_v13(
        problem, problem.goals[agent], [], [], 0, verbose=problem.verbose)
    # If there is no solution
    if solutions[0] == False:
        return solutions
    return Planner.make_sol_obj(solutions, problem, agent)
def drop(state, agent, store):
    """Operator: `agent` drops the object held in its own `store`.

    Preconditions: `agent` is an agent, it owns `store`, and the store is
    not empty. On success the stored object is placed at the agent's
    current location, the store is emptied, and the mutated `state` is
    returned; on any failed precondition, returns False (pyhop convention).
    """
    if (state.is_agent[agent]
            and agent in state.stores
            and store == state.stores[agent]
            and not state.empty[store]):
        cur_loc = state.at[agent]
        obj = state.store_has[store]
        # Fix: compare to None with `is`, not `==`.
        if obj is None:
            # Dump the state for debugging before the hard failure below.
            pyhop.print_state(state)
        # NOTE(review): assert is stripped under `python -O`; consider an
        # explicit raise if this invariant must hold in production.
        assert obj is not None
        state.at[obj] = cur_loc
        state.store_has.pop(store)
        state.empty[store] = True
        return state
    else:
        return False
def v17_plan(problem, agent):
    """Plan for `agent` via the branch-and-bound seeker, then sample one plan.

    Even though seek_bb returns the full search root, this planner imitates
    the result of a linear planner by extracting a single randomized plan.
    Returns the raw `solutions` list when no plan was found (first entry is
    False), otherwise a solution object built by Planner.make_sol_obj.
    """
    problem.a_star = True
    problem.rand = True  # Only difference from v14
    # Normalize the verbosity flag without clobbering an existing value.
    verbosity = getattr(problem, 'verbose', 0)
    problem.verbose = verbosity
    if verbosity == 1:
        print("Problem State: ")
        pyhop.print_state(problem)
    root = pyhop.seek_bb(problem, problem.goals[agent],
                         verbose=problem.verbose, all_plans=True)
    # Even though we get the root, this planner imitates the result of a
    # linear planner.
    solutions = [root.get_plan(rand=True)]
    if solutions[0] == False:
        return solutions
    return Planner.make_sol_obj(solutions, problem, agent)
def on_new_plan_req(agents):
    """Handle a new planning request: reset, reload states/tasks, plan, send.

    `agents` maps an agent name to a list of (task_name, args) pairs.
    Re-seeds both agents from the module-level initial states, debug-prints
    the robot's state/tasks/methods, runs the robot planner, and forwards
    the resulting solution through the ROS node.
    """
    pyhop.reset_agents_tasks()
    pyhop.set_state("robot", state1_r)
    pyhop.set_state("human", state1_h)
    for agent_name, task_list in agents.items():
        flattened = [tuple([entry[0]] + list(entry[1])) for entry in task_list]
        pyhop.add_tasks(agent_name, flattened)
    pyhop.print_state(pyhop.agents["robot"].state)
    print(pyhop.agents["robot"].tasks)
    pyhop.print_methods("robot")
    pyhop.print_methods("human")
    sol = []
    plans = pyhop.seek_plan_robot(pyhop.agents, "robot", sol)
    print(sol)
    rosnode.send_plan(sol)
def simulate_plan_execute(result, state, wait_for_user=False):
    """Replay a plan, applying each operator and recording every state.

    `result` is a sequence of (operator_name, *args) tuples. Starting from
    `state`, each operator from pyhop.operators is applied in order; the
    state after each step is recorded into `data` via record_state_to_data,
    keyed by an increasing tick. With wait_for_user=True the user can step
    through and type "q" to quit. Returns the accumulated `data` dict.
    """
    if wait_for_user:
        # Fix: Python 2 `print`/`raw_input` could not even parse alongside
        # the py3-only syntax used elsewhere in this file.
        print("\n\nSimulating result, type q to quit\n")
    tick = 0
    data = {}
    record_state_to_data(state, data, tick)
    # Fix: do not shadow the builtin `tuple`.
    for step in result:
        print(step)
        operator = pyhop.operators[step[0]]
        state = operator(state, *step[1:])
        pyhop.print_state(state)
        # Fix: advance the tick BEFORE recording, so the post-operator state
        # does not overwrite the initial state's record at tick 0.
        tick += 1
        record_state_to_data(state, data, tick)
        # NOTE(review): prompt placed inside the loop so the user can step
        # through the simulation, matching the "type q to quit" banner.
        if wait_for_user:
            if input("") == "q":
                exit(0)
    print("data: ", data)
    return data
# --- Scenario setup (module-level side effects) ---------------------------
# `state1_h` is created before this chunk; here its remaining fields are
# filled in, the robot state is cloned from it, goals are declared, and an
# initial plan is computed once at import time.
state1_h.isCarrying = {"human": None, "robot": None}  # nobody holds a cube
state1_h.onStack = []  # the stack starts empty
state1_r = deepcopy(state1_h)  # robot starts from an identical world state

# Shared goal: cubes 4, 1, 6 stacked in that order (human and robot copies).
goal1_h = pyhop.Goal("goal1_h")
goal1_h.isOnStack = {"cube4": True, "cube1": True, "cube6": True}
goal1_h.onStack = ["cube4", "cube1", "cube6"]
goal1_r = deepcopy(goal1_h)

# Register states and 'stack' tasks for both agents.
pyhop.set_state("human", state1_h)
pyhop.add_tasks("human", [('stack', goal1_h)])
pyhop.set_state("robot", state1_r)
pyhop.add_tasks("robot", [('stack', goal1_r)])
pyhop.print_state(pyhop.agents["human"].state)

# Initial planning pass; `sol` is filled in place by seek_plan_robot.
sol = []
plans = pyhop.seek_plan_robot(pyhop.agents, "robot", sol)

# Placeholder, presumably assigned a real ROS node elsewhere — TODO confirm.
rosnode = None

def on_new_plan_req(agents):
    # Reset and re-seed both agents from the initial states, then install
    # the requested tasks. `agents` maps agent name -> list of
    # (task_name, args) pairs.
    # NOTE(review): a longer variant of this handler (which also plans and
    # sends the result) exists elsewhere in this file — confirm which one
    # is actually registered.
    pyhop.reset_agents_tasks()
    pyhop.set_state("robot", state1_r)
    pyhop.set_state("human", state1_h)
    for ag, tasks in agents.items():
        pyhop.add_tasks(ag, [(t[0], *t[1]) for t in tasks])
    pyhop.print_state(pyhop.agents["robot"].state)
def print_state(state):
    """Module-level convenience wrapper that delegates to pyhop.print_state."""
    pyhop.print_state(state)