def is_sub_goal_reached(self):
    """Return True when this agent stands orthogonally adjacent to its box.

    Emits a server comment and returns False if the agent has no
    assigned goal.
    """
    if not self.has_goal():
        msg_server_comment("Agent {} has no goal!".format(self.agent_key))
        return False
    box_pos = self.current_state.boxes.get(self.box_key[0])[self.box_key[1]]
    agent_pos = self.current_state.agents.get(self.agent_key)
    # Manhattan distance of exactly 1 == agent is right next to the box.
    row_gap = abs(box_pos[0] - agent_pos[0])
    col_gap = abs(box_pos[1] - agent_pos[1])
    return row_gap + col_gap == 1
def is_goal_reached(self):
    """Return True when the tracked box sits exactly on its goal cell.

    Emits a server comment and returns False if the agent has no
    assigned goal.
    """
    if not self.has_goal():
        msg_server_comment("Agent {} has no goal!".format(self.agent_key))
        return False
    char, idx = self.box_key
    current = self.current_state.boxes.get(char)[idx]
    target = self.goal_state.boxes.get(char)[idx]
    # Compare (row, col) of the box in the current vs. goal state.
    return (current[0], current[1]) == (target[0], target[1])
def solve_level(self):
    """Create one Agent per server agent, assign box goals by colour, and
    collect each agent's individual plan.

    Returns a list with one entry per agent; each entry is a list of
    planned states (empty for agents that received no goal, to be padded
    with NoOp actions by the caller).
    """
    # Agents must be sorted because the server reads joint actions in
    # agent-key order.
    self.agents = [Agent(self.initial_state, char)
                   for char in sorted(self.initial_state.agents.keys())]

    # Assign goals to agents, one colour group at a time.
    solutions = []
    for char, values in self.goal_state.boxes.items():
        steps = []
        # FIX: enumerate() instead of values.index(value) — the original
        # lookup was O(n^2) per group and returned the index of the first
        # equal entry, i.e. the wrong box on duplicates.
        for box_index, value in enumerate(values):
            _, _, box_color = value
            # IndexError below means no agent shares this box's colour;
            # such boxes are deliberately skipped.
            try:
                key_agent = [
                    x for x, y in self.initial_state.agents.items()
                    if y[2] == box_color
                ][0]
                key_agent = int(key_agent)
                # Only assign a goal if the agent is still free.
                if self.agents[key_agent].has_goal():
                    msg_server_comment(
                        "Agent {} has already a goal".format(key_agent))
                else:
                    self.agents[key_agent].assign_goal(
                        self.goal_state, (char, box_index))
                    result = self.agents[key_agent].find_path_to_goal(
                        self.walls)
                    if result is not None and len(result) > 0:
                        steps.extend(result)
                    solutions.append(steps)
            except IndexError:
                continue
    # Agents that received no goal still need a slot in the joint plan;
    # insert an empty list at their position so the caller can pad it
    # with NoOp actions.
    if len(solutions) != len(self.agents):
        for i, agent in enumerate(self.agents):
            if not agent.has_goal():
                solutions.insert(i, [])
    return solutions
def find_path_to_goal(self, walls):
    """Plan this agent's actions: walk to its box, then push/pull it to the goal.

    Advances ``self.current_state`` step by step via ``get_child`` and
    finally extracts the accumulated plan. Returns ``None`` when the agent
    has no goal, otherwise the list produced by ``extract_plan()``.

    :param walls: wall grid used by the path finder and child-state checks
    """
    if not self.has_goal():
        return None
    agent = self.current_state.agents.get(self.agent_key)
    c_box = self.current_state.boxes.get(self.box_key[0])[self.box_key[1]]
    g_box = self.find_a_goal_cell()
    final_actions = []
    # Find path to current box
    path = self.path_finder.calc_route(walls, (agent[0], agent[1]),
                                       (c_box[0], c_box[1]),
                                       self.current_state)
    if path is not None:
        msg_server_comment("Found path from agent {} to box {}".format(
            self.agent_key, self.box_key))
        # First complete the sub goal - getting agent to box.
        # Each iteration takes the best direction and advances the state.
        while not self.is_sub_goal_reached():
            dir_values = self.get_direction_values(agent, path)
            child_state = self.current_state.get_child(
                walls, dir_values[0], self.agent_key, None, None)
            if child_state is not None:
                self.current_state = child_state
                agent = self.current_state.agents.get(self.agent_key)
            else:
                msg_server_err(
                    "FAILED to create child state from: {}, {}".format(
                        agent, dir_values[0]))
                break
        # Find path from the box to its goal cell
        path = self.path_finder.calc_route(walls, (c_box[0], c_box[1]),
                                           (g_box[0], g_box[1]),
                                           self.current_state)
        if path is not None:
            msg_server_comment("Found path from box {} to goal box".format(
                self.box_key))
            flip_transition = False
            # NOTE(review): precedence here is `is_free(...) or (agent on
            # goal cell)` — confirm the mixed and/or is intended.
            if self.current_state.is_free(
                    walls, g_box[0], g_box[1]
            ) or agent[0] == g_box[0] and agent[1] == g_box[1]:
                # Second complete main goal - move box to goal
                while not self.is_goal_reached():
                    agent_dir_values = self.get_direction_values(agent, path)
                    agent_dir_value = agent_dir_values[0]
                    box_dir_values = self.get_direction_values(c_box, path)
                    box_dir_value = box_dir_values[0]
                    # When an agent needs to pull the box towards the goal,
                    # make sure the agent's direction is not towards the box.
                    is_agent_on_goal = path[agent[0]][agent[1]] == path[
                        g_box[0]][g_box[1]]
                    if is_agent_on_goal:
                        if agent_dir_value[2][0] == c_box[
                                0] and agent_dir_value[2][1] == c_box[1]:
                            agent_dir_value = agent_dir_values[1]
                    # When a box should be pulled, the direction needs to be
                    # mirrored because e.g. Pull(S,S) is invalid.
                    if agent_dir_value[1] > box_dir_value[
                            1] and not flip_transition or is_agent_on_goal:
                        is_pull_flip_needed = True
                        # Check for the possibility of flipping to a push
                        # instead of pulling.
                        zero_count = Counter(
                            elem[1] for elem in agent_dir_values)[0]
                        if zero_count < 2:
                            # Make sure the chosen direction will not be in
                            # the direction of the box.
                            # NOTE(review): `is`/`is not` on direction chars
                            # relies on single-char string interning; `==`
                            # would be safer — confirm before changing.
                            for i in range(1, len(agent_dir_values)):
                                item = agent_dir_values[i]
                                if DIR_MIRROR.get(
                                        item[0]) is not box_dir_value[0]:
                                    agent_dir_value = item
                                    flip_transition = True
                                    # Edge case: new agent dir equals box dir
                                    # and the action should not be a pull; in
                                    # that case no need to flip box dir below.
                                    if agent_dir_value[0] is box_dir_value[0]:
                                        if agent_dir_value[
                                                0] == 'N' and agent[
                                                    0] > c_box[0]:
                                            is_pull_flip_needed = False
                                        elif agent_dir_value[
                                                0] == 'E' and agent[
                                                    1] < c_box[1]:
                                            is_pull_flip_needed = False
                                        elif agent_dir_value[
                                                0] == 'S' and agent[
                                                    0] < c_box[0]:
                                            is_pull_flip_needed = False
                                        elif agent_dir_value[
                                                0] == 'W' and agent[
                                                    1] > c_box[1]:
                                            is_pull_flip_needed = False
                                    break
                        # Make sure the box direction is to the agent's
                        # current pos. Relies on the grid to have 2 pairs of
                        # identical values; if no longer true, need a loop.
                        if box_dir_value[2][0] != agent[
                                0] and box_dir_value[2][1] != agent[1]:
                            box_dir_value = box_dir_values[1]
                        # Mirror box direction for the server to interpret.
                        if is_pull_flip_needed:
                            box_dir_value = (DIR_MIRROR.get(
                                box_dir_value[0]), box_dir_value[1],
                                box_dir_value[2])
                    else:
                        if flip_transition:
                            flip_transition = False
                        # Force the agent's direction to the current pos of
                        # the box, making it push the box.
                        if agent_dir_value[2][0] != c_box[
                                0] or agent_dir_value[2][1] != c_box[1]:
                            for i in range(1, len(agent_dir_values)):
                                item = agent_dir_values[i]
                                if item[2][0] == c_box[0] and item[2][
                                        1] == c_box[1]:
                                    agent_dir_value = item
                                    break
                    child_state = self.current_state.get_child(
                        walls, agent_dir_value, self.agent_key,
                        box_dir_value, self.box_key)
                    if child_state is not None:
                        self.current_state = child_state
                        agent = self.current_state.agents.get(self.agent_key)
                        c_box = self.current_state.boxes.get(
                            self.box_key[0])[self.box_key[1]]
                    else:
                        msg_server_err(
                            "FAILED to create child state from: Agent {} [{}], Box {} [{}]"
                            .format(agent, agent_dir_value, c_box,
                                    box_dir_value))
                        break
    # Extract whatever plan was accumulated in current_state (possibly
    # empty if no path was found).
    final_actions = self.current_state.extract_plan()
    return final_actions
def print_path(self):
    """Dump the value grid as a server comment.

    Debugging aid only — the server console mangles newlines, so the
    output is not pretty.
    """
    grid_dump = self.val_grid
    msg_server_comment(grid_dump)
def main(args):
    """Client entry point: read the level from stdin, plan, and stream
    joint actions to the server until all goals are verified.

    Loop invariant: ``solution`` holds one list of planned states per
    agent; index 0 of each list is the next action to send.
    """
    level_data = None
    # Read server messages from stdin.
    msg_server_action("Starfish")
    level_data = sys.stdin
    # Create client using server messages
    starfish_client = Client(level_data)
    current_state = starfish_client.initial_state
    walls = starfish_client.walls
    # Solve and print
    # TODO: configuration when an agent is blocking all the others
    solution = starfish_client.solve_level()
    # Fallback: no individual plan found — ask for agent cooperation.
    if isListEmpty(solution):
        coop = Cooperation(current_state, starfish_client.goal_state,
                           starfish_client.walls)
        queries = coop.get_needed_coop()
        solution = starfish_client.queries_to_action(queries, current_state)
    nb_agents = len(solution)
    # Joint-action format the server expects: one slot per agent, ';'-joined.
    printer = ";".join(['{}'] * nb_agents)
    verified = False
    # Keep sending actions until the plans are exhausted AND all goals are met.
    while (not isListEmpty(solution)) or (verified == False):
        missing_goals = get_missing_goals(current_state,
                                          starfish_client.goal_state)
        if len(missing_goals) == 0:
            verified = True
        for i, elt in enumerate(solution):
            # Idle agent with nothing missing: pad its plan with a NoOp.
            if len(elt) == 0 and str(i) not in missing_goals:
                padding_state = current_state
                solution[i].append(padding_state)
                solution[i][-1].action = Action(ActionType.NoOp, None, None)
            # Idle agent with a missing goal: re-assign and re-plan.
            elif len(elt) == 0 and str(
                    i
            ) in missing_goals:  # and not starfish_client.agents[i].has_goal():
                starfish_client.agents[i].current_state = current_state
                starfish_client.agents[i].assign_goal(
                    starfish_client.goal_state,
                    (missing_goals[str(i)][0], 0))
                new_path = starfish_client.agents[i].find_path_to_goal(
                    starfish_client.walls)
                if new_path is not None and len(new_path) > 0:
                    solution[i].extend(new_path)
                else:
                    # Re-planning failed — fall back to a NoOp pad.
                    padding_state = current_state
                    solution[i].append(padding_state)
                    solution[i][-1].action = Action(ActionType.NoOp, None,
                                                    None)
        # grabbing state for each agent
        state = [elt[0] for elt in solution]
        joint_action = [agent.action for agent in state]
        index_non_applicable, current_state, is_applicable = check_action(
            joint_action, current_state, walls)
        msg_server_comment(
            printer.format(*joint_action) +
            " - applicable: {}".format(is_applicable))
        # if there is a conflict between agents
        if not is_applicable:
            conflict = Conflict(current_state, index_non_applicable,
                                joint_action, solution, walls)
            agents, actions = conflict.handle_conflicts()
            # Rebuild the joint action: everyone NoOps except resolved agents.
            joint_action = [Action(ActionType.NoOp, None, None)] * nb_agents
            for (agent, action) in zip(agents, actions):
                if action is not None:
                    joint_action[int(agent)] = action
                    # forgetting goal in order to help fix the conflict
                    padding_state = current_state
                    solution[int(agent)] = [padding_state]
                    solution[int(agent)][-1].action = Action(
                        ActionType.NoOp, None, None)
                    solution[int(agent)].append(solution[int(agent)][-1])
                    starfish_client.agents[int(agent)].forget_goal()
                else:
                    # No resolution action: rebuild the agent from the
                    # current state and re-plan towards its old box goal.
                    box_key = starfish_client.agents[int(agent)].box_key
                    starfish_client.agents[int(agent)] = Agent(
                        current_state, agent)
                    starfish_client.agents[int(agent)].assign_goal(
                        starfish_client.goal_state, box_key)
                    new_path_to_goal = starfish_client.agents[int(
                        agent)].find_path_to_goal(walls)
                    solution[int(agent)] = new_path_to_goal
            # updating state
            _, current_state, _ = check_action(joint_action, current_state,
                                               walls)
            msg_server_comment("New action: " + printer.format(*joint_action))
        msg_server_action(printer.format(*joint_action))
        # Only consume the step from each plan if it was actually applied.
        if is_applicable:
            for i, elt in enumerate(solution):
                elt.pop(0)
        # NOTE(review): server replies one line per joint action; a reply
        # containing 'false' signals a rejected action — confirm protocol.
        response = level_data.readline().rstrip()
        if 'false' in response:
            msg_server_err("Server answered with error to the action " +
                           printer.format(*joint_action))
def __init__(self, initial_state: 'State', goal_state: 'State'):
    """Build the initial partial plan: a Start action whose effects encode
    the initial state and an End action whose preconditions encode the
    goal state, ordered Start < End, with one open subgoal per End
    precondition.

    :param initial_state: state the level starts in
    :param goal_state: state the level must reach
    """
    # Ordering constraints keyed by action id -> [(other_id, ordering), ...]
    self.action_orders = defaultdict(list)
    # Monotonic counter used to give every instantiated action a unique id.
    self.unique_action_count = 0
    # Action blueprints/instances by name.
    self.actions = {}
    # Open subgoals (unsatisfied preconditions) to be resolved by planning.
    self.subgoals = []
    # Causal links between actions.
    self.links = []
    self.create_action_blueprints(initial_state, goal_state)
    # Instantiate start and end actions
    end_action = self.actions.get("End").instantiate_from_blueprint(
        self.unique_action_count)
    box_keys = list(goal_state.boxes.keys())
    agent_keys = list(goal_state.agents.keys())
    # Cursors walking boxes (per key, per index) and agents in order while
    # binding precondition arguments.
    b_curr = 0
    b_key = 0
    a_key = 0
    for precon in end_action.preconditions:
        # context_entity is the box/agent bound earlier in this precondition;
        # the "row"/"col" arguments are read from it, so argument order
        # within a precondition matters.
        context_entity = None
        for arg in precon.arguments:
            if arg.name == "box":
                arg.value = (box_keys[b_key], b_curr)
                context_entity = goal_state.boxes.get(
                    box_keys[b_key])[b_curr]
                # Advance to the next box, rolling over to the next key.
                if b_curr < len(goal_state.boxes.get(box_keys[b_key])) - 1:
                    b_curr += 1
                else:
                    b_key += 1
                    b_curr = 0
            elif arg.name == "agent":
                arg.value = agent_keys[a_key]
                context_entity = goal_state.agents.get(agent_keys[a_key])
                a_key += 1
            elif arg.name == "row":
                arg.value = context_entity[0]
            elif arg.name == "col":
                arg.value = context_entity[1]
        # Every End precondition starts life as an open subgoal.
        self.subgoals.append(PlanSubGoal(precon, end_action))
    self.unique_action_count += 1
    start_action = self.actions.get("Start").instantiate_from_blueprint(
        self.unique_action_count)
    box_keys = list(initial_state.boxes.keys())
    agent_keys = list(initial_state.agents.keys())
    b_curr = 0
    b_key = 0
    a_key = 0
    # Start effects mention each agent twice (presumably pos + free cell);
    # a_pair_count advances the agent cursor every second binding —
    # TODO confirm against the blueprint definition.
    a_pair_count = 0
    for eff in start_action.effects:
        context_entity = None
        for arg in eff.arguments:
            if arg.name == "box":
                arg.value = (box_keys[b_key], b_curr)
                context_entity = initial_state.boxes.get(
                    box_keys[b_key])[b_curr]
                if b_curr < len(initial_state.boxes.get(
                        box_keys[b_key])) - 1:
                    b_curr += 1
                else:
                    b_key += 1
                    b_curr = 0
            elif arg.name == "agent":
                arg.value = agent_keys[a_key]
                context_entity = initial_state.agents.get(
                    agent_keys[a_key])
                a_pair_count += 1
                if a_pair_count % 2 == 0:
                    a_pair_count = 0
                    a_key += 1
            elif arg.name == "row":
                arg.value = context_entity[0]
            elif arg.name == "col":
                arg.value = context_entity[1]
    # Create ordering constraint Start < End.
    ordering = PlanActionOrdering(start_action, end_action)
    self.action_orders[start_action.action_id].append(
        (end_action.action_id, ordering))
    self.unique_action_count += 1
    msg_server_comment("Starting subgoals:")
    for subgoal in self.subgoals:
        msg_server_comment(subgoal.state)