import time


def bfs():
    start = time.time()
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []
    goal_reached = False
    # frontier of paths; each path is a list of (state, action_taken_to_reach_state) tuples
    queue = [[(init_state, '')]]
    visited = list()
    while queue:
        path = queue.pop(0)
        (vertex, prev_action) = path[-1]
        if problem.is_goal_state(vertex):
            print("Goal Reached!!")
            path = path[1:]  # drop the initial (init_state, '') entry
            goal_reached = True
            break
        elif vertex not in visited:
            for next_action in possible_actions:
                (current_neighbour, cost_for_action) = problem.get_successor(vertex, next_action)
                # NOTE: unlike the implementations below, invalid successors
                # (x == y == -1) are not filtered out here before being enqueued
                new_path = list(path)
                new_path.append((current_neighbour, next_action))
                queue.append(new_path)
            visited.append(vertex)
    action_list = [actions for nodes, actions in path]
    if len(action_list) == 0 or not goal_reached:
        print("No path found!!")
        return ['']
    end = time.time()
    print("Time for execution of BFS: ", end - start)
    return action_list

def bfs():
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []
    visited = []
    # state_queue holds the nodes still to be expanded, in the order they will be
    # visited, together with the actions needed to reach each state:
    # tuples of (state, [list of actions to reach state])
    state_queue = [(init_state, [])]
    while len(state_queue) > 0:  # while nodes are yet to be visited
        current_state = state_queue[0][0]
        current_actions = state_queue[0][1]
        state_queue = state_queue[1:]
        visited.append(current_state)
        for action in possible_actions:
            nextstate, cost = problem.get_successor(current_state, action)
            if nextstate.x == -1 and nextstate.y == -1:
                # action not possible from this state, so do not consider it
                continue
            if (nextstate not in visited) and (not in_state_queue(nextstate, state_queue)):
                # state not yet visited or queued, so add it to the queue
                if problem.is_goal_state(nextstate):
                    # goal found, so stop and return the actions that reach it
                    return current_actions + [action]
                else:
                    state_queue.append((nextstate, current_actions + [action]))
    return []

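# `in_state_queue` is assumed to be a helper defined elsewhere in the framework.
# A minimal sketch of the membership test the BFS above relies on, assuming the
# queue holds (state, actions) tuples as described in the comments:
def in_state_queue(state, state_queue):
    # True if the state is already queued for expansion
    return any(queued_state == state for queued_state, _ in state_queue)
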
try:
    import queue as Queue  # Python 3: the standard-library module is named queue
except ImportError:
    import Queue  # Python 2


def ucs():
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []
    # priority queue used to retrieve the next state based on the cost of reaching it;
    # holds tuples of (cost, (state, [list of actions to reach the state]))
    state_queue = Queue.PriorityQueue()
    state_queue.put((0, (init_state, [])))
    visited = []
    state_cost = {}
    while not state_queue.empty():
        top_item = state_queue.get()
        current_cost = top_item[0]
        current_state = top_item[1][0]
        current_actions = top_item[1][1]
        if current_state in visited:
            # PriorityQueue does not support updating the cost of an existing entry,
            # so stale entries for states that are already visited are simply discarded
            continue
        if problem.is_goal_state(current_state):
            break
        visited.append(current_state)
        for action in possible_actions:
            nextstate, cost = problem.get_successor(current_state, action)
            # hashable tuple built from the state, used as the dictionary key
            key = (nextstate.x, nextstate.y, nextstate.orientation)
            if nextstate.x == -1 and nextstate.y == -1:
                continue
            # if a new state is found, or the state is reached with a lower cost than
            # its stored value, update the cost and add the state to the queue
            if ((nextstate not in visited and key not in state_cost) or
                    (key in state_cost and current_cost + cost < state_cost[key])):
                state_queue.put((current_cost + cost, (nextstate, current_actions + [action])))
                state_cost[key] = current_cost + cost
    if problem.is_goal_state(current_state):
        action_list = current_actions
    return action_list

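# Side note on the "discard stale entries" pattern above (a sketch, not part of the
# original code): when two entries tie on cost, PriorityQueue falls back to comparing
# the (state, actions) payload, which raises a TypeError in Python 3 if the state type
# is not orderable. Pushing a monotonically increasing counter as a tie-breaker avoids
# ever comparing the states themselves. The helpers below are illustrative only.
import itertools

_tie_breaker = itertools.count()

def put_with_tiebreak(state_queue, cost, state, actions):
    # entry layout: (cost, counter, (state, actions)); ties on cost are resolved by
    # the counter, so the state object is never compared
    state_queue.put((cost, next(_tie_breaker), (state, actions)))

def get_with_tiebreak(state_queue):
    cost, _, (state, actions) = state_queue.get()
    return cost, state, actions
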
def astar():
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []
    # priority queue ordered by f(n) = g(n) + h(n);
    # holds tuples of (f, (g, state, [list of actions to reach the state]))
    state_queue = Queue.PriorityQueue()
    state_queue.put((0 + get_manhattan_distance(init_state, goal_state), (0, init_state, [])))
    visited = []
    state_cost = {}
    while not state_queue.empty():
        top_item = state_queue.get()
        current_cost = top_item[1][0]
        current_state = top_item[1][1]
        current_actions = top_item[1][2]
        if current_state in visited:
            continue
        if problem.is_goal_state(current_state):
            break
        visited.append(current_state)
        for action in possible_actions:
            nextstate, cost = problem.get_successor(current_state, action)
            # heuristic = get_manhattan_distance(nextstate, goal_state)  # Manhattan distance heuristic
            heuristic = get_custom_heuristic(nextstate, goal_state)  # custom heuristic for q8.2
            key = (nextstate.x, nextstate.y, nextstate.orientation)
            if nextstate.x == -1 and nextstate.y == -1:
                continue
            # if a new state is found, or an already-explored state is reached with a
            # lower f-value, update the stored value and add the state to the queue
            if ((nextstate not in visited and key not in state_cost) or
                    (key in state_cost and current_cost + cost + heuristic < state_cost[key])):
                state_queue.put((current_cost + cost + heuristic,
                                 (current_cost + cost, nextstate, current_actions + [action])))
                state_cost[key] = current_cost + cost + heuristic
    if problem.is_goal_state(current_state):
        action_list = current_actions
    return action_list

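# `get_manhattan_distance` and `get_custom_heuristic` are assumed to be provided by
# the assignment framework. A minimal sketch of plausible implementations, assuming
# states expose x and y grid coordinates; the "custom" heuristic here is only an
# illustrative placeholder (straight-line distance), not the actual q8.2 heuristic:
def get_manhattan_distance(state, goal_state):
    # Manhattan distance between the two grid positions
    return abs(state.x - goal_state.x) + abs(state.y - goal_state.y)

def get_custom_heuristic(state, goal_state):
    # placeholder: Euclidean (straight-line) distance between the two grid positions
    return ((state.x - goal_state.x) ** 2 + (state.y - goal_state.y) ** 2) ** 0.5
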
def astar():
    start = time.time()
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []
    goal_reached = False

    def manhattan_heuristic(state):
        # alternative heuristic (not used below): Manhattan distance to the goal
        return abs(state.x - goal_state.x) + abs(state.y - goal_state.y)

    def euclidean_heuristic(state):
        # straight-line (Euclidean) distance to the goal
        return ((state.x - goal_state.x) ** 2 + (state.y - goal_state.y) ** 2) ** 0.5

    # frontier of (f(n), state, path) tuples; sorted on every iteration so the
    # entry with the lowest f(n) is expanded first
    stack = [(euclidean_heuristic(init_state) + 0, init_state, [(init_state, '')])]
    visited = list()
    while stack:
        stack.sort()
        (heuristic, vertex, path) = stack.pop(0)  # heuristic here holds f(vertex) = g(vertex) + h(vertex)
        if vertex not in visited:
            if problem.is_goal_state(vertex):
                print("Goal Reached!!")
                path = path[1:]  # drop the initial (init_state, '') entry
                goal_reached = True
                break
            visited.append(vertex)
            for next_action in possible_actions:
                (current_neighbour, cost_for_action) = problem.get_successor(vertex, next_action)
                # f(neighbour) = g(vertex) + step cost + h(neighbour),
                # where g(vertex) = f(vertex) - h(vertex)
                f_of_n = (cost_for_action + heuristic
                          - euclidean_heuristic(vertex) + euclidean_heuristic(current_neighbour))
                stack.append((f_of_n, current_neighbour, path + [(current_neighbour, next_action)]))
    action_list = [actions for nodes, actions in path]
    if len(action_list) == 0 or not goal_reached:
        print("No path found!!")
        return ['']
    end = time.time()
    print("Time for execution of A*: ", end - start)
    return action_list

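# A minimal, self-contained harness for trying the searches offline. The real
# `problem` object comes from the course framework; everything below (GridState,
# GridProblem, the 3x3 obstacle-free map) is a hypothetical stand-in that only mimics
# the interface used above: get_initial_state, get_goal_state, get_actions,
# get_successor, is_goal_state, and the (-1, -1) convention for invalid moves.
from collections import namedtuple

GridState = namedtuple('GridState', ['x', 'y', 'orientation'])

class GridProblem(object):
    def __init__(self, width, height, start, goal):
        self.width, self.height = width, height
        self.start, self.goal = start, goal

    def get_initial_state(self):
        return self.start

    def get_goal_state(self):
        return self.goal

    def get_actions(self):
        return ['up', 'down', 'left', 'right']

    def is_goal_state(self, state):
        return state.x == self.goal.x and state.y == self.goal.y

    def get_successor(self, state, action):
        dx, dy = {'up': (0, 1), 'down': (0, -1), 'left': (-1, 0), 'right': (1, 0)}[action]
        nx, ny = state.x + dx, state.y + dy
        if 0 <= nx < self.width and 0 <= ny < self.height:
            return GridState(nx, ny, state.orientation), 1
        return GridState(-1, -1, state.orientation), 0  # invalid move

if __name__ == '__main__':
    problem = GridProblem(3, 3, GridState(0, 0, 0), GridState(2, 2, 0))
    print("BFS actions:", bfs())   # e.g. a 4-step path; exact order depends on expansion
    print("UCS actions:", ucs())
    print("A* actions:", astar())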