Example #1
def bfs():
    start = time.time()
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []
    goal_reached = False
    # each queue entry is a whole path: a list of (state, action) pairs
    queue = [[(init_state, '')]]
    visited = list()

    while queue:
        path = queue.pop(0)
        (vertex, prev_action) = path[-1]
        if problem.is_goal_state(vertex):
            print("Goal Reached!!")
            path = path[1:]
            goal_reached = True
            break
        elif vertex not in visited:
            for next_action in possible_actions:
                (current_neighbour,
                 cost_for_action) = problem.get_successor(vertex, next_action)
                new_path = list(path)
                new_path.append((current_neighbour, next_action))
                queue.append(new_path)
            visited.append(vertex)

    action_list = [actions for nodes, actions in path]
    if len(action_list) == 0 or not goal_reached:
        print("No path found!!")
        return ['']
    end = time.time()
    print("Time for execution of BFS: ", end - start)
    return action_list
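# Hedged sketch: none of these snippets define `problem` or their imports. The
# interface below is inferred from the calls in this file (all names here are
# assumptions); the real assignment supplies the implementation.
import time
import heapq
import queue
import itertools
import collections
from queue import PriorityQueue
import numpy as np


class State(object):
    def __init__(self, x, y, orientation):
        self.x, self.y, self.orientation = x, y, orientation

    def __eq__(self, other):
        return (self.x, self.y, self.orientation) == (
            other.x, other.y, other.orientation)


class Problem(object):
    def get_initial_state(self):
        raise NotImplementedError

    def get_goal_state(self):
        raise NotImplementedError

    def get_actions(self):
        # e.g. ["MoveF", "TurnCW", "TurnCCW"]
        raise NotImplementedError

    def get_successor(self, state, action):
        # returns (next_state, step_cost); next_state.x == -1 marks an invalid move
        raise NotImplementedError

    def is_goal_state(self, state):
        raise NotImplementedError


problem = Problem()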
def gbfs():
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []
    visited_list = []
    gbfs_queue = []
    initial_heuristic = calculate_heuristic(init_state, goal_state)
    # Store the initial configuration in the queue
    heapq.heappush(gbfs_queue, (initial_heuristic, init_state, [""]))
    start = time.time()
    while gbfs_queue:
        current_heuristic, current_state, path_from_root = heapq.heappop(gbfs_queue)
        if (current_state in visited_list):
            continue
        visited_list.append(current_state)
        if (current_state == goal_state):
            action_list = path_from_root
            end = time.time()
            print "PATH FOUND"
            print "Time taken in seconds",(end - start)
            return action_list
        else:
            for action in possible_actions:
                #to get the next state, cost for an action on state_x use:
                next_possible_state,current_cost = problem.get_successor(current_state,action)
                if next_possible_state.x != -1 and next_possible_state not in visited_list:
                    next_heuristic = calculate_heuristic(next_possible_state, goal_state)
                    heapq.heappush(gbfs_queue, (next_heuristic, next_possible_state,
                                                path_from_root + [action]))
    print "PATH NOT FOUND"
    return []
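# `calculate_heuristic` is not defined in this snippet. A minimal sketch,
# assuming states expose grid coordinates: the Manhattan distance, admissible
# for 4-connected motion with unit move costs.
def calculate_heuristic(state, goal_state):
    return abs(goal_state.x - state.x) + abs(goal_state.y - state.y)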
def bfs():
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []
    visited_list = []
    bfs_queue = [(init_state,[""])]

    start = time.time()
    while (len(bfs_queue) != 0):
        current_state,current_action = bfs_queue.pop(0)
        if (current_state in visited_list):
            continue
        visited_list.append(current_state)
        if (current_state == goal_state):
            action_list = current_action
            end = time.time()
            print("PATH FOUND")
            print("Time taken in seconds", (end - start))
            return action_list
        for action in possible_actions:
            # to get the next state, cost for an action on state_x use:
            (nextstate, cost) = problem.get_successor(current_state, action)
            if nextstate.x != -1:
                bfs_queue.append((nextstate, current_action + [action]))
    print "PATH NOT FOUND"
    return []
Example #4
def bfs():
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []

    visited = []
    state_queue = [
        (init_state, [])
    ]  # maintains nodes in the order they will be visited, as tuples of
    # (state, [list of actions to reach the state])

    while (len(state_queue) > 0):  # while nodes are yet to be visited

        current_state = state_queue[0][0]
        current_actions = state_queue[0][1]
        state_queue = state_queue[1:]
        visited.append(current_state)

        for action in possible_actions:
            nextstate, cost = problem.get_successor(current_state, action)
            if (nextstate.x == -1 and nextstate.y
                    == -1):  # if action not possible then do not consider
                continue
            if ((nextstate not in visited)
                    and (not in_state_queue(nextstate, state_queue))
                ):  # if state not yet visited and not already queued, expand it
                if (problem.is_goal_state(nextstate)
                    ):  #if goal is found then break
                    return current_actions + [action]
                else:
                    state_queue.append((nextstate, current_actions + [action]))

    return []
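# `in_state_queue` is not defined in this snippet. A minimal sketch, assuming
# the queue holds (state, [actions]) tuples as described above:
def in_state_queue(state, state_queue):
    # linear scan; O(len(state_queue)) per lookup
    return any(queued_state == state for queued_state, _ in state_queue)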
def gbfs():
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []

    # to get the next state, cost for an action on state_x use:
    # (nextstate, cost) = problem.get_successor(state, action)
    frontier = []
    explored = []
    child = []  # To store EXPLORED Nodes [next_state,action,current_state] lists
    heuristic_cost_init = (abs(goal_state.x - init_state.x) + abs(goal_state.y - init_state.y))
    counter = itertools.count()  # unique sequence count
    count = next(counter)
    flag = 0
    heapq.heappush(frontier, [heuristic_cost_init, count, init_state])  # (heuristic,counter, state)
    entry_finder = []  # list with nodes that are in frontier (copy)
    entry_finder.append(init_state)
    # to get the next state, cost for an action on state_x use:
    # (nextstate, cost) = problem.get_successor(state, action)
    while frontier:
        if (flag == 1):
            break
        current_heuristic_cost, current_order, current_state = heapq.heappop(frontier)
        # print("heuristic cost:",current_heuristic_cost)
        entry_finder.remove(current_state)
        explored.append(current_state)
        for action in possible_actions:
            (nextstate, cost) = problem.get_successor(current_state, action)
            if nextstate not in entry_finder and nextstate not in explored and nextstate.x != -1:
                if (nextstate == goal_state):
                    child.append([nextstate, action, current_state])  # This action takes the bot to the goal
                    flag = 1
                    break
                else:
                    count = next(counter)
                    child.append([nextstate, action, current_state])
                    heuristic_cost = (abs(goal_state.x - nextstate.x) + abs(goal_state.y - nextstate.y))
                    heapq.heappush(frontier, [heuristic_cost, count, nextstate])  # pay attention to list
                    entry_finder.append(nextstate)

    if flag == 0:  # frontier exhausted without reaching the goal
        return []

    present_node = child[-1][0]  # Goal node
    action_list.append(child[-1][1])
    previous_node = child[-1][2]
    current_child = []

    while (previous_node != init_state):  # iterate until the starting node is reached
        # List comprehension
        current_child = [child_elem for child_elem in child if child_elem[0] == previous_node]
        action_list.append(current_child[0][1])
        previous_node = current_child[0][2]
    # child.remove(current_child) #to speed up the search
    # reversing the action list
    action_list.reverse()

    return action_list
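# The [priority, count, state] entries above use the standard heapq
# tie-breaking pattern: on equal priorities heapq compares the next element,
# and the strictly increasing count from itertools.count() guarantees two
# State objects are never compared directly (which would raise TypeError in
# Python 3 unless State defines an ordering).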
Example #6
def astar():
    start = time.time()
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []
    print "Running"
    explored = []
    queue_search = PriorityQueue()
    curr_cost = manhattan(init_state, goal_state)
    queue_search.put((curr_cost, (init_state, [])))
    flag = 0

    while not queue_search.empty():
        #print("inside while")
        #path = queue.pop(0)
        #node = path[-1]
        #current_actions = list_of_actions.pop(0)
        (curr_cost, (curr_node, curr_action_list)) = queue_search.get()
        explored.append(curr_node)

        for action in possible_actions:

            (next_possible_state,
             cost) = problem.get_successor(curr_node, action)

            if next_possible_state.x != -1 and next_possible_state not in explored:
                t_action_list = []
                # f = g + h: recover g from the parent's priority, add the step
                # cost, then add the child's heuristic
                child_cost = (curr_cost - manhattan(curr_node, goal_state) +
                              cost + manhattan(next_possible_state, goal_state))
                for temp_action in curr_action_list:
                    t_action_list.append(temp_action)
                t_action_list.append(action)
                queue_search.put(
                    (child_cost, (next_possible_state, t_action_list)))
                if next_possible_state == goal_state:
                    flag = 1
                    action_list = t_action_list
                    print("goal reached")
                    end = time.time()
                    print(end - start)
                    break
        if flag == 1:
            break
    if flag == 0:
        return "no path found"
    else:
        return action_list

    print "outside while"

    if flag == 0:
        return "no path found"
    else:
        return action_list
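# `manhattan` is not defined in this snippet; a minimal sketch consistent with
# how it is called above:
def manhattan(state, goal_state):
    return abs(goal_state.x - state.x) + abs(goal_state.y - state.y)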
def bfs():
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []  # List with moves which takes the bot from start to goal
    frontier = collections.deque([init_state])
    explored = []
    child = []  # To store all [next_state,action,current_state] lists
    flag = 0
    # to get the next state, cost for an action on state_x use:
    # (nextstate, cost) = problem.get_successor(state, action)
    # Comparing the initial state with goal state

    if (init_state == goal_state):
        print("Already present in goal")
        return action_list

    while frontier:
        if (flag == 1):
            break
        current_state = frontier.popleft()
        explored.append(current_state)
        for action in possible_actions:
            (nextstate, cost) = problem.get_successor(current_state, action)
            if nextstate not in frontier and nextstate not in explored and nextstate.x != -1:
                if nextstate == goal_state:
                    child.append([nextstate, action, current_state])  # This action takes the bot to the goal
                    flag = 1
                    break
                else:
                    child.append([nextstate, action, current_state])
                    frontier.append(nextstate)

    # A list of all the moves which gives (next_state, action, current_state)
    # In the last list entry, next_node is the goal node
    # Retrace until we reach the starting node (while loop)
    # Problem -- number of elements in the child list depends on the map
    # BFS guarantees each node is visited only once

    if flag == 0:  # frontier exhausted without reaching the goal
        return action_list

    present_node = child[-1][0]  # Goal node
    action_list.append(child[-1][1])
    previous_node = child[-1][2]
    current_child = []

    while (previous_node != init_state):  # iterate until the starting node is reached
        # List comprehension
        current_child = [child_elem for child_elem in child if child_elem[0] == previous_node]
        action_list.append(current_child[0][1])
        previous_node = current_child[0][2]
        child.remove(current_child[0])  # to speed up the search

    # reversing the action list
    action_list.reverse()
    return action_list
Example #8
    def random_walk(self):
        action_set = problem.get_actions()
        next_possible_states = list()
        for action in action_set:
            next_possible_state, cost = problem.get_successor(
                self.current_state, action)
            if next_possible_state.x != -1:
                next_possible_states.append((next_possible_state, action))
        # pick one of the valid successor states uniformly at random
        rand_number = np.random.choice(range(len(next_possible_states)))
        selected_state, action = next_possible_states[rand_number]
        self.last_action = action
        return selected_state, action
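# random_walk is a method; a hypothetical driver (class and attribute names
# are assumptions) might look like:
#     agent = Agent(problem.get_initial_state())  # sets agent.current_state
#     for _ in range(100):
#         agent.current_state, _ = agent.random_walk()
# Note the method assumes at least one action is valid in the current state;
# otherwise next_possible_states is empty and np.random.choice raises ValueError.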
Example #9
def ucs():
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []

    # priority queue to retrieve the next state based on the cost of reaching it;
    # entries are tuples of (cost, (state, [list of actions to reach the state]))
    state_queue = queue.PriorityQueue()
    state_queue.put((0, (init_state, [])))
    visited = []
    state_cost = {}

    while (not state_queue.empty()):
        top_item = state_queue.get()
        current_cost = top_item[0]
        current_state = top_item[1][0]
        current_actions = top_item[1][1]

        if (current_state in visited):
            # priority queues provide no way to lower the cost of an existing
            # entry, so stale duplicates are simply discarded when popped
            continue

        if (problem.is_goal_state(current_state)):
            break

        visited.append(current_state)
        for action in possible_actions:
            nextstate, cost = problem.get_successor(current_state, action)
            # tuple key so the state can be used in the cost dictionary
            key = (nextstate.x, nextstate.y, nextstate.orientation)
            if (nextstate.x == -1 and nextstate.y == -1):
                continue
            # if a new state is found, or the state has a lower cost than its
            # existing value, update the cost and add the state to the queue
            if ((nextstate not in visited and key not in state_cost)
                    or (key in state_cost
                        and current_cost + cost < state_cost[key])):
                state_queue.put((current_cost + cost,
                                 (nextstate, current_actions + [action])))
                state_cost[key] = current_cost + cost

    if (problem.is_goal_state(current_state)):
        action_list = current_actions

    return action_list
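# Caveat: queue.PriorityQueue breaks cost ties by comparing the rest of the
# entry, i.e. the (state, actions) tuple, which raises TypeError in Python 3
# if State defines no ordering. A common fix is the counter pattern from the
# gbfs example above, e.g. (hypothetical):
#     state_queue.put((current_cost + cost, next(counter),
#                      (nextstate, current_actions + [action])))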
Example #10
def astar():
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []

    state_queue = queue.PriorityQueue()
    state_queue.put((0 + get_manhattan_distance(init_state, goal_state),
                     (0, init_state, [])))
    visited = []
    state_cost = {}

    while (not state_queue.empty()):
        top_item = state_queue.get()
        current_cost = top_item[1][0]
        current_state = top_item[1][1]
        current_actions = top_item[1][2]

        if (current_state in visited):
            continue

        if (problem.is_goal_state(current_state)):
            break

        visited.append(current_state)
        for action in possible_actions:
            nextstate, cost = problem.get_successor(current_state, action)
            # heuristic = get_manhattan_distance(nextstate, goal_state) # manhattan distance heuristc
            heuristic = get_custom_heuristic(
                nextstate, goal_state)  # custom heuristic for q8.2
            key = (nextstate.x, nextstate.y, nextstate.orientation)
            if (nextstate.x == -1 and nextstate.y == -1):
                continue
            # if a new state is found, or a queued state now has a lower f value,
            # add it to the queue (state_cost stores f = g + h)
            if ((nextstate not in visited and key not in state_cost) or
                (key in state_cost
                 and current_cost + cost + heuristic < state_cost[key])):
                state_queue.put((current_cost + cost + heuristic,
                                 (current_cost + cost, nextstate,
                                  current_actions + [action])))
                state_cost[key] = current_cost + cost + heuristic

    if (problem.is_goal_state(current_state)):
        action_list = current_actions

    return action_list
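# `get_manhattan_distance` is not defined in this snippet; a minimal sketch
# matching its call signature:
def get_manhattan_distance(state, goal_state):
    return abs(goal_state.x - state.x) + abs(goal_state.y - state.y)

# `get_custom_heuristic` is assignment-specific (q8.2) and is not reproduced
# here; any admissible estimate of the remaining cost slots in the same way.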
Example #11
    def get_path_gbfs(self, init_state, goal_locations):
        final_state = None
        goal_states = self.build_goal_states(goal_locations)
        goal_reached = False
        for goal_state in goal_states: #search for any of the load locations
            possible_actions = problem.get_actions()
            action_list = []

            state_queue = queue.PriorityQueue()
            state_queue.put((self.get_manhattan_distance(init_state, goal_state), (init_state, [])))
            visited = []
            state_cost = {}

            while(not state_queue.empty()):
                top_item = state_queue.get()
                current_cost = top_item[0]
                current_state = top_item[1][0]
                current_actions = top_item[1][1]
                
                if(current_state in visited):
                    continue
                
                if(self.is_goal_state(current_state, goal_state)):
                    goal_reached = True
                    break

                visited.append(current_state)
                for action in possible_actions:
                    nextstate, cost = problem.get_successor(current_state, action)
                    # greedy best-first: the priority is the heuristic alone, so
                    # the step cost from get_successor is overwritten here
                    cost = self.get_manhattan_distance(nextstate, goal_state)
                    key = (nextstate.x, nextstate.y, nextstate.orientation)
                    if(nextstate.x == -1 and nextstate.y == -1):
                        continue
                    # if a new state is found then add to queue
                    if(nextstate not in visited and key not in state_cost.keys()):
                        state_queue.put((cost, (nextstate, current_actions + [action])))
                        state_cost[key] = cost
                    
            if(self.is_goal_state(current_state, goal_state)):
                action_list = current_actions
                final_state = current_state
                goal_reached = True
                break

        return action_list, final_state, goal_reached
Example #12
def bfs():
    start = time.time()
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    print "Running"
    explored = []
    queue_search = queue.Queue()
    queue_search.put((init_state, []))
    flag = 0
    action_list = []
    while not queue_search.empty():
        #path = queue.pop(0)
        #node = path[-1]
        #current_actions = list_of_actions.pop(0)
        (curr_node, curr_action_list) = queue_search.get()
        explored.append(curr_node)

        for action in possible_actions:

            (next_possible_state,
             cost) = problem.get_successor(curr_node, action)

            if next_possible_state.x != -1 and next_possible_state not in explored:
                t_action_list = []
                for temp_action in curr_action_list:
                    t_action_list.append(temp_action)
                t_action_list.append(action)
                queue_search.put((next_possible_state, t_action_list))
                if next_possible_state == goal_state:
                    print("goal reached")
                    flag = 1
                    end = time.time()
                    print(end - start)
                    action_list = t_action_list
                    break
        if flag == 1:
            break

    print "outside while"

    return action_list
Example #13
def astar():
    start = time.time()
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []
    goal_reached = False

    def manhattan_heuristic(state):
        return abs(state.x - goal_state.x) + abs(state.y - goal_state.y)

    def euclidean_heuristic(state):
        # alternative admissible heuristic; unused below so that f stays
        # consistent with the Manhattan estimate used at initialization
        return ((state.x - goal_state.x)**2 + (state.y - goal_state.y)**2)**0.5

    # frontier entries are (f, state, path); the list is kept sorted below so
    # it behaves as a priority queue
    stack = [(manhattan_heuristic(init_state) + 0, init_state,
              [(init_state, '')])]

    visited = list()
    while stack:
        stack.sort()  # cheapest f first; heapq would avoid the full sort
        (f_value, vertex, path) = stack.pop(0)
        if vertex not in visited:
            if problem.is_goal_state(vertex):
                print("Goal Reached!!")
                path = path[1:]
                goal_reached = True
                break
            visited.append(vertex)
            for next_action in possible_actions:
                (current_neighbour,
                 cost_for_action) = problem.get_successor(vertex, next_action)
                # f(n') = g(n') + h(n'); g(n') is recovered by stripping the
                # parent's heuristic from its stored f value
                f_of_n = (f_value - manhattan_heuristic(vertex) + cost_for_action
                          + manhattan_heuristic(current_neighbour))
                stack.append((f_of_n, current_neighbour,
                              path + [(current_neighbour, next_action)]))

    action_list = [actions for nodes, actions in path]
    if len(action_list) == 0 or not goal_reached:
        print("No path found!!")
        return ['']
    end = time.time()
    print("Time for execution of A*: ", end - start)
    return action_list
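# The f update above avoids storing g separately: each frontier entry holds
# f = g + h, so the child's priority is recovered as
#     f_child = (f_parent - h(parent)) + step_cost + h(child)
# which equals g_child + h(child). This only works when the same heuristic is
# used on both sides of the subtraction.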
def astar_hs():
    # Tie-breaking approach: scale h by (1 + epsilon) so that, among nodes with
    # equal unscaled f, the one closer to the goal gets a strictly smaller priority
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []
    visited_list = []
    astar_queue = []
    initial_heuristic = calculate_heuristic(init_state, goal_state)
    initial_heuristic *= (1 + 0.03125)
    initial_cost = 0
    # Store the initial configuration in the queue as (priority, g, state, path)
    heapq.heappush(astar_queue, (initial_heuristic + initial_cost, initial_cost,
                                 init_state, [""]))
    start = time.time()
    while astar_queue:
        priority, cost_so_far, current_state, path_from_root = heapq.heappop(astar_queue)
        if (current_state in visited_list):
            continue
        visited_list.append(current_state)
        if (current_state == goal_state):
            action_list = path_from_root
            end = time.time()
            print "PATH FOUND"
            print "Time taken in seconds",(end - start)
            return action_list
        else:
            for action in possible_actions:
                #to get the next state, cost for an action on state_x use:
                next_possible_state,current_cost = problem.get_successor(current_state,action)
                if next_possible_state.x != -1 and next_possible_state not in visited_list:
                    next_heuristic = calculate_heuristic(next_possible_state, goal_state)
                    next_heuristic *= (1 + 0.03125)
                    heapq.heappush(astar_queue,
                                   (next_heuristic + cost_so_far + current_cost,
                                    cost_so_far + current_cost,
                                    next_possible_state, path_from_root + [action]))
    print "PATH NOT FOUND"
    return []
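# The (1 + 0.03125) factor is the classic tie-breaking nudge: among nodes with
# equal unscaled f, the one with the smaller h (closer to the goal) now gets a
# strictly smaller priority, so A* commits to a single optimal path instead of
# expanding all of them. With an admissible h, the returned path cost stays
# within a factor of (1 + 0.03125) of optimal.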
Example #15
def astar(init_state, goal_state):
    # init_state = problem.get_initial_state()
    # goal_state = problem.get_book_state("book_40")
    print("goal state:", goal_state)
    possible_actions = problem.get_actions()
    action_list = []  # List with moves which takes the bot from start to goal
    frontier = []
    explored = []
    child = []  # To store EXPLORED nodes as [next_state, action, current_state] lists
    cost = 0
    counter = itertools.count()  # unique sequence count
    count = next(counter)
    heuristic_cost_init = (abs(goal_state.x - init_state.x) +
                           abs(goal_state.y - init_state.y))
    # frontier entries are [f = cost + heuristic, counter, state]
    heapq.heappush(frontier, [cost + heuristic_cost_init, count, init_state])
    entry_finder = []  # list with nodes that are in frontier (copy)
    entry_finder.append(init_state)

    # to get the next state, cost for an action on state_x use:
    # (nextstate, cost) = problem.get_successor(state, action)
    while frontier:
        # print("inside frontier")
        current_cost, current_order, current_state = heapq.heappop(frontier)
        entry_finder.remove(current_state)
        heuristic_current_state = (abs(goal_state.x - current_state.x) +
                                   abs(goal_state.y - current_state.y))
        # recover g for the popped state by stripping its heuristic from f
        current_cost = current_cost - heuristic_current_state
        # Algo Exit section
        if (current_state == goal_state):
            # print("condition failed\n")
            # return the solution here
            # child list last entry is the direction to goal node
            goal_child = [
                child_elem for child_elem in child
                if child_elem[0] == goal_state
            ]
            present_node = goal_child[0][0]  # Goal node
            action_list.append(goal_child[0][1])
            previous_node = goal_child[0][2]
            current_child = []
            while (previous_node != init_state):
                # iterate until the starting node is reached
                # List comprehension
                current_child = [
                    child_elem for child_elem in child
                    if child_elem[0] == previous_node
                ]
                action_list.append(current_child[0][1])
                previous_node = current_child[0][2]
                child.remove(current_child[0])  # to speed up the search
                # Exit from the other while loop of frontier
                # child <- nextstate,action,current_state
            break
        explored.append(current_state)
        # print('Explored: ',current_state)
        for action in possible_actions:
            (nextstate, cost) = problem.get_successor(current_state, action)
            # print("nextstate:", nextstate)
            # heuristic for the next state, needed by both branches below
            heuristic_cost_next_state = (abs(goal_state.x - nextstate.x) +
                                         abs(goal_state.y - nextstate.y))
            if nextstate not in entry_finder and nextstate not in explored and nextstate.x != -1:
                child.append([nextstate, action, current_state])
                count = next(counter)
                # get_successor gives the cost between the current and next state;
                # current_cost is g here (its heuristic was subtracted above)
                heapq.heappush(frontier, [
                    current_cost + cost + heuristic_cost_next_state, count,
                    nextstate
                ])
                entry_finder.append(nextstate)
            elif nextstate in entry_finder:
                # existing_node_frontier is a list of lists
                existing_node_frontier = [
                    node for node in frontier if node[2] == nextstate
                ]
                if existing_node_frontier[0][
                        0] > current_cost + cost + heuristic_cost_next_state:
                    count = next(counter)
                    frontier.remove(existing_node_frontier[0])
                    heapq.heapify(frontier)  # restore the heap invariant after remove
                    heapq.heappush(frontier, [
                        current_cost + cost + heuristic_cost_next_state, count,
                        nextstate
                    ])
                    # nextstate will be present in child list as well, updating it
                    current_child = [
                        child_elem for child_elem in child
                        if child_elem[0] == nextstate
                    ]
                    # remove it from child list
                    child.remove(current_child[0])
                    # Making an updated entry
                    child.append([nextstate, action, current_state])

    # reversing the action list
    action_list.reverse()

    return action_list
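# Hypothetical usage, reusing the same global `problem` object as the other
# snippets (get_book_state appears in the commented-out lines above):
#     path = astar(problem.get_initial_state(),
#                  problem.get_book_state("book_40"))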