def gbfs():
    """Greedy best-first search from the problem's initial state to its goal.

    Expands states in order of heuristic value only (path cost so far is
    ignored).  Returns the list of actions reaching the goal (first entry is
    the "" placeholder), or [] when no path exists.
    """
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    visited_list = []
    gbfs_queue = []
    initial_heuristic = calculate_heuristic(init_state, goal_state)
    # Frontier entries are (heuristic, state, [actions from the root]).
    heapq.heappush(gbfs_queue, (initial_heuristic, init_state, [""]))
    start = time.time()
    while gbfs_queue:
        # The popped priority is the state's heuristic value (GBFS never
        # accumulates cost), so name it accordingly.
        heuristic, current_state, path_from_root = heapq.heappop(gbfs_queue)
        if current_state in visited_list:
            continue
        visited_list.append(current_state)
        if current_state == goal_state:
            end = time.time()
            print("PATH FOUND")
            print("Time taken in seconds", (end - start))
            return path_from_root
        for action in possible_actions:
            # get_successor returns (next_state, cost); x == -1 marks an
            # invalid/inapplicable move.
            next_possible_state, current_cost = problem.get_successor(current_state, action)
            if next_possible_state.x != -1 and next_possible_state not in visited_list:
                next_heuristic = calculate_heuristic(next_possible_state, goal_state)
                heapq.heappush(gbfs_queue, (next_heuristic, next_possible_state, path_from_root + [action]))
    print("PATH NOT FOUND")
    return []
def bfs():
    """Breadth-first search from the problem's initial state to its goal.

    Returns the list of actions reaching the goal (first entry is the ""
    placeholder), or [] when no path exists.
    """
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    visited_list = []
    # FIFO queue of (state, [actions from the root]).
    bfs_queue = [(init_state, [""])]

    start = time.time()
    while bfs_queue:
        current_state, current_action = bfs_queue.pop(0)
        if current_state in visited_list:
            continue
        visited_list.append(current_state)
        # The goal test depends only on current_state, so perform it once per
        # expansion (the original re-checked it inside the action loop, once
        # per action).
        if current_state == goal_state:
            end = time.time()
            print("PATH FOUND")
            print("Time taken in seconds", (end - start))
            return current_action
        for action in possible_actions:
            nextstate, cost = problem.get_successor(current_state, action)
            # x == -1 marks an invalid successor.
            if nextstate.x != -1:
                bfs_queue.append((nextstate, current_action + [action]))
    print("PATH NOT FOUND")
    return []
示例#3
0
def bfs():
    """Breadth-first search using problem.is_goal_state as the goal test.

    The goal test runs on generated successors, so the trivial case where the
    start state already satisfies the goal is handled up front (the original
    missed it and searched the whole space before returning []).
    Returns the list of actions to the goal, or [] when none exists.
    """
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()

    # Already at the goal: the empty action sequence suffices.
    if problem.is_goal_state(init_state):
        return []

    visited = []
    # Queue entries are (state, [list of actions to reach state]), visited in
    # FIFO order.
    state_queue = [(init_state, [])]

    while len(state_queue) > 0:
        current_state, current_actions = state_queue.pop(0)
        visited.append(current_state)

        for action in possible_actions:
            nextstate, cost = problem.get_successor(current_state, action)
            if nextstate.x == -1 and nextstate.y == -1:
                # Action not applicable in current_state.
                continue
            # Only consider states that are neither visited nor already queued.
            if nextstate not in visited and not in_state_queue(nextstate, state_queue):
                if problem.is_goal_state(nextstate):
                    return current_actions + [action]
                state_queue.append((nextstate, current_actions + [action]))

    return []
示例#4
0
def bfs():
    """Breadth-first search that stores whole paths of (state, action) pairs.

    Returns the list of actions to the goal; returns [''] and prints a
    message when no path is found.
    """
    start = time.time()
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    goal_reached = False
    # Each queue entry is a full path: a list of (state, action-that-reached-it),
    # seeded with the (init_state, '') placeholder.
    queue = [[(init_state, '')]]
    visited = list()
    path = queue[0]

    while queue:
        path = queue.pop(0)
        (vertex, prev_action) = path[-1]
        if problem.is_goal_state(vertex):
            print("Goal Reached!!")
            path = path[1:]  # drop the (init_state, '') placeholder
            goal_reached = True
            break
        elif vertex not in visited:
            for next_action in possible_actions:
                (current_neighbour,
                 cost_for_action) = problem.get_successor(vertex, next_action)
                # Skip inapplicable actions; the original enqueued these
                # invalid (-1, -1) states, unlike the sibling searches.
                if current_neighbour.x == -1 and current_neighbour.y == -1:
                    continue
                new_path = list(path)
                new_path.append((current_neighbour, next_action))
                queue.append(new_path)
            visited.append(vertex)

    action_list = [actions for nodes, actions in path]
    if len(action_list) == 0 or not goal_reached:
        print("No path found!!")
        return ['']
    end = time.time()
    print("Time for execution of BFS: ", end - start)
    return action_list
示例#5
0
 def parse_plan(self):
     """Run the Fast Downward planner and refine its symbolic plan.

     Invokes FD on the stored domain/problem files, then reads the generated
     plan file line by line.  "move" steps are refined into low-level motion
     sequences via get_path_gbfs (downward refinement); "pick"/"place" steps
     are recorded directly.  Returns a list of action tuples, or None when a
     refined path cannot be found.
     """
     run_planner_command = self.FD_file_path + " " + self.domain_file_path + " " + self.problem_file_path + " --search \"lazy_greedy([ff()], preferred=[ff()])\""
     # NOTE(review): shell=True with a concatenated command string — fine for
     # these trusted local paths, but unsafe with untrusted input.
     process = Popen(run_planner_command, stdout=PIPE, stderr=PIPE, shell=True)
     stdout, stderr = process.communicate()
     print(stdout) #print output of planner
     print(stderr)
     process.wait()
     current_state = problem.get_initial_state()
     actions = []
     f = open(self.plan_path, mode='r')
     for line in f:
         line = line.strip() #remove whitespace
         line = line[1:] #remove surrounding parentheses, e.g. "(move a b c)"
         line = line[:-1]
         args = line.split(' ')
         if(args[0] == "move"): #perform downward refinement
             from_location = args[2]
             to_location = args[3]
             load_locations = self.get_load_locations(to_location)
             move_seq, current_state, goal_reached = self.get_path_gbfs(current_state, load_locations)
             if(to_location == "place_area_location"):
                 # At the place area, additionally rotate to face the goal.
                 current_state, extra_actions = self.face_goal(current_state, load_locations)
                 move_seq = move_seq + extra_actions
             if(not goal_reached):
                 # Refinement failed: abandon the plan (returns None).
                 print("Path not possible. Rerun")
                 return
             actions.append(("move", move_seq, current_state))
         elif(args[0] == "pick"):
             cube = args[1]
             actions.append(("pick", cube))
         elif(args[0] == "place"):
             cube = args[1]
             actions.append(("place", cube))
     return actions
示例#6
0
    def __init__(self):
        """Initialize the ROS planning node.

        Loads the environment description from cubes.json, runs the planner
        to build a refined action plan, prints it, then subscribes to /status
        and starts executing the plan.  Blocks in rospy.spin() until shutdown.
        """
        rospy.init_node('listener', anonymous=True)
        self.json_file_path = r"/home/abhyudaya/catkin_ws/src/planning/cubes.json" #path to cubes.json
        self.plan_path = r"sas_plan" #path to generated plan file
        self.domain_file_path = r'/home/abhyudaya/catkin_ws/src/planning/domain.pddl' #path to domain file
        self.problem_file_path = r'/home/abhyudaya/catkin_ws/src/planning/problem.pddl' #path to problem file
        self.FD_file_path = r'/home/abhyudaya/catkin_ws/src/planning/scripts/FD/fast-downward.py' #path to FD folder
        
        with open(self.json_file_path) as f:
            self.env_data = json.load(f)

        start = timeit.default_timer()
        self.actions = self.parse_plan()
        end = timeit.default_timer()
        print("Plan found in: ", end - start)
        print("******************REFINED ACTION PLAN**************************************")
        for action in self.actions:
            if(action[0] == 'move'):
                # Move actions carry (name, move_seq, final State) — print the
                # final pose too.
                print(action[0], action[1], action[2].x, action[2].y, action[2].orientation)
            print(action)
        print("***************************************************************************")
        
        self.action_index = 0
        self.current_state = problem.get_initial_state()
        self.status_subscriber = rospy.Subscriber('/status', String, self.status_callback)
        self.execute_action()
        rospy.spin()
def json_parse_and_read_plan(domain_file, problem_file):
    """Run Fast Downward on the given PDDL files and execute the plan.

    Reads book/bin load locations from books.json, then walks the generated
    sas_plan: "move" steps are refined with gbfs and executed, "pick" and
    "place" steps are executed directly.
    """
    start_time = time.time()
    os.system((
        "./scripts/FD/fast-downward.py {} {} --search \"lazy_greedy([ff()], preferred=[ff()])\""
    ).format(domain_file, problem_file))
    with open('books.json') as book_parse:
        data = json.load(book_parse)

    # Tracks the robot's pose after the last executed move.
    next_state = problem.State(0, 0, "EAST")

    # `with` ensures the plan file is closed (the original leaked the handle).
    with open("sas_plan", "r") as sas_file:
        for line in sas_file:
            word = line.strip('(').split()
            if not word:
                continue  # guard against blank lines

            if word[0] == "move":
                from_location = word[2]
                print(from_location)
                to_location = word[1][0:len(word[1]) - 5]
                print(to_location)
                # The first move starts from the robot's initial pose; later
                # moves continue from where the previous one ended.  (The
                # original duplicated the whole refinement block for the two
                # cases; only the start state differs.)
                if from_location == "tbot3_init_loc":
                    init_state = problem.get_initial_state()
                else:
                    init_state = next_state
                # Book names start with 'b', bin names with 't'.
                if to_location[0:1] == 'b':
                    curr_loc = data["books"][to_location]["load_loc"][0]
                    curr_state = problem.State(curr_loc[0], curr_loc[1],
                                               "EAST")
                elif to_location[0:1] == 't':
                    curr_loc = data["bins"][to_location]["load_loc"][0]
                    curr_state = problem.State(curr_loc[0], curr_loc[1],
                                               "EAST")
                action_list = gbfs(init_state, curr_state)
                action_list.pop(0)  # drop the leading "" placeholder action
                if action_list:
                    next_state = curr_state
                    problem.execute_move_action(action_list)
                    print(action_list)
            elif word[0] == "place":
                book_name = word[1]
                bin_name = word[6][0:len(word[6]) - 1]
                problem.execute_place_action(book_name, bin_name, next_state)
            elif word[0] == "pick":
                book_name = word[1]
                problem.execute_pick_action(book_name, next_state)
    end_time = time.time()
def gbfs():
    """Greedy best-first search (Manhattan heuristic) with backward path
    reconstruction.

    Expands states in order of heuristic value; records each generated edge
    in `child` as [next_state, action, current_state] and, once the goal is
    generated, walks those edges backwards to build the action list.
    NOTE(review): `child[-1]` raises IndexError when the goal is never
    generated — this assumes a path always exists; confirm with callers.
    """
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []

    # to get the next state, cost for an action on state_x use:
    # (nextstate, cost) = problem.get_successor(state, action)
    frontier = []
    explored = []
    child = []  # To store EXPLORED Nodes [next_state,action,current_state] lists
    # Manhattan distance from start to goal.
    heuristic_cost_init = (abs(goal_state.x - init_state.x) + abs(goal_state.y - init_state.y))
    counter = itertools.count()  # unique sequence count; breaks heap ties so states are never compared
    count = next(counter)
    flag = 0  # set to 1 once the goal is generated
    heapq.heappush(frontier, [heuristic_cost_init, count, init_state])  # (heuristic,counter, state)
    entry_finder = []  # list with nodes that are in frontier (copy)
    entry_finder.append(init_state)
    # to get the next state, cost for an action on state_x use:
    # (nextstate, cost) = problem.get_successor(state, action)
    while frontier:
        if (flag == 1):
            break
        current_heuristic_cost, current_order, current_state = heapq.heappop(frontier)
        # print("heuristic cost:",current_heuristic_cost)
        entry_finder.remove(current_state)
        explored.append(current_state)
        for action in possible_actions:
            (nextstate, cost) = problem.get_successor(current_state, action)
            # NOTE(review): `nextstate != -1` compares a State object with an
            # int; sibling searches test nextstate.x == -1 — confirm that
            # State.__eq__ handles this as intended.
            if nextstate not in entry_finder and nextstate not in explored and nextstate != -1:
                if (nextstate == goal_state):
                    child.append([nextstate, action, current_state])  # This action takes the bot to the goal
                    flag = 1
                    break
                else:
                    count = next(counter)
                    child.append([nextstate, action, current_state])
                    heuristic_cost = (abs(goal_state.x - nextstate.x) + abs(goal_state.y - nextstate.y))
                    heapq.heappush(frontier, [heuristic_cost, count, nextstate])  # pay attention to list
                    entry_finder.append(nextstate)

    present_node = child[-1][0]  # Goal node
    action_list.append(child[-1][1])
    previous_node = child[-1][2]
    current_child = []

    # NOTE(review): identity comparison (`is not`) relies on get_successor
    # reusing state objects; equality would be safer if states are
    # value-compared — confirm.
    while (previous_node is not init_state):  # iterate till the Starting node is not reached
        # List comprehension
        current_child = [child_elem for child_elem in child if child_elem[0] == previous_node]
        action_list.append(current_child[0][1])
        previous_node = current_child[0][2]
    # child.remove(current_child) #to speed up the search
    # reversing the action list
    action_list.reverse()

    return action_list
示例#9
0
def astar():
    """Best-first search using a PriorityQueue and the manhattan heuristic.

    Returns the list of actions to the goal, or the string "no path found"
    when the search fails.
    """
    start = time.time()
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []
    print("Running")
    explored = []
    queue_search = PriorityQueue()
    curr_cost = manhattan(init_state, goal_state)
    queue_search.put((curr_cost, (init_state, [])))
    flag = 0

    while True:
        (curr_cost, (curr_node, curr_action_list)) = queue_search.get()
        explored.append(curr_node)

        for action in possible_actions:
            (next_possible_state,
             cost) = problem.get_successor(curr_node, action)

            # x == -1 marks an invalid successor.  (The original also tested
            # the truthiness of queue_search, which is always true.)
            if next_possible_state.x != -1 and next_possible_state not in explored:
                # Child priority: the child's own heuristic plus the step
                # cost.  The original evaluated manhattan() at curr_node (the
                # parent), mis-scoring every child.
                # NOTE(review): path cost is not accumulated along the route,
                # so this behaves closer to greedy search than textbook A*.
                child_cost = manhattan(next_possible_state, goal_state) + cost
                t_action_list = list(curr_action_list)
                t_action_list.append(action)
                queue_search.put(
                    (child_cost, (next_possible_state, t_action_list)))
                if next_possible_state == goal_state:
                    flag = 1
                    action_list = t_action_list
                    print("goal reached")
                    end = time.time()
                    print(end - start)
                    break
        if flag == 1:
            break
    # (The original repeated this return block after an unconditional return,
    # making it unreachable; removed.)
    if flag == 0:
        return "no path found"
    return action_list
def bfs():
    """Breadth-first search with backward path reconstruction.

    Expands states in FIFO order; records each generated edge in `child` as
    [next_state, action, current_state] and, once the goal is generated,
    walks those edges backwards to build the action list.
    NOTE(review): `child[-1]` raises IndexError when the goal is never
    generated — this assumes a path always exists; confirm with callers.
    """
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []  # List with moves which takes the bot from start to goal
    frontier = collections.deque([init_state])
    explored = []
    child = []  # To store all [next_state,action,current_state] lists
    flag = 0  # set to 1 once the goal is generated
    # to get the next state, cost for an action on state_x use:
    # (nextstate, cost) = problem.get_successor(state, action)
    # Comparing the initial state with goal state

    if (init_state == goal_state):
        print("Already present in goal")
        return

    while frontier:
        if (flag == 1):
            break
        current_state = frontier.popleft()
        explored.append(current_state)
        for action in possible_actions:
            (nextstate, cost) = problem.get_successor(current_state, action)
            # NOTE(review): `nextstate != -1` compares a State object with an
            # int; sibling searches test nextstate.x == -1 — confirm that
            # State.__eq__ handles this as intended.
            if nextstate not in frontier and nextstate not in explored and nextstate != -1:
                if nextstate == goal_state:
                    child.append([nextstate, action, current_state])  # This action takes the bot to the goal
                    flag = 1
                    break
                else:
                    child.append([nextstate, action, current_state])
                    frontier.append(nextstate)

    # A list of all the moves which gives (next_state,action,current_state)
    # In Last list next_node is the goal node
    # Retrace till we reach the Starting node (while loop)
    # Problem -- number of elements in the child list depends on the map
    # BFS guarantees each node is visited only once

    present_node = child[-1][0]  # Goal node
    action_list.append(child[-1][1])
    previous_node = child[-1][2]
    current_child = []

    # NOTE(review): identity comparison (`is not`) relies on get_successor
    # reusing state objects; equality would be safer if states are
    # value-compared — confirm.
    while (previous_node is not init_state):  # iterate till the Starting node is not reached
        # List comprehension
        current_child = [child_elem for child_elem in child if child_elem[0] == previous_node]
        action_list.append(current_child[0][1])
        previous_node = current_child[0][2]
        child.remove(current_child[0])  # to speed up the search

    # reversing the action list
    action_list.reverse()
    return action_list
示例#11
0
 def __init__(self):
     """Set up the random-walk ROS node: publish actions on /actions,
     listen for results on /status, and kick off the first action.
     Blocks in rospy.spin() until shutdown."""
     rospy.init_node('random_walk', anonymous=True)
     self.publisher = rospy.Publisher('/actions', String, queue_size=10)
     self.subscriber = rospy.Subscriber('/status', String, self.callback)
     self.init_state = problem.get_initial_state()
     self.current_state = self.init_state
     self.last_action = None
     # Short sleep so the publisher has time to register with the master.
     rospy.Rate(1).sleep()
     print "Running"
     self.next_action()
     rospy.spin()
示例#12
0
def ucs():
    """Uniform-cost search using a PriorityQueue ordered by path cost.

    Frontier entries are (cost, (state, [actions to reach state]));
    state_cost maps a hashable (x, y, orientation) key to the best known
    path cost for that cell.  Returns the action list, or [] when no path
    is found.
    """
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []

    # Priority queue to retrieve the next state with the lowest path cost.
    state_queue = Queue.PriorityQueue()
    state_queue.put((0, (init_state, [])))
    visited = []
    state_cost = {}

    while (not state_queue.empty()):
        current_cost, (current_state, current_actions) = state_queue.get()

        # PriorityQueue has no decrease-key, so stale duplicates of states
        # that were already expanded are simply discarded here.
        if (current_state in visited):
            continue

        if (problem.is_goal_state(current_state)):
            break

        visited.append(current_state)
        for action in possible_actions:
            nextstate, cost = problem.get_successor(current_state, action)
            if (nextstate.x == -1 and nextstate.y == -1):
                continue  # action not applicable in current_state
            # Tuple key so the state can be used in the cost dictionary.
            key = (nextstate.x, nextstate.y, nextstate.orientation)
            new_cost = current_cost + cost
            # Enqueue when the state is new, or re-enqueue when this route is
            # cheaper than the best one recorded so far.
            if ((nextstate not in visited and key not in state_cost)
                    or (key in state_cost and new_cost < state_cost[key])):
                state_queue.put((new_cost,
                                 (nextstate, current_actions + [action])))
                state_cost[key] = new_cost

    if (problem.is_goal_state(current_state)):
        action_list = current_actions

    return action_list
示例#13
0
def astar():
    """A* search using a PriorityQueue and a custom heuristic.

    Frontier entries are (f, (g, state, actions)); state_cost maps a
    (x, y, orientation) key to the best f-value recorded for that cell.
    Returns the action list, or [] when no path is found.
    """
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []

    state_queue = Queue.PriorityQueue()
    state_queue.put((0 + get_manhattan_distance(init_state, goal_state),
                     (0, init_state, [])))
    visited = []
    state_cost = {}

    while (not state_queue.empty()):
        top_item = state_queue.get()
        current_cost = top_item[1][0]
        current_state = top_item[1][1]
        current_actions = top_item[1][2]

        # No decrease-key on PriorityQueue: discard stale duplicates.
        if (current_state in visited):
            continue

        if (problem.is_goal_state(current_state)):
            break

        visited.append(current_state)
        for action in possible_actions:
            nextstate, cost = problem.get_successor(current_state, action)
            if (nextstate.x == -1 and nextstate.y == -1):
                continue  # action not applicable; skip before scoring it
            # heuristic = get_manhattan_distance(nextstate, goal_state) # manhattan distance heuristic
            heuristic = get_custom_heuristic(
                nextstate, goal_state)  # custom heuristic for q8.2
            key = (nextstate.x, nextstate.y, nextstate.orientation)
            new_f = current_cost + cost + heuristic
            # Enqueue when the state is new or this route has a lower f-value.
            # NOTE(review): state_cost stores f (g + h) rather than g, so the
            # comparison is between f-values — confirm this is intended.
            if ((nextstate not in visited and key not in state_cost)
                    or (key in state_cost and new_f < state_cost[key])):
                state_queue.put((new_f,
                                 (current_cost + cost, nextstate,
                                  current_actions + [action])))
                state_cost[key] = new_f

    if (problem.is_goal_state(current_state)):
        action_list = current_actions

    return action_list
示例#14
0
def bfs():
    """Breadth-first search using queue.Queue.

    Returns the list of actions to the goal, or [] when the frontier empties
    without finding it (the original looped on `while True`, so a failed
    search blocked forever in queue.get(); it also left action_list unbound).
    """
    start = time.time()
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    print("Running")
    explored = []
    queue_search = queue.Queue()
    queue_search.put((init_state, []))
    action_list = []  # returned as-is when no path is found
    flag = 0
    while not queue_search.empty():
        (curr_node, curr_action_list) = queue_search.get()
        explored.append(curr_node)

        for action in possible_actions:

            (next_possible_state,
             cost) = problem.get_successor(curr_node, action)

            # Skip invalid successors (x == -1, as in the sibling searches)
            # and already-expanded states.  (The original also tested the
            # truthiness of queue_search, which is always true.)
            if next_possible_state.x != -1 and next_possible_state not in explored:
                t_action_list = list(curr_action_list)
                t_action_list.append(action)
                queue_search.put((next_possible_state, t_action_list))
                if next_possible_state == goal_state:
                    print("goal reached")
                    flag = 1
                    end = time.time()
                    print(end - start)
                    action_list = t_action_list
                    break
        if flag == 1:
            break

    return action_list
示例#15
0
def astar():
    """A* search keeping the frontier as a repeatedly-sorted list.

    Frontier entries are (f-value, state, path of (state, action) pairs).
    Returns the list of actions to the goal; returns [''] and prints a
    message when no path is found.
    """
    start = time.time()
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []
    goal_reached = False

    def manhatten_heuristic(state):
        # h(n) = |dx| + |dy|
        return abs(state.x - goal_state.x) + abs(state.y - goal_state.y)

    def euclidian_heuristic(state):
        # Alternative straight-line heuristic, kept for experimentation.
        return ((state.x - goal_state.x)**2 + (state.y - goal_state.y)**2)**0.5

    stack = [(manhatten_heuristic(init_state) + 0, init_state, [(init_state,
                                                                 '')])]

    visited = list()
    while stack:
        # Sorting a nearly-sorted list is cheap (Timsort); acts as the
        # priority queue.
        stack.sort()
        (heuristic, vertex, path) = stack.pop(0)
        if vertex not in visited:
            if problem.is_goal_state(vertex):
                print("Goal Reached!!")
                path = path[1:]  # drop the (init_state, '') placeholder
                goal_reached = True
                break
            visited.append(vertex)
            for next_action in possible_actions:
                (current_neighbour,
                 cost_for_action) = problem.get_successor(vertex, next_action)
                # f(child) = f(parent) - h(parent) + step cost + h(child).
                # The original mixed heuristics here (manhattan at the root,
                # euclidean in the update), making f inconsistent; use the
                # manhattan heuristic throughout.
                f_of_n = cost_for_action + heuristic - manhatten_heuristic(
                    vertex) + manhatten_heuristic(current_neighbour)
                stack.append((f_of_n, current_neighbour,
                              path + [(current_neighbour, next_action)]))

    action_list = [actions for nodes, actions in path]
    if len(action_list) == 0 or not goal_reached:
        print("No path found!!")
        return ['']
    end = time.time()
    print("Time for execution of A*: ", end - start)
    return action_list
def astar_hs():
    """Weighted A* with a tie-breaking heuristic.

    The heuristic is scaled by (1 + 0.03125) so that f increases slightly as
    the search moves toward the goal, breaking ties among equal-cost paths.
    Returns the action list (first entry the "" placeholder), or [] when no
    path exists.
    """
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    visited_list = []
    astar_queue = []
    initial_heuristic = calculate_heuristic(init_state, goal_state)
    initial_heuristic *= (1 + 0.03125)  # tie-breaking nudge
    initial_cost = 0
    # Frontier entries are (f, g, state, path).  Tracking g separately fixes
    # the original's bug of reusing the popped f-value (g + weighted h) as
    # the child's path cost, which double-counted the heuristic at every
    # expansion.
    heapq.heappush(astar_queue, (initial_heuristic + initial_cost, initial_cost, init_state, [""]))
    start = time.time()
    while astar_queue:
        f_cost, g_cost, current_state, path_from_root = heapq.heappop(astar_queue)
        if current_state in visited_list:
            continue
        visited_list.append(current_state)
        if current_state == goal_state:
            end = time.time()
            print("PATH FOUND")
            print("Time taken in seconds", (end - start))
            return path_from_root
        for action in possible_actions:
            # get_successor returns (next_state, cost); x == -1 marks an
            # invalid move.
            next_possible_state, current_cost = problem.get_successor(current_state, action)
            if next_possible_state.x != -1 and next_possible_state not in visited_list:
                next_heuristic = calculate_heuristic(next_possible_state, goal_state)
                next_heuristic *= (1 + 0.03125)
                new_g = g_cost + current_cost
                heapq.heappush(astar_queue, (next_heuristic + new_g, new_g, next_possible_state, path_from_root + [action]))
    print("PATH NOT FOUND")
    return []
def ucs():
    """Uniform-cost search on a heapq frontier with manual decrease-key.

    Records each generated edge in `child` as [next_state, action,
    current_state] and reconstructs the action list backwards once the goal
    is popped.  Returns the (forward-ordered) action list; [] if the
    frontier empties first.
    """
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    action_list = []  # moves which take the bot from start to goal
    frontier = []
    explored = []
    child = []  # expanded edges as [next_state, action, current_state]
    cost = 0
    counter = itertools.count()  # tie-breaker so the heap never compares states
    count = next(counter)
    heapq.heappush(frontier, [cost, count, init_state])  # (cost, counter, state)
    entry_finder = []  # states currently on the frontier (membership copy)
    entry_finder.append(init_state)

    while frontier:
        current_cost, current_order, current_state = heapq.heappop(frontier)
        entry_finder.remove(current_state)

        if (current_state == goal_state):
            # Reconstruct the plan by walking child edges backwards from the
            # goal to the start.
            goal_child = [child_elem for child_elem in child if child_elem[0] == goal_state]
            action_list.append(goal_child[0][1])
            previous_node = goal_child[0][2]
            current_child = []

            # NOTE(review): identity comparison (`is not`) relies on
            # get_successor reusing state objects — confirm.
            while (previous_node is not init_state):
                current_child = [child_elem for child_elem in child if child_elem[0] == previous_node]
                action_list.append(current_child[0][1])
                previous_node = current_child[0][2]
                child.remove(current_child[0])  # shrink the next lookup
            break

        explored.append(current_state)
        for action in possible_actions:
            (nextstate, cost) = problem.get_successor(current_state, action)
            # NOTE(review): `nextstate != -1` compares a State object with an
            # int; sibling searches test nextstate.x == -1 — confirm.
            if nextstate not in entry_finder and nextstate not in explored and nextstate != -1:
                child.append([nextstate, action, current_state])
                count = next(counter)
                # get_successor gives the step cost between the two states.
                heapq.heappush(frontier, [current_cost + cost, count, nextstate])
                entry_finder.append(nextstate)
            elif nextstate in entry_finder:
                existing_node_frontier = [node for node in frontier if node[2] == nextstate]
                if existing_node_frontier[0][0] > current_cost + cost:
                    # Decrease-key: drop the stale entry, restore the heap
                    # invariant, then push the cheaper one.  The original
                    # plain-appended to the heap list after remove(), which
                    # breaks the invariant and corrupts later heappop order.
                    frontier.remove(existing_node_frontier[0])
                    heapq.heapify(frontier)
                    count = next(counter)
                    heapq.heappush(frontier, [current_cost + cost, count, nextstate])
                    # Replace the recorded edge for nextstate with the
                    # cheaper route.
                    current_child = [child_elem for child_elem in child if child_elem[0] == nextstate]
                    child.remove(current_child[0])
                    child.append([nextstate, action, current_state])

    # reversing the action list into start -> goal order
    action_list.reverse()
    return action_list
示例#18
0
 # Parse the planner's textual output: lines after the "ff: found ..."
 # marker are plan steps.
 # NOTE(review): `f`, `flag`, `obj_dict`, `action_parser` and `astar` are
 # defined outside this fragment — confirm against the enclosing function.
 for line in f:
     # print(line)
     token = line.strip().split(' ')
     if (flag == 0 and token[0] == "ff:" and token[1] == "found"):
         print(line)
         flag = 1
         continue
     # Skip blank lines and the trailing timing line ("... time ... seconds").
     if (flag == 1 and token[0] != '' and token[0] != 'time'
             and token[1] != 'seconds'):
         moves_list = action_parser(token)
         print("move_list:", moves_list)
         if (flag == 1 and moves_list[0] == "MOVE"):
             init_state_var = moves_list[1]
             goal_state_var = moves_list[2]
             # First leg: robot initial location -> a book's load location.
             if "INIT" in init_state_var and "BOOK" in goal_state_var:
                 init_state = problem.get_initial_state()
                 # print("initial state of robot:",init_state)
                 # Extract the book number from e.g. "BOOK_3_LOC".
                 l = [
                     int(s) for s in goal_state_var.split('_')
                     if s.isdigit()
                 ]
                 book_name = "book_" + str(l[0])
                 # print(book_name)
                 book_state = obj_dict["books"][book_name]["load_loc"][
                     0]
                 goal_state = problem.State(book_state[0],
                                            book_state[1], "EAST")
                 print(type(goal_state))
                 actions = astar(init_state, goal_state)
                 print("actions 1st:", actions)
                 problem.execute_move_action(actions)
示例#19
0
def astar():
    init_state = problem.get_initial_state()
    goal_state = problem.get_goal_state()
    possible_actions = problem.get_actions()
    '''
示例#20
0
def ucs():
    init_state = problem.get_initial_state()
    possible_actions = problem.get_actions()
    '''