Example #1
from queue import PriorityQueue

def searchGreedy(mapM, droneD, initialX, initialY, finalX, finalY):
    # Greedy best-first search on a 20x20 grid (mapM.surface[x][y] == 0 means free)
    found = False
    visited = []
    squares_to_visit = PriorityQueue()
    squares_to_visit.put((0, [initialX, initialY]))
    destination_square = [finalX, finalY]
    while not found and not squares_to_visit.empty():
        current_square = squares_to_visit.get()[1]
        if current_square in visited:
            continue
        visited.append(current_square)
        if current_square == destination_square:
            droneD.x, droneD.y = destination_square
            found = True
        else:
            aux = []
            current_x, current_y = current_square
            if current_x > 0 and mapM.surface[current_x-1][current_y] == 0 and [current_x-1, current_y] not in visited:
                aux.append([current_x-1, current_y])
            if current_x < 19 and mapM.surface[current_x+1][current_y] == 0 and [current_x+1, current_y] not in visited:
                aux.append([current_x+1, current_y])
            if current_y > 0 and mapM.surface[current_x][current_y-1] == 0 and [current_x, current_y-1] not in visited:
                aux.append([current_x, current_y-1])
            if current_y < 19 and mapM.surface[current_x][current_y+1] == 0 and [current_x, current_y+1] not in visited:
                aux.append([current_x, current_y+1])
            for square in aux:
                # greedy: order the frontier by Manhattan distance to the destination
                priority = abs(square[0] - finalX) + abs(square[1] - finalY)
                squares_to_visit.put((priority, square))
    return visited
Example #2
    def rearrangeBarcodes(self, barcodes: List[int]) -> List[int]:
        # requires: import heapq; from collections import Counter; from typing import List
        # Count each barcode, then repeatedly interleave the two most frequent
        # remaining values using a max-heap keyed on the (negated) count.
        counts = Counter(barcodes)
        pq = [[-count, value] for value, count in counts.items()]
        heapq.heapify(pq)
        ans = []
        while pq:
            if len(pq) >= 2:
                a = heapq.heappop(pq)  # most frequent remaining value
                b = heapq.heappop(pq)  # second most frequent
                ans.append(a[1])
                ans.append(b[1])
                a[0] += 1              # counts are stored negated
                b[0] += 1
                if a[0] < 0:
                    heapq.heappush(pq, a)
                if b[0] < 0:
                    heapq.heappush(pq, b)
            else:
                a = heapq.heappop(pq)
                ans.append(a[1])
        return ans
Example #3
def a_star_path_plan(map, start, end):
    # A* over a grid stored as a dict keyed by (x, y) tuples, where the value '0'
    # marks a free cell. Relies on the project's Nodes(previous, location) class,
    # which exposes prviousNode, currentNude and the cost fields G, H, F
    # (attribute names kept exactly as in the source project).
    start_position = Nodes(None, start)
    end_position = Nodes(None, end)

    start_position.G = 0
    start_position.H = abs(start[0] - end[0]) + abs(start[1] - end[1])
    start_position.F = start_position.G + start_position.H

    visted_NodeList = []                      # closed list: nodes already settled
    not_visted_NodeList = [start_position]    # open list, kept ordered by F

    while len(not_visted_NodeList) > 0:
        not_visted_NodeList.sort(key=lambda n: n.F)
        current = not_visted_NodeList.pop(0)
        visted_NodeList.append(current)

        # Goal reached: walk back along the previous-node links and reverse
        if current.currentNude == end_position.currentNude:
            path = []
            while current is not None:
                path.append(current.currentNude)
                current = current.prviousNode
            return path[::-1]

        (x, y) = current.currentNude
        neighbour_locations = [
            (x-1, y),  # up
            (x+1, y),  # down
            (x, y-1),  # left
            (x, y+1),  # right
        ]
        for location in neighbour_locations:
            # skip blocked or out-of-map cells
            if map.get(location) != '0':
                continue
            if any(v.currentNude == location for v in visted_NodeList):
                continue

            add_node_to_list = Nodes(current, location)
            # Manhattan cost from the start (G) and Manhattan heuristic to the goal (H)
            add_node_to_list.G = abs(location[0] - start_position.currentNude[0]) + abs(location[1] - start_position.currentNude[1])
            add_node_to_list.H = abs(location[0] - end_position.currentNude[0]) + abs(location[1] - end_position.currentNude[1])
            add_node_to_list.F = add_node_to_list.G + add_node_to_list.H

            # keep the neighbour only if no equal-or-cheaper copy is already queued
            if any(n.currentNude == location and n.F <= add_node_to_list.F
                   for n in not_visted_NodeList):
                continue
            not_visted_NodeList.append(add_node_to_list)
    return None
def dijkstra_modified(cell1: Cell,
                      cell2: Cell,
                      board: Board,
                      you="") -> List[Cell]:
    path = PriorityQueue()
    path.put((0, cell1))
    prev_step = {cell1: None}
    current_cost = {cell1: 0}
    possible_moves_for_other_snakes = possible_head_moves([
        Cell(snake.head.x, snake.head.y) for snake in board.snakes
        if snake.id != you
    ])
    print('Hateeeeeem')
    print(possible_moves_for_other_snakes)
    while not path.empty():
        current = path.get()[1]

        if current == cell2:
            break

        for i in range(4):
            next_cell = Cell(current.x + DX[i], current.y + DY[i])
            if next_cell in possible_moves_for_other_snakes or not is_cell_achievable(
                    next_cell, board, current_cost[current] + 1):
                continue

            new_cost = current_cost[current] + 1
            if next_cell not in current_cost or new_cost < current_cost[
                    next_cell]:
                current_cost[next_cell] = new_cost
                priority = new_cost + dist_between_cells(cell2, next_cell)
                path.put((priority, next_cell))
                prev_step[next_cell] = current

    # restore path
    if cell2 not in current_cost:
        return []

    path = []
    current = cell2
    while current:
        path.append(current)
        current = prev_step[current]

    return path[::-1]
def best_first_graph_search(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    f = memoize(f, 'f')
    node = Node(problem.initial)
    if problem.goal_test(node.state):
        return node
    frontier = PriorityQueue('min', f)
    frontier.append(node)
    explored = set()

    while frontier:

        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)

    return None
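
The docstring above points out that the choice of f alone picks the strategy. A minimal sketch of the usual wrappers, assuming the same AIMA-style Node (with path_cost and depth attributes) used by best_first_graph_search:

def greedy_best_first_graph_search(problem, h):
    # f = h(n): always expand the node that looks closest to the goal
    return best_first_graph_search(problem, h)

def astar_search(problem, h):
    # f = g(n) + h(n): cost already paid plus the heuristic estimate
    return best_first_graph_search(problem, lambda n: n.path_cost + h(n))

def breadth_first_by_f(problem):
    # f = depth: shallowest nodes first, i.e. breadth-first order
    return best_first_graph_search(problem, lambda n: n.depth)
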
class UniformCostSearchStrategy(SearchStrategy):
    '''
    Uniform-cost search strategy: nodes are kept in a priority queue and expanded in order of increasing path cost.
    '''
    def __init__(self):
        self.queue = PriorityQueue()

    def isEmpty(self):
        return (len(self.queue)) == 0

    def addNode(self, node):
        return self.queue.append(node)

    def removeNode(self):
        return self.queue.pop()
Example #7
File: ucs.py  Project: shechty2/AI_Ex1
def best_first_graph_search(problem, f):
    node = Node(problem.s_start)
    frontier = PriorityQueue(f)  # Priority Queue
    frontier.append(node)
    closed_list = set()
    while frontier:
        if len(closed_list) % 1000 == 0:
            print(f'size of closed list:{len(closed_list)}')
        node = frontier.pop()
        if problem.is_goal(node.state):
            return node.solution()
        closed_list.add(node.state)
        for child in node.expand(problem):
            if child.state not in closed_list and child not in frontier:
                frontier.append(child)
            elif child in frontier and f(child) < frontier[child]:
                del frontier[child]
                frontier.append(child)
    return None
Example #8
def best_first_graph_search(problem, f):
    f = memoize(f, 'f')
    node = Node(problem.initial)
    if problem.goal_test(node.state):
        return node
    frontier = PriorityQueue(min, f)
    frontier.append(node)
    explored = set()
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
    return None
class hClusterer:
    """ this clusterer assumes that the first column of the data is a label
    not used in the clustering. The other columns contain numeric data"""
    
    def __init__(self, filename):
        file = open(filename)
        self.data = {}
        self.counter = 0
        self.queue = PriorityQueue()
        lines = file.readlines()
        file.close()
        header = lines[0].split(',')
        self.cols = len(header)
        self.data = [[] for i in range(len(header))]
        for line in lines[1:]:
            cells = line.split(',')
            toggle = 0
            for cell in range(self.cols):
                if toggle == 0:
                   self.data[cell].append(cells[cell])
                   toggle = 1
                else:
                    self.data[cell].append(float(cells[cell]))
        # now normalize number columns (that is, skip the first column)
        for i in range(1, self.cols):
                self.data[i] = normalizeColumn(self.data[i])

        ###
        ###  I have read in the data and normalized the 
        ###  columns. Now for each element i in the data, I am going to
        ###     1. compute the Euclidean Distance from element i to all the 
        ###        other elements.  This data will be placed in neighbors, which
        ###        is a Python dictionary. Let's say i = 1, and I am computing
        ###        the distance to the neighbor j and let's say j is 2. The
        ###        neighbors dictionary for i will look like
        ###        {2: ((1,2), 1.23),  3: ((1, 3), 2.3)... }
        ###
        ###     2. find the closest neighbor
        ###
        ###     3. place the element on a priority queue, called simply queue,
        ###        based on the distance to the nearest neighbor (and a counter
        ###        used to break ties.
        neighbors={}
        closest = {}
        closestDistance = {}
        for i in range(len(self.data[0])):
            neighbors[i]={}
            dis = float('inf')
            closeneigh=i
            for j in range(len(self.data[0])):
                if i!=j:
                    neighbors[i][j]=((i,j),self.distance(i,j))
                    if neighbors[i][j][1]<dis:
                        dis=neighbors[i][j][1]
                        closeneigh=j
            currentCluster = [self.data[0][i]]
            currentNeigh = self.data[0][closeneigh]
            neighList = [currentNeigh, dis, (i, closeneigh)]
            # queue entry: (distance to nearest neighbour, counter i used to break ties,
            #               [cluster members, nearest-neighbour info, distance dict])
            tupleForQueue = (dis, i, [currentCluster, neighList, neighbors[i]])
            self.queue.put(tupleForQueue)

    def merge(self, dic1, dic2):
        dic3 = {}
        assert len(dic1) > 0
        assert len(dic2) > 0
        for key in dic1.keys():
            if key in dic2:
                op1 = dic1[key][1]
                op2 = dic2[key][1]
                print(dic1[key][0])
                print(dic2[key][0])
                if op1 < op2:
                    dic3[key] = (dic1[key][0], op1)
                else:
                    dic3[key] = (dic2[key][0], op2)
        return dic3



    def distance(self, i, j):
        sumSquares = 0
        for k in range(1, self.cols):
            sumSquares += (self.data[k][i] - self.data[k][j])**2
        return math.sqrt(sumSquares)
            

    def cluster(self):
        currentIndex = self.queue.qsize()
        while self.queue.qsize() > 1:
            currentIndex += 1
            clus1 = self.queue.get()
            clus2 = self.queue.get()
            print(clus1)
            print(clus2)
            dist1 = clus1[0]
            dist2 = clus2[0]
            if dist1 < dist2:
                newDist = dist1
                closestInfo = clus1[2][1]
            else:
                newDist = dist2
                closestInfo = clus2[2][1]
            list1 = clus1[2]
            list2 = clus2[2]
            clusName1 = list1[0]
            clusName2 = list2[0]
            newClus = clusName2 + clusName1
            newDic = self.merge(clus1[2][2], clus2[2][2])
            self.queue.put((newDist, currentIndex, [newClus, closestInfo, newDic]))
        return self.queue.get()
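
The long comment in __init__ above explains why each queue entry carries both the distance and a counter. A small stand-alone sketch (plain queue.PriorityQueue, hypothetical data) of the tie-breaking behaviour it relies on:

from queue import PriorityQueue

q = PriorityQueue()
q.put((1.5, 0, ['A']))   # (distance, counter, payload)
q.put((1.5, 1, ['B']))   # same distance: the counter decides the order
q.put((0.9, 2, ['C']))
print(q.get())           # (0.9, 2, ['C'])  -- smallest distance first
print(q.get())           # (1.5, 0, ['A'])  -- tie broken by the counter, payloads never compared
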
class NPuzzleSolver:
    """
        Uses the BFS/DFS/A* algos to solve a given n-puzzle board
    """
    initial_state = None
    algo = ""

    # The frontier in BFS/DFS is a queue/stack; for A*, it's a heap (PriorityQueue).
    # The frontier set mirrors the frontier so membership tests are O(1).
    frontier = None
    frontier_set = set()

    # All of the fully explored states in this set
    explored = set()

    def __init__(self, algo, initial_state=None):
        self.initial_state = initial_state
        if algo == "bfs" or algo == "dfs":
            self.frontier = dq()
            self.frontier.append(initial_state)
        else:
            self.frontier = PriorityQueue()
            self.frontier.put(initial_state)
        self.frontier_set.add(initial_state)
        self.algo = algo

    def solve(self):
        """
        Attempts to solve an n-puzzle and returns a stats
        dict, or None if no solution exists
        """
        start_time = time.time()
        maxdepth = 0
        break_cond = False

        while(not break_cond):
            if self.algo == "bfs":
                # Pop the leftmost element if doing a bfs (Queue)
                current_state = self.frontier.popleft()
            elif self.algo == "dfs":
                # Pop the rightmost element if doing a dfs (Stack)
                current_state = self.frontier.pop()
            else:
                # Get element with highest priority if doing A* (Heap)
                current_state = self.frontier.get()
            self.frontier_set.remove(current_state)
            if (self.isFinalState(current_state)):
                soln = self.get_solution_moves(current_state)
                end_time = time.time()
                stats = {}
                stats["nodes_expanded"] = len(self.explored)
                stats["search_depth"] = current_state.depth
                stats["max_search_depth"] = maxdepth
                stats["cost_of_path"] = len(soln)
                stats["time"] = end_time - start_time
                stats["path"] = soln
                return stats
            neighbors = current_state.generate_possible_states()
            if self.algo == "dfs":
                neighbors.reverse()

            for neighbor in neighbors:
                if neighbor not in self.explored and neighbor not in self.frontier_set:
                    if self.algo == "bfs" or self.algo == "dfs":
                        self.frontier.append(neighbor)
                    else:
                        self.frontier.put(neighbor)
                    self.frontier_set.add(neighbor)
                    if neighbor.depth > maxdepth:
                        maxdepth = neighbor.depth
            self.explored.add(current_state)
            if self.algo == "bfs" or self.algo == "dfs":
                frontier_sz = len(self.frontier)
            else:
                frontier_sz = self.frontier.qsize()
            logging.debug("Frontier size = " +
                          str(frontier_sz) +
                          "; Explored size = " +
                          str(len(self.explored)))
            if self.algo == "bfs" or self.algo == "dfs":
                break_cond = len(self.frontier) == 0
            else:
                break_cond = self.frontier.empty()
        logging.error("This is an unsolvable board!")
        return None

    def get_solution_moves(self, final_state):
        """
        Gets the sequence of moves from parent to this state
        """
        moves = dq()
        current_state = final_state
        while current_state is not None:
            if current_state.parent_move is not None:
                moves.appendleft(current_state.parent_move)
            current_state = current_state.parent
        res = []
        [res.append(move) for move in moves]
        return res

    def isFinalState(self, state):
        """
        Checks if this is the final state (0,1,2...m^2-1)
        """
        internal_state = state.tiles
        for i in range(len(internal_state) - 1):
            if internal_state[i] != i:
                return False
        return True
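
The frontier comment above pairs a queue/stack/heap with a plain set so membership tests stay O(1). A minimal sketch of that pattern on its own, with hypothetical string states outside the class:

from collections import deque

frontier = deque()      # queue for BFS; treat it as a stack (pop from the right) for DFS
frontier_set = set()    # mirrors the frontier for O(1) "already queued?" checks

def push(state):
    frontier.append(state)
    frontier_set.add(state)

def pop_bfs():
    state = frontier.popleft()   # FIFO order
    frontier_set.remove(state)
    return state

push("s0")
push("s1")
print(pop_bfs())             # s0
print("s1" in frontier_set)  # True
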
Example #11
class Solver(object):
    """Solves a puzzle using one of the following methods:
       BFS --> Breadth First Search
       DFS --> Depth First Search
       AST --> A-star search
       IDA --> Ida-star search
    """
    def __init__(self, method, initialState):
        self.method = method  # method used to solve puzzle
        self.state = Node(initialState)  # instance of State class
        #self.tree = self.state # tree starting from initial configuration
        if self.method == 'bfs':
            self.frontier = deque([self.state], None)
        elif self.method == 'dfs':
            self.frontier = [self.state]  # list of states to be explored
        elif self.method == 'ast':
            self.frontier = PriorityQueue()
            self.frontier.put(self.state)
        elif self.method == 'ida':
            self.frontier = [self.state]
            self.threshold = 1
            self.initialState = Node(initialState)
        self.explored = set()  # set of states already explored
        self.goal = Node(list(range(len(initialState.split(',')))))
        self.pathToGoal = []  # something like ['Up', 'Left', 'Left']
        self.costOfPath = 0
        self.nodesExpanded = 0
        self.fringeSize = 1
        self.maxFringeSize = 0
        self.searchDepth = 0
        self.maxSearchDepth = 0
        self.runningTime = 0.0
        self.maxRamUsage = 0.0
        self.start = process_time()

    def solve(self):
        """Main method for solving puzzle"""

        if self.method == 'bfs':
            retVal = self.bfs()
        elif self.method == 'dfs':
            retVal = self.dfs()
        elif self.method == 'ast':
            retVal = self.ast()
        elif self.method == 'ida':
            retVal = self.ida()
            while retVal is not True:
                self.threshold = self.threshold + 1
                self.frontier = [self.initialState]
                self.explored = set()
                self.nodesExpanded = 0
                self.fringeSize = 1
                retVal = self.ida()
        else:
            raise ValueError('Possible methods are dfs, bfs, ast, ida')

        if not retVal:
            raise RuntimeError('Solver didn\'t reach final state')

        self.runningTime = process_time() - self.start
        self.maxRamUsage = 0
        #resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    def bfs(self):

        while len(self.frontier) > 0:
            self.state = self.frontier.popleft()
            #print("Current State: " + str(self.state.board.values))
            self.fringeSize -= 1
            self.explored.add(str(self.state.board.values))

            if self.state.testEqual(self.goal):
                self.searchDepth = self.state.depth
                self.costOfPath = self.state.depth
                self.pathToGoal = self.getPathToGoal()
                return True

            for neighbour in self.state.neighbours():
                #if not neighbour.belongs(self.frontier) and not neighbour.belongs(self.explored):
                if str(neighbour.board.values) not in self.explored:
                    self.frontier.append(neighbour)
                    self.explored.add(str(neighbour.board.values))
                    self.fringeSize += 1
                    if neighbour.depth > self.maxSearchDepth:
                        self.maxSearchDepth = neighbour.depth

            self.nodesExpanded += 1

            if self.fringeSize > self.maxFringeSize:
                self.maxFringeSize = self.fringeSize

    def dfs(self):
        while len(self.frontier) > 0:
            self.state = self.frontier.pop()
            #print("Current State:\n" + str(self.state))
            self.fringeSize -= 1
            self.explored.add(str(self.state.board.values))

            if self.state.testEqual(self.goal):
                self.searchDepth = self.state.depth
                self.costOfPath = self.state.depth
                self.pathToGoal = self.getPathToGoal()
                return True

            neighbours = reversed(self.state.neighbours())

            for neighbour in neighbours:
                #if not neighbour.belongs(self.frontier) and not neighbour.belongs(self.explored):
                if str(neighbour.board.values) not in self.explored:
                    self.frontier.append(neighbour)
                    self.explored.add(str(neighbour.board.values))
                    self.fringeSize += 1
                    if neighbour.depth > self.maxSearchDepth:
                        self.maxSearchDepth = neighbour.depth

            self.nodesExpanded += 1

            if self.fringeSize > self.maxFringeSize:
                self.maxFringeSize = self.fringeSize

    def ast(self):
        while self.frontier.qsize() > 0:
            self.state = self.frontier.get()
            #print("Current State:\n" + str(self.state))
            self.fringeSize -= 1
            self.explored.add(str(self.state.board.values))

            if self.state.testEqual(self.goal):
                self.searchDepth = self.state.depth
                self.costOfPath = self.state.depth
                self.pathToGoal = self.getPathToGoal()
                return True

            neighbours = self.state.neighbours()

            for neighbour in neighbours:
                if str(neighbour.board.values) not in self.explored:
                    neighbour.heuristics = neighbour.depth + neighbour.board.manhattanDist(
                    )
                    self.frontier.put(neighbour)
                    self.explored.add(str(neighbour.board.values))
                    self.fringeSize += 1
                    if neighbour.depth > self.maxSearchDepth:
                        self.maxSearchDepth = neighbour.depth

            self.nodesExpanded += 1

            if self.fringeSize > self.maxFringeSize:
                self.maxFringeSize = self.fringeSize

    def ida(self):
        while len(self.frontier) > 0:
            self.state = self.frontier.pop()
            #print("Current State:\n" + str(self.state))
            self.fringeSize = len(self.frontier)
            self.explored.add(str(self.state.board.values))

            if self.state.depth > self.maxSearchDepth:
                self.maxSearchDepth = self.state.depth

            if self.state.testEqual(self.goal):
                self.searchDepth = self.state.depth
                self.costOfPath = self.state.depth
                self.pathToGoal = self.getPathToGoal()
                return True

            neighbours = reversed(self.state.neighbours())

            for neighbour in neighbours:
                #if not neighbour.belongs(self.frontier) and not neighbour.belongs(self.explored):
                if str(neighbour.board.values) not in self.explored:
                    neighbour.heuristics = neighbour.depth + neighbour.board.manhattanDist(
                    )
                    if neighbour.heuristics <= self.threshold:
                        self.frontier.append(neighbour)
                        self.explored.add(str(neighbour.board.values))

            self.fringeSize = len(self.frontier)
            self.nodesExpanded += 1

            if self.fringeSize > self.maxFringeSize:
                self.maxFringeSize = self.fringeSize

    def writeResults(self):
        f = open('output.txt', 'w')
        s = "path_to_goal: " + str(self.pathToGoal) + "\n"
        s += "cost_of_path: " + str(self.costOfPath) + "\n"
        s += "nodes_expanded: " + str(self.nodesExpanded) + "\n"
        s += "fringe_size: " + str(self.fringeSize) + "\n"
        s += "max_fringe_size: " + str(self.maxFringeSize) + "\n"
        s += "search_depth: " + str(self.searchDepth) + "\n"
        s += "max_search_depth: " + str(self.maxSearchDepth) + "\n"
        s += "running_time: " + str(self.runningTime) + "\n"
        s += "max_ram_usage: " + str(self.maxRamUsage)
        f.write(s)
        #print(s)
        f.close()

    def getPathToGoal(self):
        cState = self.state
        path = []
        while cState.action is not None:
            path.append(cState.action)
            cState = cState.parent
        return path[::-1]
Example #12
class NPuzzleSolver:
    """
        Uses the BFS/DFS/A* algos to solve a given n-puzzle board
    """
    initial_state = None
    algo = ""

    # The frontier in BFS/DFS is a queue/stack; for A*, it's a heap (PriorityQueue).
    # The frontier set mirrors the frontier so membership tests are O(1).
    frontier = None
    frontier_set = set()

    # All of the fully explored states in this set
    explored = set()

    def __init__(self, algo, initial_state=None):
        self.initial_state = initial_state
        if algo == "bfs" or algo == "dfs":
            self.frontier = dq()
            self.frontier.append(initial_state)
        else:
            self.frontier = PriorityQueue()
            self.frontier.put(initial_state)
        self.frontier_set.add(initial_state)
        self.algo = algo

    def solve(self):
        """
        Attempts to solve an n-puzzle and returns a stats
        dict, or None if no solution exists
        """
        start_time = time.time()
        maxdepth = 0
        break_cond = False

        while (not break_cond):
            if self.algo == "bfs":
                # Pop the leftmost element if doing a bfs (Queue)
                current_state = self.frontier.popleft()
            elif self.algo == "dfs":
                # Pop the rightmost element if doing a dfs (Stack)
                current_state = self.frontier.pop()
            else:
                # Get element with highest priority if doing A* (Heap)
                current_state = self.frontier.get()
            self.frontier_set.remove(current_state)
            if (self.isFinalState(current_state)):
                soln = self.get_solution_moves(current_state)
                end_time = time.time()
                stats = {}
                stats["nodes_expanded"] = len(self.explored)
                stats["search_depth"] = current_state.depth
                stats["max_search_depth"] = maxdepth
                stats["cost_of_path"] = len(soln)
                stats["time"] = end_time - start_time
                stats["path"] = soln
                return stats
            neighbors = current_state.generate_possible_states()
            if self.algo == "dfs":
                neighbors.reverse()

            for neighbor in neighbors:
                if neighbor not in self.explored and neighbor not in self.frontier_set:
                    if self.algo == "bfs" or self.algo == "dfs":
                        self.frontier.append(neighbor)
                    else:
                        self.frontier.put(neighbor)
                    self.frontier_set.add(neighbor)
                    if neighbor.depth > maxdepth:
                        maxdepth = neighbor.depth
            self.explored.add(current_state)
            if self.algo == "bfs" or self.algo == "dfs":
                frontier_sz = len(self.frontier)
            else:
                frontier_sz = self.frontier.qsize()
            logging.debug("Frontier size = " + str(frontier_sz) +
                          "; Explored size = " + str(len(self.explored)))
            if self.algo == "bfs" or self.algo == "dfs":
                break_cond = len(self.frontier) == 0
            else:
                break_cond = self.frontier.empty()
        logging.error("This is an unsolvable board!")
        return None

    def get_solution_moves(self, final_state):
        """
        Gets the sequence of moves from parent to this state
        """
        moves = dq()
        current_state = final_state
        while current_state is not None:
            if current_state.parent_move is not None:
                moves.appendleft(current_state.parent_move)
            current_state = current_state.parent
        res = []
        [res.append(move) for move in moves]
        return res

    def isFinalState(self, state):
        """
        Checks if this is the final state (0,1,2...m^2-1)
        """
        internal_state = state.tiles
        for i in range(len(internal_state) - 1):
            if internal_state[i] != i:
                return False
        return True
Example #13
class Fringe:
    """
    A container for nodes. It supports different strategies for saving and
    retrieving the nodes in different orders.
    """
    def __init__(self, strategy):
        self.strategy = strategy
        # Either use a list or a priority queue
        if strategy == SearchStrategy.BREADTH_FIRST or strategy == SearchStrategy.DEPTH_FIRST:
            self.fringe = []
        elif strategy == SearchStrategy.UNIFORM_COST or \
                strategy == SearchStrategy.GREEDY_FIRST or \
                strategy == SearchStrategy.A_STAR:
            self.fringe = PriorityQueue()

    def print_contents(self):
        if DEBUG_V > 1:
            print("current Fringe:")
            if isinstance(self.fringe, list):
                for item in self.fringe:
                    print("   {}".format(item))
            elif isinstance(self.fringe, PriorityQueue):
                for i in range(self.fringe.qsize()):
                    print("   {}".format(self.fringe.queue[i]))

    def insert(self, node):
        # Breadth first and depth first: simply append the node to the list
        if self.strategy == SearchStrategy.BREADTH_FIRST:
            self.fringe.append(node)
        elif self.strategy == SearchStrategy.DEPTH_FIRST:
            self.fringe.append(node)
        # Uniform cost: Set the priority to the pathCost
        elif self.strategy == SearchStrategy.UNIFORM_COST:
            self.fringe.put((node.pathCost, node))
        # Greedy first: Set the priority to the heuristic value
        elif self.strategy == SearchStrategy.GREEDY_FIRST:
            self.fringe.put((node.h, node))
        # A*: Set the priority to the pathCost + heuristic value
        elif self.strategy == SearchStrategy.A_STAR:
            self.fringe.put((node.f, node))

    def pop(self):
        # Breadth first: Pop the node from the beginning of the list
        if self.strategy == SearchStrategy.BREADTH_FIRST:
            return self.fringe.pop(0)
        # Depth first: Pop the node from the end of the list
        elif self.strategy == SearchStrategy.DEPTH_FIRST:
            return self.fringe.pop()
        # Other strategies: Get the value from the queue with the lowest
        # priority value
        elif self.strategy == SearchStrategy.UNIFORM_COST:
            return self.fringe.get()[1]
        elif self.strategy == SearchStrategy.GREEDY_FIRST:
            return self.fringe.get()[1]
        elif self.strategy == SearchStrategy.A_STAR:
            return self.fringe.get()[1]

    def extend(self, items):
        for item in items:
            self.insert(item)

    def empty(self):
        if isinstance(self.fringe, list):
            return self.fringe == []
        elif isinstance(self.fringe, PriorityQueue):
            return self.fringe.empty()
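
A short usage sketch for the Fringe class, assuming the SearchStrategy enum this module references; the nodes below are hypothetical stand-ins carrying only the attributes Fringe reads:

from types import SimpleNamespace

cheap = SimpleNamespace(pathCost=1, h=2, f=3)
costly = SimpleNamespace(pathCost=4, h=4, f=8)

fringe = Fringe(SearchStrategy.A_STAR)
fringe.extend([costly, cheap])   # each node is queued with priority node.f
print(fringe.pop().f)            # 3 -- the lowest f is retrieved first
print(fringe.empty())            # False, one node still queued
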
Example #14
File: gra.py  Project: EnderGed/Heimdallr
class Game():
	def __init__(self, user_id, teams):
		'''
		phase - e.g. not started yet, started
		players - all players
		points - dictionary; points[team] = (list, position)
		teams - team names
		'''
		self.phase = 0
		self.players = []
		self.points = None
		self.teams = teams
		self.not_active_bombs = PriorityQueue()
		self.active_bombs = deque()
		self.exploding_bombs = deque()
		self.created_by = user_id
		self.id = self.create_id()
		
	def get_phase(self): 
		return self.phase
	
	def create_id(self):
		return randint(0,255)
	
	#before start
	def add_player(self, ID, messanger, team=0, bomb_limit=10, bombs_in_inventory=10, bomb_radius=100):
		'''
		player = add_player (ID, team, limit_bomb)
		'''
		if self.get_phase() != 0: raise GameError(1)
		for p in self.players: 
			if p.get_ID() == ID:
				raise GameError(3,'player exists')
		
		player = Player(ID, messanger,team,None,bomb_limit,bombs_in_inventory,bomb_radius)
		self.players.append(player)
		return player
		
	def add_team(self, teamname):
		if self.get_phase() != 0: raise GameError(1)
		if len(self.teams) >= 2: raise GameError(2,'too many teams')
		if teamname in self.teams: raise GameError(3,'team exists')
		
		self.teams.append(teamname)
	
	def assign(self, player, teamname):
		'''
		player - an ID or a Player object? - whichever way the server stores it
			(if it is an ID, the player must first be found in players; if that fails -> error)
		'''
		print(player)
		for p in self.players:
			print(p)
		if player not in self.players: raise GameError(4,'player does not exist')
		if teamname not in self.teams: raise GameError(4,'team does not exist')
		
		player.set_team(teamname)
		
	def load_points(self):
		
		#check if everything else is done
		if self.get_phase() != 0: raise GameError(1)
		if len(self.teams) != 2: raise GameError(2,'number of teams should be 2: '+str(len(self.teams)))
		if len(self.players) < 1: raise GameError(2,'should have more than 5 players: '+str(len(self.players)))
		for p in self.players:
			if not p.is_ready(): raise GameError(1,'some players not ready')
			
		#all done -> next phase
		self.phase = 1
		self.players = tuple(self.players)
		
			
		l1 = get_points('pointsA.csv')
		l2 = get_points('pointsB.csv')
		self.points = {self.teams[0]: (l1,0), self.teams[1]: (l2,0)}
		self.phase = 2
		
	def start(self):
		if self.get_phase() != 2: raise GameError(1)
		self.phase = 3
		self.Exploder(self).start()
		
		
		
	#game - bombs
	def place_bomb(self, player, position):
		if not player.can_place_bomb():
			raise GameError(1)
		
		bomb = Bomb(position[0], position[1], player)
		self.not_active_bombs.put((time.time()+bomb.time_to_activate, bomb))
		player.bombs_in_inventory -= 1

	#simple timer for managing bombs
	class Exploder(Thread):
		
		def __init__(self,game):
			Thread.__init__(self)
			self.game = game
		
		def run(self):
			while self.game.get_phase() == 3:
				time.sleep(10)
				
				#activate bombs
				try:
					tme, bomb = self.game.not_active_bombs.get_nowait()
					if tme < time.time():
						self.game.active_bombs.append(bomb)
					else:
						self.game.not_active_bombs.put_nowait((tme,bomb))
				except: pass
				
				#explode bombs
				try:
					for tme, bomb in self.game.exploding_bombs:
						if tme < time.time():
							for player in filter(bomb.will_explode, self.game.players):
								player.set_alive(False)
								#send info to player
				except: pass
					

	def update_player(self,player):
		'''
		returns information for the player:
			- BOMBS that are about to blow up and can hurt him
			- a POINT if the player got to his next point
			  or None if he hasn't
		'''
		
		#check for bombs activated by the player
		try:
			for bomb in list(filter(player.is_in_range, self.active_bombs)):
				self.active_bombs.remove(bomb)
				self.exploding_bombs.append((time.time()+bomb.delay, bomb))
		except: pass
		
		#check for bombs "in range"
		bombs = filter(lambda b: b.will_explode(player), self.exploding_bombs)
		
				
		#check if player in his current point
		points, ctr = self.points[player.get_team()]
		if player.is_in_range(points[ctr]):
			point = points[ctr]
		else: point = None
		
		return bombs,point
		
			
	def score_point(self,player):
		'''
		to be called when a player solves a puzzle he got after update_player
		input: player, 
			   point returned by update_player
		returns: player's team,
				 clue on how to get to the next point for this team
		'''
		
		l, ctr = self.points[player.get_team()]
		if ctr == len(l):
			#game won
			return (player.get_team())
		self.points[player.get_team()] = (l,ctr+1)
		return (player.get_team(),l[ctr].clue)
Example #15
File: AStar.py  Project: nimaiji/AICodes
def get_euclidean_heuristics(x: int, y: int, current_goal: tuple, m: Map) -> PriorityQueue:
    heuristics = PriorityQueue()
    for s in m.get_successors(x, y):
        distance = ((x - s[0]) ** 2 + (y - s[1]) ** 2) ** 0.5
        heuristics.put((distance, s))
    return heuristics
Example #16
class Leader_Memmory:
    """
    Not fully implemented yet.
    1. append(exprience)
    2. get_sample
    """
    def __init__(self, env_info):
        self.n_action = env_info["n_actions"]
        self.n_agent = env_info["n_agents"]
        self.obs_shape = env_info['obs_shape']
        self.max_seq_len = env_info['episode_limit']
        self.state_shape = env_info['state_shape']

        self.PQ = False
        if self.PQ:
            self.trajectories = PriorityQueue()
        else:
            self.trajectories = deque()
        self.max_n_trajextories = 500
        self.max_score = 0
        self.episode_index = 0

        self.current_trajectory = {
            'observation': [],
            'state': [],
            'available_action': [],
            'joint_action': [],
            'action_onehot': [],
            'reward': [],
            'score': 0
        }

        self.preprocess = {
            'joint_action': {
                'action_onehot': self.action_onehot
            }
        }

    def action_onehot(self, joint_action):
        one_hot = th.zeros((self.n_agent, self.n_action))
        # print(joint_action.unsqueeze(1))
        one_hot = one_hot.scatter(dim=1,
                                  index=joint_action.unsqueeze(1),
                                  value=1)
        # print(one_hot)
        return one_hot

    def append(self, exprience):
        for key in exprience:
            if key in self.current_trajectory:
                self.current_trajectory[key].append(exprience[key])
            if key in self.preprocess:
                for new_key in self.preprocess[key]:
                    preprocesser = self.preprocess[key][new_key]
                    self.current_trajectory[new_key].append(
                        preprocesser(exprience[key]))

    def end_trajectory(self, exprience):
        self.append(exprience)
        self.current_trajectory['score'] = exprience['eps_reward']

        if self.PQ:
            self.trajectories.put((exprience['eps_reward'], self.episode_index,
                                   copy.deepcopy(self.current_trajectory)))
        else:
            self.trajectories.append(copy.deepcopy(self.current_trajectory))
        self.episode_index += 1
        self.current_trajectory.clear()

        self.current_trajectory = {
            'observation': [],
            'state': [],
            'available_action': [],
            'joint_action': [],
            'action_onehot': [],
            'reward': [],
            'score': 0
        }
        self.max_score = max(self.max_score, exprience['eps_reward'])
        if self.PQ:
            if self.trajectories.queue.__len__() > self.max_n_trajextories:
                self.trajectories.get()
        else:
            if self.trajectories.__len__() > self.max_n_trajextories:
                self.trajectories.popleft()

    def get_item(self, e):
        if self.PQ:
            _, _, trajectory = self.trajectories.queue[e]
        else:
            trajectory = self.trajectories[e]
        trajectory_len = len(trajectory['observation'])
        fill_len = self.max_seq_len + 1 - trajectory_len
        mask = th.zeros(self.max_seq_len)
        mask[:trajectory_len - 1] = 1
        mask = mask.expand(self.n_agent, -1)
        done = th.zeros(self.max_seq_len)
        done[trajectory_len - 2:] = 1
        done = done.expand(self.n_agent, -1)
        observation = th.FloatTensor(trajectory['observation'])
        observation = th.cat(
            (observation, th.zeros((fill_len, self.n_agent, self.obs_shape))))
        reward = th.FloatTensor(trajectory['reward'])
        reward = th.cat((reward, th.zeros(fill_len))).expand(self.n_agent, -1)
        action = th.stack(trajectory['joint_action'])
        action = th.cat(
            (action, th.zeros((fill_len, self.n_agent), dtype=th.long)))
        action_onehot = th.stack(trajectory['action_onehot'])
        action_onehot = th.cat(
            (action_onehot, th.zeros((fill_len, self.n_agent, self.n_action))))
        action_avail = th.FloatTensor(trajectory['available_action'])
        action_avail = th.cat(
            (action_avail, th.zeros((fill_len, self.n_agent, self.n_action))))
        return mask, done, observation, reward, action, action_onehot, action_avail

    def get_sample(self, batch_size=32):
        """
        For now seq_len is always the maximum length, but that may change later; shorter trajectories would then need padding.
        :param batch_size:
        :return:
        """
        obs_batch = []
        avail_batch = []
        act_batch = []
        rew_batch = []
        action_onehot_batch = []
        mask_batch = []
        done_batch = []

        if self.PQ:
            trajectory_len = self.trajectories.queue.__len__()
        else:
            trajectory_len = self.trajectories.__len__()

        samlpe_new_memory = batch_size // 4
        new_memory = trajectory_len // 4

        for i in range(batch_size - samlpe_new_memory):
            e = -rd.randint(1, new_memory)
            mask, done, observation, reward, action, action_onehot, action_avail = self.get_item(
                e)
            mask_batch.append(mask)
            done_batch.append(done)
            obs_batch.append(observation)
            rew_batch.append(reward)
            act_batch.append(action)
            action_onehot_batch.append(action_onehot)
            avail_batch.append(action_avail)

        for i in range(samlpe_new_memory):
            e = rd.randint(0, trajectory_len - 1)
            mask, done, observation, reward, action, action_onehot, action_avail = self.get_item(
                e)
            mask_batch.append(mask)
            done_batch.append(done)
            obs_batch.append(observation)
            rew_batch.append(reward)
            act_batch.append(action)
            action_onehot_batch.append(action_onehot)
            avail_batch.append(action_avail)

        batch = {
            'observation': th.stack(obs_batch),
            'available_action': th.stack(avail_batch),
            'action': th.stack(act_batch),
            'action_onehot': th.stack(action_onehot_batch),
            'reward': th.stack(rew_batch),
            'done': th.stack(done_batch),
            'mask': th.stack(mask_batch),
            'len': self.max_seq_len + 1,
            'batch_size': batch_size
        }

        return batch

    def show_memory(self):
        message = "agent ".format(self.index)
        self.log.info(message)
        Signal.get_signal().emit_signal_str(message)

        if self.PQ:
            for _, _, t in self.trajectories.queue:
                message = "len: {}; score: {}".format(len(t['observation']),
                                                      t['score'])
                self.log.info(message)
                Signal.get_signal().emit_signal_str(message)

    def get_current_trajectory(self):
        if self.current_trajectory['action_onehot'] == []:
            current_action_onehot = []
        else:
            current_action_onehot = th.stack(
                self.current_trajectory['action_onehot']).unsqueeze(0)
        batch = {
            'observation':
            th.FloatTensor([self.current_trajectory['observation']]),
            'action_onehot': current_action_onehot,
            'len': len(self.current_trajectory['observation']),
            'batch_size': 1
        }
        return batch
Example #17
def best_first_graph_search(problem, f):

    # these two variables are used for the visualisations
    iterations = 0
    all_node_colors = []
    node_colors = dict(initial_node_colors)

    f = memoize(f, 'f')
    node = Node(problem.initial)

    node_colors[node.state] = "red"
    iterations += 1
    all_node_colors.append(dict(node_colors))

    if problem.goal_test(node.state):
        node_colors[node.state] = "green"
        iterations += 1
        all_node_colors.append(dict(node_colors))
        return (iterations, all_node_colors, node)

    frontier = PriorityQueue(min, f)
    frontier.append(node)

    node_colors[node.state] = "orange"
    iterations += 1
    all_node_colors.append(dict(node_colors))

    explored = set()
    while frontier:
        node = frontier.pop()

        node_colors[node.state] = "red"
        iterations += 1
        all_node_colors.append(dict(node_colors))

        if problem.goal_test(node.state):
            node_colors[node.state] = "green"
            iterations += 1
            all_node_colors.append(dict(node_colors))
            return (iterations, all_node_colors, node)

        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
                node_colors[child.state] = "orange"
                iterations += 1
                all_node_colors.append(dict(node_colors))
            elif child in frontier:
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
                    node_colors[child.state] = "orange"
                    iterations += 1
                    all_node_colors.append(dict(node_colors))

        node_colors[node.state] = "blue"
        iterations += 1
        all_node_colors.append(dict(node_colors))
    return None