def start():
    """Run the solver currently selected in the UI.

    Dispatches to the A* solver when the selector reads "A*算法"
    ("A* algorithm"); any other selection runs the genetic algorithm.
    """
    # `method` is a UI selection widget defined elsewhere in this module.
    chosen = method.get()
    if chosen == "A*算法":
        astar()
    else:
        ga()
def find_path(start, end, mesh, grid, tilesize=(16,16)): """ Uses astar to find a path from start to end, using the given mesh and tile grid. >>> grid = [[0,0,0,0,0],[0,0,0,0,0],[0,0,1,0,0],[0,0,0,0,0],[0,0,0,0,0]] >>> mesh = make_nav_mesh([(2,2,1,1)],(0,0,4,4),1) >>> find_path((0,0),(4,4),mesh,grid,(1,1)) [(4, 1), (4, 4)] """ # If there is a straight line, just return the end point if not line_intersects_grid(start, end, grid, tilesize): return [end] # Copy mesh so we can add temp nodes mesh = copy.deepcopy(mesh) # Add temp notes for start mesh[start] = dict([(n, point_dist(start,n)) for n in mesh if not line_intersects_grid(start,n,grid,tilesize)]) # Add temp nodes for end: if end not in mesh: endconns = [(n, point_dist(end,n)) for n in mesh if not line_intersects_grid(end,n,grid,tilesize)] for n, dst in endconns: mesh[n][end] = dst neighbours = lambda n: mesh[n].keys() cost = lambda n1, n2: mesh[n1][n2] goal = lambda n: n == end heuristic = lambda n: ((n[0]-end[0]) ** 2 + (n[1]-end[1]) ** 2) ** 0.5 nodes, length = astar(start, neighbours, goal, 0, cost, heuristic) return nodes
def geometric_tortuosity(maze):
    """Return the geometric tortuosity of a porous medium.

    For every entry cell, the shortest A* geodesic to any exit cell is
    measured as the sum of Euclidean step lengths; the tortuosity is the
    mean of those per-entry minima divided by the straight-line span of
    the medium (number of columns - 1).

    Fixes over the original: ``path_star``/``path_end`` were each called
    twice (once just to compute an unused ``total_paths``), and the dead
    counter ``unit_caminos`` has been removed.

    :param maze: 2D grid (list of lists) describing the medium
    :return: geometric tortuosity (float)
    """
    entry_points = path_star(maze)          # candidate start cells, computed once
    exit_points = path_end(maze)            # candidate end cells, computed once
    width = np.array(maze).shape[1]         # straight-line reference length
    shortest_per_entry = []
    for entry in entry_points:
        lengths = []
        for exit_cell in exit_points:
            path = astar(maze, entry, exit_cell)
            # Sum Euclidean lengths of consecutive path segments.
            length = sum(
                math.sqrt((path[i][0] - path[i + 1][0]) ** 2 +
                          (path[i][1] - path[i + 1][1]) ** 2)
                for i in range(len(path) - 1))
            lengths.append(length)
        shortest_per_entry.append(min(lengths))
    return np.mean(np.array(shortest_per_entry)) / (int(width) - 1)
def go_to_coin(self, coord):
    """Steer the player one tile at a time toward a coin.

    coord : [x, y] target position in pixels; converted to (row, col)
    tile coordinates before pathfinding.
    """
    target_x = int(coord[0] / TILESIZE)
    target_y = int(coord[1] / TILESIZE)
    target = (target_y, target_x)                 # (row, col) of the coin
    current_x = int(self.x / TILESIZE)
    current_y = int(self.y / TILESIZE)
    current = (current_y, current_x)              # (row, col) of the player
    if len(self.path) == 0:
        # No active path: compute one from the current tile to the coin.
        start_x = int(self.x / TILESIZE)
        start_y = int(self.y / TILESIZE)
        start = (start_y, start_x)
        grid = self.game.map.grid
        self.path = astar(grid, start, target)
    else:
        # Follow the existing path: drop the waypoint once reached,
        # otherwise set velocity toward it (path nodes are (row, col)).
        next_move = self.path[0]
        if current_x == next_move[1] and current_y == next_move[0]:
            self.path = self.path[1:]
        else:
            self.vx = PLAYER_SPEED * (next_move[1] - current_x)
            self.vy = PLAYER_SPEED * (next_move[0] - current_y)
def setup():
    """Processing sketch setup: size the canvas and precompute the A* run."""
    global path, i, analise
    size(colunas * tam_quadro, linhas * tam_quadro)
    no_stroke()
    origin = (0, 1)
    goal = (20, 39)
    path, analise = astar(M, origin, goal)
def astar_path_length(m, start, end):
    """Return the length of the shortest path from start to end in mesh m."""
    def neighbours(node):
        # Adjacency is the key set of the node's edge-weight dict.
        return m[node].keys()

    def cost(a, b):
        return m[a][b]

    _, length = astar(start, neighbours, lambda n: n == end, 0, cost,
                      lambda n: point_dist(end, n))
    return length
def estrella(self, array):
    """Run A* over *array* from (0, 0) to (7, 7).

    Appends the origin to close the route, prints the result, and
    returns the list of coordinates.
    """
    path = astar(array, (0, 0), (7, 7))
    path.append((0, 0))
    print(path)
    return path
def checkFunction():
    """Solve the hard-coded 3D maze with A* and emit a reversed move list."""
    global posY, posX, posZ, moves, end
    # NOTE: the maze is rotated — x indexes rows and y indexes columns.
    # 1 = walkable corridor, -1 = wall.
    maze = [[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], #0 row
            [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], #1
            [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], #2
            [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], #3
            [-1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1], #4
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #5
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #6
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #7
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #8
            [-1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1], #9
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #10
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #11
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #12
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #13
            [-1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1], #14
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #15
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #16
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #17
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #18
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #19
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #20
            [-1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1], #21
            [-1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1], #22
            [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]] #23
    start = (posY, posX, posZ) # ( Y , X )
    end = (4, 16, 1)
    path = astar(maze, start, end)
    print(path)
    # Walk consecutive path nodes and translate coordinate deltas into
    # move letters; axis 0 is vertical (D/U), axis 1 horizontal (R/L).
    after = (0,0)
    for i in path:
        if i == start:
            print('Start')
            after = i
        else:
            before = after
            after = i
            if (after[0] - before[0]) > 0:
                print("DOWN")
                moves.append('D')
            elif(after[0] - before[0]) < 0:
                print("UP")
                moves.append('U')
            else:
                if (after[1] - before[1]) > 0:
                    print("Right")
                    moves.append('R')
                elif (after[1] - before[1]) < 0:
                    print("Left")
                    moves.append('L')
                else:
                    # No move on either axis — a turn in place ("Obrót").
                    print("Obrót")
    # Moves are consumed last-first by the caller — TODO confirm.
    moves.reverse()
    print(moves)
def find_path(self): """Find path towards current goal.""" j = JeuRecherche(self.pos, self.goal.pos, distManhattan, self.dir_vecs, self.dims, self.walls) self.path = astar(j) self.current = 0 if (self.verbose): print("Player {} moving towards Restaurant {}.".format( self.id, self.goal.id))
def main():
    """Read a maze file, solve it with A* under three heuristics, and report.

    Usage: python sdmaze.py maze_file
    (Python 2 source — uses print statements.)
    """
    if len(sys.argv) != 2:
        print "usage: python sdmaze.py maze_file"
        return
    print 'Reading in ' + sys.argv[1] + '...\n'
    data = read_maze(sys.argv[1])
    # data is (start, goal, board) — both endpoints must exist.
    if data[0] == None or data[1] == None:
        print "Invalid board configuration. Board must have start and stop goals."
        return;
    die = Die(data[0], data[1], data[2], None, 1, 2, 3)
    # Solve once per heuristic; each call returns (move list, stats).
    manhattan_results, manhattan_analysis = astar(die, manhattanDistance)
    straight_line_results, straight_line_analysis = astar(die, straightLineDistance)
    direction_results, direction_analysis = astar(die, directionHeuristic)
    # Print the solution (or impossibility) for each heuristic.
    for results in \
        [("Manhattan", manhattan_results),
         ("Straight line", straight_line_results),
         ("Direction", direction_results)]:
        print '---', results[0], '---'
        if results[1] != None:
            for res in results[1]:
                print res
            print '\nIt takes %s moves to solve the puzzle' % len(results[1])
        else:
            print 'The puzzle is impossible'
        print ''
    # Print search statistics for each heuristic.
    for analysis in \
        [("Manhattan", manhattan_analysis),
         ("Straight line", straight_line_analysis),
         ("Direction", direction_analysis)]:
        print '---', analysis[0], '---'
        print '\t%s nodes generated' % analysis[1][0]
        print '\t%s nodes visited' % analysis[1][1]
def movePink(self):
    """Advance the pink ghost one tile along an A* path toward Pac-Man."""
    ghost = self.ghostPink
    pos = ghost.getPos()
    ghost.setPath(astar(pos, self.pacman.getPos(), self.maze))
    step = ghost.followPath()
    dx, dy = (step[0], step[1]) if step else (0, 0)
    # Only move if the destination tile is free; update the tile map.
    if self.maze.isEmptyTile(pos[0] + dx, pos[1] + dy):
        self.maze.setTile(NONE, pos[0], pos[1])
        ghost.move(dx, dy)
        pos = ghost.getPos()
        self.maze.setTile(PINK, pos[0], pos[1])
def difference_explored(dimension, density, trials=100):
    """Average difference in explored-node counts between BFS and A*.

    Generates *trials* random mazes of the given dimension/density, runs
    BFS and A* corner-to-corner on each, and returns the mean of
    (bfs_explored - astar_explored); positive means BFS explored more.

    The trial count was previously hard-coded as 100 in two places that
    had to stay in sync; it is now a parameter defaulting to 100.
    """
    total_difference = 0
    for _ in range(trials):
        maze_trial = generate_maze(dimension, density)
        _, _, num_explored_bfs = bfs(
            maze_trial, (0, 0), (dimension - 1, dimension - 1))
        _, _, num_explored_astar = astar(
            maze_trial, (0, 0), (dimension - 1, dimension - 1))
        total_difference += (num_explored_bfs - num_explored_astar)
    return (total_difference / trials)
def moveBlue(self):
    """Advance the blue ghost one tile along an A* path toward the dot target."""
    ghost = self.ghostBlue
    pos = ghost.getPos()
    ghost.setPath(astar(pos, self.maze.countDots(), self.maze))
    step = ghost.followPath()
    dx, dy = (step[0], step[1]) if step else (0, 0)
    # Only move if the destination tile is free; update the tile map.
    if self.maze.isEmptyTile(pos[0] + dx, pos[1] + dy):
        self.maze.setTile(NONE, pos[0], pos[1])
        ghost.move(dx, dy)
        pos = ghost.getPos()
        self.maze.setTile(BLUE, pos[0], pos[1])
def test_path_algorithms():
    """Benchmark A*, BFS and DFS on one random maze and save result images.

    Command line: argv[1] = maze dimension, argv[2] = obstacle density.
    Each run prints its status, wall-clock time and (where available)
    explored-node count.
    """
    dimension = int(sys.argv[1])
    density = float(sys.argv[2])
    cmap = colors.ListedColormap(color_set)
    norm = colors.BoundaryNorm(range_set, len(color_set))
    plt.figure(figsize=(8, 8))
    plt.axis('off')
    maze = generate_maze(dimension, density)  # generate maze
    # run each algorithm on maze and time them
    start_time = time.time()
    status, astar_maze, num_explored_nodes = astar(
        maze, (0, 0), (dimension - 1, dimension - 1))
    end_time = time.time()
    print(status)
    print(end_time - start_time)
    print(num_explored_nodes)
    start_time = time.time()
    status, bfs_maze, num_explored_nodes = bfs(maze, (0, 0), (dimension - 1, dimension - 1))
    end_time = time.time()
    print(status)
    print(end_time - start_time)
    print(num_explored_nodes)
    # DFS returns no explored-node count.
    start_time = time.time()
    status, dfs_maze = dfs(maze, (0, 0), (dimension - 1, dimension - 1))
    end_time = time.time()
    print(status)
    print(end_time - start_time)
    # plot each result and save as image
    # NOTE(review): 'bfs_maze' and 'dfs_maze' lack a file extension,
    # unlike 'astar_maze.jpg' — confirm the intended output format.
    plt.imshow(astar_maze, cmap=cmap, norm=norm)
    plt.savefig('astar_maze.jpg')
    plt.imshow(bfs_maze, cmap=cmap, norm=norm)
    plt.savefig('bfs_maze')
    plt.imshow(dfs_maze, cmap=cmap, norm=norm)
    plt.savefig('dfs_maze')
def visualize(f):
    """Visualize an A* solve of map file *f* in a pygame window.

    (Python 2 source.)  The window stays open after solving until the
    user quits.
    """
    # NOTE(review): under Python 2 this is integer division when BOUND
    # is an int — confirm that truncation is intended.
    scale = 600 / BOUND
    # `map` shadows the builtin; kept as-is to avoid a code change here.
    map = Map(f, scale)
    p = Problem(map)
    pygame.init()
    pygame.display.set_caption('A* Pathfinding')
    clock = pygame.time.Clock()
    screen = pygame.display.set_mode((map.scale * BOUND, map.scale * BOUND))
    draw_map(screen, map)
    path = astar(p)
    if path:
        update_map(screen, map, path, clock)
        print 'Note: lines and costs are scaled by %r to fill the window' % scale
    else:
        print 'No solution...'
    while 1:  # stay open after solve
        quit_handler()
        msElapsed = clock.tick(FPS)
def do_astar():
    """Load 'tdm.npy', build an occupancy grid, A*-solve it and display.

    Grey (128,128,128) and black (0,0,0) pixels are obstacles (0); all
    other pixels are free (1).  The found path is painted red, the start
    green and the end blue.

    The original built the grid with a Python double loop over every
    pixel; this is replaced by an equivalent vectorized NumPy mask.
    """
    tdm = np.load('tdm.npy')
    print(tdm.shape)
    # Obstacle mask: all of the first three channels equal 128 (grey)
    # or all equal 0 (black).
    grey = np.all(tdm[:, :, :3] == 128, axis=2)
    black = np.all(tdm[:, :, :3] == 0, axis=2)
    maze = np.where(grey | black, 0, 1)
    start, end = get_start_and_end(tdm)
    path = astar(maze, start, end)
    print(path)
    # Paint the path red, then mark the endpoints.
    for p in path:
        r, c = p[0], p[1]
        tdm[r, c, :] = [255, 0, 0]
    tdm[start[0], start[1], :] = [0, 255, 0]
    tdm[end[0], end[1], :] = [0, 0, 255]
    plt.imshow(tdm)
    plt.axis('off')
    plt.tight_layout()
    plt.show()
    plt.pause(2)
    plt.close()
def test_a_star():
    """A* over a small hand-built graph must find the known optimal route."""
    adjacency = {
        'A': ['B', 'G'],
        'B': ['A', 'C', 'G', 'D'],
        'C': ['B', 'D'],
        'D': ['C', 'B', 'F', 'E'],
        'E': ['D', 'F'],
        'F': ['E', 'D', 'G'],
        'G': ['A', 'B', 'F'],
    }
    coordinates = {
        'A': nodedata(0, 0, 0),
        'B': nodedata(0, 1, 0),
        'C': nodedata(0, 2, 0),
        'D': nodedata(2, 2, 0),
        'E': nodedata(3, 2, 0),
        'F': nodedata(2, -1, 0),
        'G': nodedata(1, 0, 0),
    }
    optimal_path, cost = astar(toblers, toblers_heuristic,
                               adjacency, coordinates, 'A', 'E')
    assert optimal_path == ['A', 'B', 'D', 'E']
def moveOrange(self):
    """Move the orange ghost: chase Pac-Man along its row/column, else patrol.

    If Pac-Man shares a row or column, step directly toward him;
    otherwise follow A* toward the next patrol waypoint.
    """
    currPos = self.ghostOrange.getPos()
    targetPos = self.pacman.getPos()
    dirX = dirY = 0
    # Pacman is in the same column
    if currPos[0] == targetPos[0]:
        if currPos[1] - targetPos[1] < -1:
            dirY += 1
        elif currPos[1] - targetPos[1] > 1:
            dirY -= 1
    # Pacman is in the same row
    elif currPos[1] == targetPos[1]:
        if currPos[0] - targetPos[0] < -1:
            dirX += 1
        elif currPos[0] - targetPos[0] > 1:
            dirX -= 1
    # Pacman not spotted — head for the next waypoint instead.
    else:
        targetPos = self.ghostOrange.getNextWp()
        if currPos == targetPos:
            # Waypoint reached: advance to the following one.
            self.ghostOrange.setNextWp()
            targetPos = self.ghostOrange.getNextWp()
        self.ghostOrange.setPath(astar(currPos, targetPos, self.maze))
        temp = self.ghostOrange.followPath()
        if temp:
            dirX = temp[0]
            dirY = temp[1]
    # Apply the chosen step if the destination tile is free.
    if self.maze.isEmptyTile(currPos[0] + dirX, currPos[1] + dirY):
        self.maze.setTile(NONE, currPos[0], currPos[1])
        self.ghostOrange.move(dirX, dirY)
        currPos = self.ghostOrange.getPos()
        self.maze.setTile(ORNG, currPos[0], currPos[1])
from sys import argv #import all the objects in initGraph, not just import initGraph from initGraph import * #from bfs import * from bfs import * from dfs import * from astar import * from datetime import datetime startTime = datetime.now() # the arguments from user inputs. script, city1, city2, routingopt, algoopt= argv #print G.node[city1] #print G.node[city2] # 'is' is to test if the two object is exactly the same, # while == is for the testing of the value of stings themselves. if algoopt == 'bfs': bfs(city1,city2,routingopt) elif algoopt == 'dfs': dfs(city1,city2,routingopt) elif algoopt == 'astar': astar(city1,city2,routingopt) else: print 'Only bfs, dfs, and astar are supported!' #print algoopt print 'Run time: %s'%(str(datetime.now() - startTime)) print '=================================================================='
def AStar(self, map):
    """Compute and store an A* path from the current position to the map exit."""
    grid_pos = [int(self.pos[0]), int(self.pos[1])]
    self.path = astar(map.map, grid_pos, map.exit)
def __init__(self): rospy.init_node("robot") # self.move_list = read_config()["move_list"] self.move_list = read_config()["move_list"] self.mapSize = read_config()["map_size"] self.start = read_config()["start"] self.goal = read_config()["goal"] self.walls = read_config()["walls"] self.pits = read_config()["pits"] self.cost = read_config()["reward_for_each_step"] rospy.sleep(1) self.pathPub = rospy.Publisher("/results/path_list", AStarPath, queue_size=10) self.completePub = rospy.Publisher("/map_node/sim_complete", Bool, queue_size=10) self.mdpPub = rospy.Publisher("/results/policy_list", PolicyList, queue_size=10) rospy.sleep(3) pathList = astar(self.move_list, self.mapSize, self.start, self.goal, self.walls, self.pits, self.cost) print pathList for item in pathList: print item self.pathPub.publish(item) rospy.sleep(1) print "should publish" self.mdp = mdp() self.mdp.looping() self.QlearningEpsilon = QLearningEpislon() self.QlearningLValue = QLearningLValue() self.QlearningWithUncertainty = QLearningWithUncertainty() self.QlearningEpsilon.learning() self.QlearningLValue.learning() self.QlearningWithUncertainty.learning() policy = self.mdp.convertToList() self.mdpPub.publish(policy) rospy.sleep(1) policy = self.QlearningEpsilon.convertToList() self.mdpPub.publish(policy) rospy.sleep(1) policy = self.QlearningLValue.convertToList() self.mdpPub.publish(policy) rospy.sleep(1) policy = self.QlearningWithUncertainty.convertToList() self.mdpPub.publish(policy) rospy.sleep(1) self.completePub.publish(True) rospy.sleep(1) rospy.signal_shutdown("finish")
else: for it3 in range(0,9): best_numbers[it3]=most_frequent(cells_buffer[it3]) cells_buffer = [[],[],[],[],[],[],[],[],[]] count_tolerance = 0 """ print(numbers, "Reconocimiento ") if len(numbers) == len(set(numbers)): if not compare(temp_best, numbers): print("cambio : ") temp_best = numbers inputAstar = ''.join([str(elem) for elem in numbers]) print(inputAstar) resultado = astar(inputAstar) print("res ", resultado) if resultado == []: print("No hay solución") else: print("resultado") print(resultado[0]) paintCorners(image, corners) cv2.imshow("wraperd", wrapedImage) cv2.imshow("images", image) except: cv2.imshow("images", image) if cv2.waitKey(1) == 27:
# Remaining lines after the three header lines hold the board rows.
lines = lines[3:]
gridBoard = []
# Convert the spawn-pool entries to ints.
for i in range(len(SpawnPool)):
    SpawnPool[i] = int(SpawnPool[i])
# Read one board row per line of input, Height rows total.
for i in range(0, Height):
    gridBoard.append(lines[i].split())
# Convert the initial game-board cells to ints.
for i in range(len(gridBoard)):
    for j in range(len(gridBoard[i])):
        gridBoard[i][j] = int(gridBoard[i][j])
spawnFrequencyWeighting = spawnFrequencyMapping(SpawnPool)
#
# Initial starting game-board state.
StartingBoard = GameBoard(gridBoard, Width, Height, SpawnPool, "", ScoreGoal, ScoreGoal)
#
# Main program driver: run the AI and report elapsed time.
start_time = datetime.now()
Final_Board = astar(StartingBoard, DIRECTION, ScoreGoal, spawnFrequencyWeighting, SpawnPool, algoSelection)
print(Microsecond(start_time))
if (Final_Board is not None):
    Final_Board.print_board()
def run_simulation(self): actions = astar(self.puzzle,self.goalState,heuristic_evaluation) print actions for action in actions: self.update_puzzle(action)
if num_wrong_tiles(s2) != 5: print "Error: Wrong number of tiles" else: print "." if manhattan_distance(s) != 4: print "Error: Manhattan Distance calculated incorrectly" else: print "." if itdeep(s) != [(6,3), (3,4)]: print "Error: Iterative Deepening calculating wrong answer" else: print "." ''' if astar(s, num_wrong_tiles) != [(6,3), (3,4)]: print "Error: Astar incorrect with num_wrong_tiles heur." else: print "." if astar(s, manhattan_distance) != [(6,3), (3,4)]: print "Error: Astar incorrect with manhattan_distance heur." else: print "." s = state([1,2,3,7,5,8,0,6,4]) ''' if num_wrong_tiles(s) != 4: print "Error: Wrong number of tiles" else:
def MainLoop(self):
    """Main UI/teleop loop for the Maebot ground station (Python 2).

    Handles keyboard teleop (publishes MAEBOT_DIFF_DRIVE), on-screen PID
    command buttons (publishes GS_VELOCITY_CMD / GS_PID_INIT), A*-based
    guidance replanning, and renders the lidar plot, map image and
    odometry/velocity readouts each frame.
    """
    pygame.key.set_repeat(1, 20)
    vScale = 0.5  # global scale applied to teleop wheel speeds
    # Prepare text to be output on screen.
    font = pygame.font.SysFont("DejaVuSans Mono",14)
    mapCounter = 0
    while 1:
        leftVel = 0
        rightVel = 0
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            elif event.type == KEYDOWN:
                if ((event.key == K_ESCAPE) or (event.key == K_q)):
                    sys.exit()
                # Arrow keys: differential-drive teleop velocities.
                key = pygame.key.get_pressed()
                if key[K_RIGHT]:
                    leftVel = leftVel + 0.40
                    rightVel = rightVel - 0.40
                elif key[K_LEFT]:
                    leftVel = leftVel - 0.40
                    rightVel = rightVel + 0.40
                elif key[K_UP]:
                    leftVel = leftVel + 0.65
                    rightVel = rightVel + 0.65
                elif key[K_DOWN]:
                    leftVel = leftVel - 0.65
                    rightVel = rightVel - 0.65
                else:
                    leftVel = 0.0
                    rightVel = 0.0
                cmd = maebot_diff_drive_t()
                cmd.motor_right_speed = vScale * rightVel
                cmd.motor_left_speed = vScale * leftVel
                self.lc.publish("MAEBOT_DIFF_DRIVE",cmd.encode())
            elif event.type == pygame.MOUSEBUTTONDOWN:
                command = velocity_cmd_t()
                # Sentinel: only publish if a button sets a real distance.
                command.Distance = 987654321.0
                if event.button == 1:
                    # Hit-test each on-screen button rectangle.
                    if ((event.pos[0] > 438) and (event.pos[0] < 510) and (event.pos[1] > 325) and (event.pos[1] < 397)):
                        command.FwdSpeed = 80.0
                        command.Distance = 1000.0
                        command.AngSpeed = 0.0
                        command.Angle = 0.0
                        print "Commanded PID Forward One Meter!"
                    elif ((event.pos[0] > 438) and (event.pos[0] < 510) and (event.pos[1] > 400) and (event.pos[1] < 472)):
                        command.FwdSpeed = -100.0
                        command.Distance = -1000.0
                        command.AngSpeed = 0.0
                        command.Angle = 0.0
                        print "Commanded PID Backward One Meter!"
                    elif ((event.pos[0] > 363) and (event.pos[0] < 435) and (event.pos[1] > 400) and (event.pos[1] < 472)):
                        command.FwdSpeed = 0.0
                        command.Distance = 0.0
                        command.AngSpeed = 25.0
                        command.Angle = 90.0
                        print "Commanded PID Left One Meter!"
                    elif ((event.pos[0] > 513) and (event.pos[0] < 585) and (event.pos[1] > 400) and (event.pos[1] < 472)):
                        command.FwdSpeed = 0.0
                        command.Distance = 0.0
                        command.AngSpeed = -25.0
                        command.Angle = -90.0
                        print "Commandsed PID Right One Meter!"
                    elif ((event.pos[0] > 513) and (event.pos[0] < 585) and (event.pos[1] > 325) and (event.pos[1] < 397)):
                        # Reset button: re-send PID gains.
                        pid_cmd = pid_init_t()
                        pid_cmd.kp = 0.003   # CHANGE FOR YOUR GAINS! (was 0.00225)
                        pid_cmd.ki = 0.00001 # See initialization
                        pid_cmd.kd = 0.00045 # (was 0.000045)
                        pid_cmd.iSat = 0.0
                        self.lc.publish("GS_PID_INIT",pid_cmd.encode())
                        print "Commanded PID Reset!"
                    if (command.Distance != 987654321.0):
                        self.lc.publish("GS_VELOCITY_CMD",command.encode())
        # Handle guidance: when the guidance state machine is idle (-1),
        # plan a fresh A* path from odometry to a fixed goal, retrying a
        # failed plan after a 1 s backoff.  (An earlier 5 s multi-point
        # variant of this block was removed as dead commented-out code.)
        if self.guidance.state == -1:
            if not self.failed_path or (time.time() - self.last_failed_path) > 1:
                # Generate a new path.  world_to_astar maps (x, y) -> (row, col).
                startpos = world_to_astar(self.odometry[1], self.odometry[0])
                endpos = world_to_astar(2500,3500)
                print "Starting at: ", startpos, "Ending at: ", endpos
                self.odom_path.append(world_to_astar(self.odometry[1], self.odometry[0]))
                path = astar(self.datamatrix.getMapMatrix(), startpos, endpos, False)
                if len(path) > 0:
                    print "Found a path. This is the guidance:"
                    self.failed_path = False
                    # Chase a point a few cells ahead rather than the whole path.
                    next_point = path[min(len(path)-1, 5)]
                    print next_point
                    # astar_to_world maps (col, row) back to world (x, y).
                    self.guidance.next_point = astar_to_world(next_point[1], next_point[0])
                    print self.guidance.next_point
                    self.guidance.start()
                else:
                    print "Failed to find a path"
                    self.failed_path = True
                    self.last_failed_path = time.time()
            # Re-send PID gains whenever guidance is idle — TODO confirm
            # this is intended every iteration rather than once.
            pid_cmd = pid_init_t()
            pid_cmd.kp = 0.003   # CHANGE FOR YOUR GAINS! (was 0.00225)
            pid_cmd.ki = 0.00001 # See initialization
            pid_cmd.kd = 0.00045 # (was 0.000045)
            pid_cmd.iSat = 0.0
            self.lc.publish("GS_PID_INIT",pid_cmd.encode())
        self.screen.fill((255,255,255))
        # Plot lidar scans on the polar matplotlib axes.
        plt.cla()
        self.ax.plot(self.thetas,self.ranges,'or',markersize=2)
        self.ax.set_rmax(1.5)
        self.ax.set_theta_direction(-1)
        self.ax.set_theta_zero_location("N")
        self.ax.set_thetagrids([0,45,90,135,180,225,270,315], labels=['','','','','','','',''], frac=None,fmt=None)
        self.ax.set_rgrids([0.5,1.0,1.5],labels=['0.5','1.0',''], angle=None,fmt=None)
        # Render the matplotlib figure into a pygame surface.
        canvas = agg.FigureCanvasAgg(self.fig)
        canvas.draw()
        renderer = canvas.get_renderer()
        raw_data = renderer.tostring_rgb()
        size = canvas.get_width_height()
        surf = pygame.image.fromstring(raw_data, size, "RGB")
        self.screen.blit(surf, (320,0))
        # Put the occupancy map image on screen once it exists.
        if self.map_init == True:
            image = pygame.image.load("current_map.png")
            image = pygame.transform.scale(image, (320, 320))
            self.screen.blit(image, (0,0))
        # Position and velocity feedback text on screen.
        self.lc.handle()
        pygame.draw.rect(self.screen,(0,0,0),(5,350,300,120),2)
        text = font.render(" POSITION ",True,(0,0,0))
        self.screen.blit(text,(10,360))
        text = font.render("x: %.2f [mm]" % (self.odometry[1]),True,(0,0,0))
        self.screen.blit(text,(10,390))
        text = font.render("y: %.2f [mm]" % (self.odometry[0]),True,(0,0,0))
        self.screen.blit(text,(10,420))
        text = font.render("t: %.2f [rad]" % (self.odometry[2]),True,(0,0,0))
        self.screen.blit(text,(10,450))
        text = font.render(" VELOCITY ",True,(0,0,0))
        self.screen.blit(text,(150,360))
        text = font.render("dxy/dt: %.2f [mm/us]" % (self.dxy / self.dt * 1000000),True,(0,0,0))
        self.screen.blit(text,(150,390))
        text = font.render("dth/dt: %.2f [deg/us]" % (self.dtheta / self.dt * 1000000),True,(0,0,0))
        self.screen.blit(text,(150,420))
        text = font.render("dt: %d [s]" % (self.dt/1000000),True,(0,0,0))
        self.screen.blit(text,(150,450))
        # Plot command buttons.
        self.screen.blit(self.arrowup,(438,325))
        self.screen.blit(self.arrowdown,(438,400))
        self.screen.blit(self.arrowleft,(363,400))
        self.screen.blit(self.arrowright,(513,400))
        self.screen.blit(self.resetbut,(513,325))
        pygame.display.flip()
def plan(self, des):
    """Plan a path from the current position to *des* and store it in
    pixel coordinates.

    Fix: the original used ``map(to_pix, ...)``, which under Python 3
    yields a one-shot iterator — repeated indexing/iteration of
    ``self.path`` by callers would silently see an exhausted sequence.
    A list comprehension is equivalent under Python 2 and safe under
    Python 3.
    """
    self.path = astar(to_grid(self.pos), to_grid(des))
    self.path = [to_pix(node) for node in self.path]
from greedy import *
from astar import *
from ac3 import *
from backtracking import *
from sudoku import *

if __name__ == '__main__':
    # Read command-line arguments: board file and solver name.
    filename = sys.argv[1]
    algorithm = sys.argv[2]
    # Dispatch each board in the file to the selected solver.
    if algorithm == "bfs":
        for board in read_board(filename):
            print(bfs(board))
    elif algorithm == "dfs":
        for board in read_board(filename):
            print(dfs(board))
    elif algorithm == "greedy":
        for board in read_board(filename):
            print(greedy(board, h1))
    elif algorithm == "astar":
        for board in read_board(filename):
            print(astar(board, g, h1))
    elif algorithm == "ac3":
        for board in read_board(filename):
            csp = CSP(board)
            print(AC3(csp, csp.queue()))
    elif algorithm == "backtracking":
        for board in read_board(filename):
            print(backtracking(CSP(board)))
"""
import numpy
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from astar import *

# Debug script: load a saved occupancy matrix, dilate it by the robot
# diameter, A*-solve it and plot the path over the dilated map.
# NOTE(review): `scipy` is used below but not imported here — presumably
# imported earlier in this file; verify.
m = numpy.load("./temp_path_0.npy")
filter_mat = sum_filter(ROBO_DIAMETER, ROBO_DIAMETER)
# Convolving with the robot-sized filter marks cells too close to walls.
valid_map = scipy.signal.convolve2d(m, filter_mat, 'same')
imgplot = plt.imshow(valid_map)
p2 = astar(m, (350, 350), (450, 350), False)
# Unzip the (row, col) path into separate coordinate lists for plotting.
[p2x, p2y] = zip(*p2)
# Plot as (col, row) so the path overlays the imshow orientation.
plt.plot(p2y, p2x, 'orange')
plt.show()
def get_astar(pos1, pos2, mapa):
    """Return the next wall-aware move from pos1 toward pos2 on mapa.

    Bug fix: the original executed ``path[1] = path[0]`` when
    ``len(path) < 2`` — assigning index 1 of a 0- or 1-element list
    raises IndexError, so the guard it was meant to handle always
    crashed.  Short paths are now padded by appending instead.
    """
    path = astar(mapa.map, pos1, pos2)
    if not path:
        # No path found: fall back to the current position so the move
        # resolves to "stay put" — TODO confirm moveToWalls semantics.
        path = [pos1]
    if len(path) < 2:
        # Single-node path: duplicate the node so a "next" step exists.
        path.append(path[0])
    return moveToWalls(path[0], path[1], mapa)
def __init__(self): rospy.init_node("robot") # self.move_list = read_config()["move_list"] self.move_list = read_config()["move_list"] self.mapSize = read_config()["map_size"] self.start = read_config()["start"] self.goal = read_config()["goal"] self.walls = read_config()["walls"] self.pits = read_config()["pits"] self.cost = read_config()["reward_for_each_step"] rospy.sleep(1) self.pathPub = rospy.Publisher("/results/path_list",AStarPath,queue_size=10) self.completePub = rospy.Publisher("/map_node/sim_complete",Bool,queue_size=10) self.mdpPub = rospy.Publisher("/results/policy_list",PolicyList,queue_size=10) rospy.sleep(3) pathList = astar(self.move_list,self.mapSize,self.start,self.goal,self.walls,self.pits,self.cost) print pathList for item in pathList: print item self.pathPub.publish(item) rospy.sleep(1) print "should publish" self.mdp = mdp() self.mdp.looping() self.QlearningEpsilon = QLearningEpislon() self.QlearningLValue = QLearningLValue() self.QlearningWithUncertainty = QLearningWithUncertainty() self.QlearningEpsilon.learning() self.QlearningLValue.learning() self.QlearningWithUncertainty.learning() policy = self.mdp.convertToList() self.mdpPub.publish(policy) rospy.sleep(1) policy = self.QlearningEpsilon.convertToList() self.mdpPub.publish(policy) rospy.sleep(1) policy =self.QlearningLValue.convertToList() self.mdpPub.publish(policy) rospy.sleep(1) policy =self.QlearningWithUncertainty.convertToList() self.mdpPub.publish(policy) rospy.sleep(1) self.completePub.publish(True) rospy.sleep(1) rospy.signal_shutdown("finish")
from image import Image
from astar import *

# Load the maze image, solve it with A* between the fixed endpoints,
# draw the solution onto the image and save the result.
image = Image('./img/bigmaze.png')
start = (0, 3)
end = (40, 31)
image.serialize()
image.refresh()
image.solve(astar(image.img, start, end))
image.save()
def createMap(graph, src, dest):
    """Render the graph and the A* route from src to dest on a Plotly Mapbox.

    Returns [plot_url, cost] where cost is the cheaper of the two
    directional A* runs.  Node ``name`` tuples hold (label, lat, lon) —
    TODO confirm, since lon is drawn from name[2] and lat from name[1]
    below while the unused lintang/bujur lists collect the opposite.
    """
    # Build a Mapbox figure with Plotly.
    lintang = []  # latitudes (collected but unused below)
    bujur = []    # longitudes (collected but unused below)
    fig = go.Figure(
        go.Scattermapbox(mode="markers+lines", lon=[], lat=[], marker={'size': 10}))
    # Run A* in both directions so the cheaper route cost can be reported.
    astaresult = astar(graph[getIndexFromName(graph, src)],
                       graph[getIndexFromName(graph, dest)], graph)
    astaresult2 = astar(graph[getIndexFromName(graph, dest)],
                        graph[getIndexFromName(graph, src)], graph)
    result = astaresult[0]
    for i in graph:  # iterate every node in the graph
        lintang.append(i.name[2])
        bujur.append(i.name[1])
        for x, y in i.adjacentNodes.items():  # every reachable neighbour
            if (i.name[0] in result and x[0] in result):
                # Red line for edges whose both endpoints lie on the route.
                fig.add_trace(
                    go.Scattermapbox(
                        mode="lines",
                        lon=[i.name[2], x[2]],
                        lat=[i.name[1], x[1]],
                        showlegend=False,
                        hoverinfo="none",
                        line_color='#FF0000'))
            else:
                # Black line for all other edges.
                fig.add_trace(
                    go.Scattermapbox(
                        mode="lines",
                        lon=[i.name[2], x[2]],
                        lat=[i.name[1], x[1]],
                        showlegend=False,
                        hoverinfo="none",
                        line_color='#000000'))
    for i in graph:
        if (len(result) != 0 and (i.name[0] == result[0] or i.name[0] == result[-1])):
            # Dark-red markers for the start and destination nodes.
            fig.add_trace(
                go.Scattermapbox(
                    mode="markers",
                    lon=[i.name[2]],
                    lat=[i.name[1]],
                    name=i.name[0],
                    hoverinfo='name',
                    marker_color='#9E2B2B',
                    marker={'size': 20}))
        elif (i.name[0] in result):
            # Green markers for intermediate nodes on the route.
            fig.add_trace(
                go.Scattermapbox(
                    mode="markers",
                    lon=[i.name[2]],
                    lat=[i.name[1]],
                    name=i.name[0],
                    hoverinfo='name',
                    marker_color='#cbd967',
                    marker={'size': 20}))
        else:
            # Black markers for all remaining nodes.
            fig.add_trace(
                go.Scattermapbox(
                    mode="markers",
                    lon=[i.name[2]],
                    lat=[i.name[1]],
                    name=i.name[0],
                    hoverinfo='name',
                    marker_color='#000000',
                    marker={'size': 20}))
    # Layout: set initial zoom and centre of the mapbox.
    # NOTE(review): 'center' appears twice in this dict literal; the
    # second (identical) value silently wins — one should be removed.
    fig.update_layout(
        margin={
            'l': 0,
            't': 0,
            'b': 0,
            'r': 0
        },
        mapbox={
            'center': {
                'lon': float(graph[0].name[2]),
                'lat': float(graph[0].name[1])
            },
            'style': "light",
            'center': {
                'lon': float(graph[0].name[2]),
                'lat': float(graph[0].name[1])
            },
            'zoom': 15
        })
    # Plot without opening a browser; the URL is embedded in main.html.
    plot_url = py.plot(
        fig, auto_open=False
    )
    # Report the cheaper of the two directional route costs.
    if (astaresult[1] > astaresult2[1]):
        output = [plot_url, astaresult2[1]]
    else:
        output = [plot_url, astaresult[1]]
    return (output)