Example #1
 def __init__(self):
     ha = {'comp': 'vel', 'metric': 'cityblock', 'alpha': 1}
     hb = {'comp': 'stock', 'metric': 'cityblock', 'alpha': 1}
     self.a = astar.AStar(meshSize=(60, 40),
                          heur=ha,
                          heur_weight=2,
                          links=4)
     self.b = astar.AStar(meshSize=(60, 40),
                          heur=hb,
                          heur_weight=2,
                          links=4)
     self.a.build_city()
     self.b.build_city()
Example #2
 def __init__(self, cols, lines):
     self.cols = cols
     self.lines = lines
     self.graph = [[_EMPTY] * cols for _ in range(lines)]
     self.last_duration = '...'
     # in this case the heuristic function is the same as the weights function (distance)
     self.astar = astar.AStar(self._adj, self._weights, self._weights)
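The comment above points at the shape of this AStar API: an adjacency callback plus separate weight and heuristic callbacks, with the distance function doing double duty. A rough sketch of what such callbacks could look like on this kind of grid follows; the 4-connectivity and the Manhattan distance are assumptions for illustration, not taken from the snippet's repository.

class GridCallbacks:
    def __init__(self, cols, lines):
        self.cols, self.lines = cols, lines

    def _adj(self, cell):
        # Hypothetical adjacency: the 4-connected neighbours inside the grid.
        x, y = cell
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < self.cols and 0 <= ny < self.lines:
                yield (nx, ny)

    def _weights(self, a, b):
        # Manhattan distance, serving both as edge weight and as heuristic.
        return abs(a[0] - b[0]) + abs(a[1] - b[1])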
Example #3
def main():
    # validate the params
    if len(sys.argv) != 2:
        print("Usage: python3 main.py <NETWORK_FILE>")
        sys.exit(-1)

    # read in the network from file
    net = network.readFromFile(sys.argv[1])

    # create the problem model
    model = problem.Problem(net)

    # search using BFS
    bfsSearcher = bfs.BFS(model)
    bfsSearcher.search()

    # search using Dijkstra
    dijkstraSearcher = dijkstra.Dijkstra(model)
    dijkstraSearcher.search()

    # search using A*
    astarSearcher = astar.AStar(model)
    astarSearcher.search()

    # search using beam search
    beamSearcher = beams.Beams(model)
    beamSearcher.search()

    # search using Iterative Deepening
    idSearcher = iterativedeepening.IterDeep(model)
    idSearcher.search()
Example #4
def codalab_run(run_id):
    # a=ppp((5,5),(((1,1), (3,3)),),(0,0))
    a = None
    if run_id == 0:
        a = ppp((10, 4), (((0, 3), (0, 6)), ((3, 3), (3, 6))))
    elif run_id == 1:
        a = ppp((10, 10), (
            ((1, 1), (1, 2)),
            ((1, 2), (3, 2)),
            ((6, 1), (8, 1)),
            ((8, 1), (8, 3)),
            ((6, 3), (8, 3)),
            ((3, 4), (4, 4)),
            ((4, 4), (4, 5)),
            ((6, 6), (6, 7)),
            ((1, 7), (2, 8)),
        ), (0, 0))
    obstacles = list(a.env.block_area)
    occupancy = astar.DetOccupancyGrid2D(a.env.map.height, a.env.map.width,
                                         obstacles)
    aa = []
    while not a.end():
        action = exptimax(a, 9)
        X, Y = a.env.next_location(action)
        m = a.env.entire_map()
        if m[X][Y] == a.env.map.VISITED:
            x_init = a.env.agent_location()
            x_goal = a.env.remaining_nodes()[0]
            Astar = astar.AStar((0, 0), (a.env.map.height, a.env.map.width),
                                x_init, x_goal, occupancy)
            if not Astar.solve():
                print("Not Solve")
            else:
                for j in range(len(Astar.path) - 1):
                    a1, b1 = Astar.path[j]
                    a2, b2 = Astar.path[j + 1]
                    if a2 == a1 - 1 and b1 == b2:
                        a.env.step(a.env.UP)
                    elif a2 == a1 + 1 and b1 == b2:
                        a.env.step(a.env.DOWN)
                    elif a2 == a1 and b2 == b1 - 1:
                        a.env.step(a.env.LEFT)
                    elif a2 == a1 and b2 == b1 + 1:
                        a.env.step(a.env.RIGHT)
            # aa.append(a.env.agent_location())
        else:
            aa.append(a.env.agent_location())
            a.env.step(action)
        print(action)
    aa.append(a.env.agent_location())
    print(aa, a.env.agent_distance, a.env.agent_turns)

    stats = {
        "turn": a.env.agent_turns,
        "dist": a.env.agent_distance,
        "notes": ""
    }

    return stats
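The inner loop above decodes each consecutive pair of A* path cells into an UP/DOWN/LEFT/RIGHT step, and the same decoding recurs in several examples below. A table-driven refactoring sketch; the helper name follow_path is hypothetical, and env is assumed to expose the same step/UP/DOWN/LEFT/RIGHT members as in the snippet.

def follow_path(env, path):
    # Map the (row, col) delta between consecutive cells to an action.
    moves = {(-1, 0): env.UP, (1, 0): env.DOWN,
             (0, -1): env.LEFT, (0, 1): env.RIGHT}
    for (a1, b1), (a2, b2) in zip(path, path[1:]):
        env.step(moves[(a2 - a1, b2 - b1)])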
Example #5
 def test_AStar_on8x12png(self):
     ip = astar.ImageProblem('12x8.png', lambda x: x == (11, 7),
                             lambda s: abs(s[0] - 11) + abs(s[1] - 7))
     a = astar.AStar(ip)
     pth = a.search((0, 0))
     self.assertEqual(
         15, pth.g,
         "Expected a path cost of 15 from UL-corner to LR the '12x8.png'")
     states = a.list_of_states(pth)
     self.assertFalse(
         (1, 0) in states,
         "Didn't expect (1,0) on the path... path should have taken the diagonal"
     )
Example #6
 def test_AStar_onDefaultGridProblemNoHeuristic(self):
     grid = [[1, 10, 10, 10, 10], [1, 1, 10, 10, 10], [10, 1, 1, 1, 1],
             [10, 10, 1, 10, 1], [10, 10, 1, 10, 1]]
     gp = astar.GridProblem(grid, hfn=lambda x: 0)
     a = astar.AStar(gp)
     pth = a.search((0, 0))
     self.assertEqual(8, pth.g,
                      "Expected a path cost of 8 on the default grid")
     states = a.list_of_states(pth)
     self.assertEqual(9, len(states), "Expected path to contain 9 elements")
     self.assertTrue((0, 0) in a.reached, "Expected (0,0) on reached dict")
     self.assertTrue((4, 2) in a.reached, "Expected (4,2) on reached dict")
     print("Your reached dict has: ", a.reached)
Example #7
def main():
    # a=ppp((5,5),(((1,1), (3,3)),),(0,0))
    a = ppp((10, 10), (
        ((1, 1), (1, 2)),
        ((1, 2), (3, 2)),
        ((6, 1), (8, 1)),
        ((8, 1), (8, 3)),
        ((6, 3), (8, 3)),
        ((3, 4), (4, 4)),
        ((4, 4), (4, 5)),
        ((6, 6), (6, 7)),
        ((1, 7), (2, 8)),
    ), (0, 0))
    obstacles = list(a.env.block_area)
    occupancy = astar.DetOccupancyGrid2D(a.env.map.width, a.env.map.height,
                                         obstacles)
    aa = []
    while not a.end():
        action = exptimax(a, 9)
        X, Y = a.env.next_location(action)
        m = a.env.entire_map()
        if m[X][Y] == a.env.map.VISITED:
            newx, newy = a.env.remaining_nodes()[0]
            x_init = a.env.agent_location()
            a.env.agentX = newx
            a.env.agentY = newy
            a.env.map.visit(newx, newy)
            x_goal = a.env.agent_location()
            Astar = astar.AStar((0, 0), (a.env.map.width, a.env.map.height),
                                x_init, x_goal, occupancy)
            if not Astar.solve():
                print("Not Solve")
            else:
                a.env.agent_distance += len(Astar.path)
                for j in range(len(Astar.path) - 1):
                    a1, b1 = Astar.path[j]
                    a2, b2 = Astar.path[j + 1]
                    if a1 != a2 and b1 != b2:
                        a.env.agent_turns += 1
            a.env.path.extend(Astar.path)
            # aa.append(a.env.agent_location())
        else:
            aa.append(a.env.agent_location())
            a.env.step(action)
        print(action)
    aa.append(a.env.agent_location())
    print(aa, a.env.agent_distance, a.env.agent_turns)
    a.env.plot_path()
Example #8
 def test_AStar_onDefaultGridProblem(self):
     grid = [[1, 10, 10, 10, 10], [1, 1, 10, 10, 10], [10, 1, 1, 1, 1],
             [10, 10, 1, 10, 1], [10, 10, 1, 10, 1]]
     gp = astar.GridProblem(grid)
     a = astar.AStar(gp)
     pth = a.search((0, 0))
     self.assertEqual(8, pth.g,
                      "Expected a path cost of 8 on the default grid")
     states = a.list_of_states(pth)
     self.assertEqual(9, len(states), "Expected path to contain 9 elements")
     self.assertTrue((0, 0) in a.reached, "Expected (0,0) on reached dict")
     self.assertTrue((0, 1) in a.reached, "Expected (0,1) on reached dict")
     self.assertFalse((0, 2) in a.reached,
                      "Didn't expect (row 0, col 2) in reached")
     # so, this isn't guaranteed to happen unless h(x) = 0
     # self.assertTrue((4,2) in a.reached, "Expected (4,2) on reached dict")
     print("Your reached dict has: ", a.reached)
Example #9
 def test_AStar_Greedyness(self):
     grid = [[1, 10, 10, 1, 1, 1], [1, 1, 10, 1, 10, 1],
             [10, 1, 1, 1, 30, 1], [10, 10, 10, 1, 10, 1],
             [10, 10, 10, 2, 1, 1]]
     gp = astar.GridProblem(grid,
                            hfn=lambda x: 0,
                            goaltest=lambda x: x == (4, 5))
     a = astar.AStar(gp)
     pth = a.search((0, 0))
     print("PATH IS", a.list_of_states(pth), file=sys.stderr)
     self.assertEqual(10, pth.g, "Expected a path cost of 10 on this grid")
     states = a.list_of_states(pth)
     self.assertEqual(10, len(states),
                      "Expected path to contain 10 elements")
     self.assertTrue((0, 0) in a.reached, "Expected (0,0) on reached dict")
     self.assertTrue((4, 3) in a.reached, "Expected (4,3) on reached dict")
     print("Your reached dict has: ", a.reached)
Example #10
    def test_AStar_on8x12trickypngWithNonAdmissibleHeuristic(self):
        #ip = astar.ImageProblem('12x8tricky.png', lambda x: x == (11,7), lambda s: abs(s[0]-11)+abs(s[1]-7))

        def h(s):
            wleft = abs(s[0] - 11)
            hleft = abs(s[1] - 7)
            return wleft + hleft

        ip = astar.ImageProblem('12x8tricky.png', lambda x: x == (11, 7), h)
        a = astar.AStar(ip)
        pth = a.search((0, 0))
        self.assertEqual(
            "19.41", "%.2f" % pth.g,
            "Expected to find a suboptimal path with my weird heuristic '12x8tricky.png'"
        )
        states = a.list_of_states(pth)
        self.assertFalse(
            (1, 0) in states,
            "Didn't expect (1,0) on the path... path should have taken the diagonal"
        )
Example #11
    def findPath_ss(self):
        astar = AStar_ss.AStar(
            AStar_ss.SQ_MapHandler(self.mapdata, self.mapw, self.maph))
        start = AStar_ss.SQ_Location(self.startpoint[0], self.startpoint[1])
        end = AStar_ss.SQ_Location(self.endpoint[0], self.endpoint[1])

        s = time()
        for x in range(10):  # XXX to better compare times
            p = astar.findPath(start, end)
        e = time()

        if not p:
            print "No path found!"
        else:
            print "Found path (10 times) in %d moves and %f seconds." % (len(
                p.nodes), (e - s))
            self.pathlines = []
            self.pathlines.append((start.x * 16 + 8, start.y * 16 + 8))
            for n in p.nodes:
                self.pathlines.append(
                    (n.location.x * 16 + 8, n.location.y * 16 + 8))
            self.pathlines.append((end.x * 16 + 8, end.y * 16 + 8))
Example #12
    def test_AStar_on8x12trickypng(self):
        #ip = astar.ImageProblem('12x8tricky.png', lambda x: x == (11,7), lambda s: abs(s[0]-11)+abs(s[1]-7))

        def h(s):
            wleft = abs(s[0] - 11)
            hleft = abs(s[1] - 7)
            diag = min(wleft, hleft)
            remainder = max(wleft, hleft) - diag
            return diag + remainder

        ip = astar.ImageProblem('12x8tricky.png', lambda x: x == (11, 7),
                                lambda x: 0)
        a = astar.AStar(ip)
        pth = a.search((0, 0))
        self.assertEqual(
            "18.41", "%.2f" % pth.g,
            "Expected a path cost of 18.41 from UL-corner to LR the '12x8tricky.png'"
        )
        states = a.list_of_states(pth)
        self.assertFalse(
            (1, 0) in states,
            "Didn't expect (1,0) on the path... path should have taken the diagonal"
        )
Example #13
	def traverse_graph(self):

		# Creating new graph traverser
		if self.rbSelectedValue.get() == "DFS":
			if self.dfs_query == "yes":
				self.graph_traverser = dfs_iterative.DFSIterative(self.grp)
			else:
				self.graph_traverser = dfs_recursive.DFSRecursive(self.grp)
		elif self.rbSelectedValue.get() == "BFS":
			self.graph_traverser = bfs.BFS(self.grp)
		elif self.rbSelectedValue.get() == "Dijkstra":
			self.graph_traverser = dijkstra.Dijkstra(self.grp, None)
		elif self.rbSelectedValue.get() == "Astar":
			self.graph_traverser = astar.AStar(self.grp, self.rb_heuristic_value.get())

		# Traversing the graph and getting traverse node path
		self.traverse_time_start = time.time()

		self.path, self.steps = self.graph_traverser.traverse()
		if self.path == []:
			tkMessageBox.showerror("Error", "Graph traversing failed")

		self.traverse_time_end = time.time()
Example #14
def main():
    # a=ppp((5,5),(((1,1), (3,3)),),(0,0))
    a = ppp(5)
    obstacles = list(a.env.block_area)
    occupancy = astar.DetOccupancyGrid2D(a.env.map.height, a.env.map.width,
                                         obstacles)
    aa = []
    while not a.end():
        action = exptimax(a, 9)
        X, Y = a.env.next_location(action)
        m = a.env.entire_map()
        if m[X][Y] == a.env.map.VISITED:
            x_init = a.env.agent_location()
            x_goal = a.env.remaining_nodes()[0]
            Astar = astar.AStar((0, 0), (a.env.map.height, a.env.map.width),
                                x_init, x_goal, occupancy)
            if not Astar.solve():
                print("Not Solve")
            else:
                for j in range(len(Astar.path) - 1):
                    a1, b1 = Astar.path[j]
                    a2, b2 = Astar.path[j + 1]
                    if a2 == a1 - 1 and b1 == b2:
                        a.env.step(a.env.UP)
                    elif a2 == a1 + 1 and b1 == b2:
                        a.env.step(a.env.DOWN)
                    elif a2 == a1 and b2 == b1 - 1:
                        a.env.step(a.env.LEFT)
                    elif a2 == a1 and b2 == b1 + 1:
                        a.env.step(a.env.RIGHT)
            # aa.append(a.env.agent_location())
        else:
            aa.append(a.env.agent_location())
            a.env.step(action)
        print(action)
    aa.append(a.env.agent_location())
    print(aa, a.env.agent_distance, a.env.agent_turns)
Example #15
import astar
import cv2
import numpy as np

grid = cv2.imread('bin_tiny.png')[:, :, 0]

maze = astar.getMaze(grid)
aStar = astar.AStar(maze)
for tile in aStar.search(maze[2][1], maze[3][5]):
    maze[tile.y][tile.x].val = 127

for i, line in enumerate(maze):
    for j, cell in enumerate(line):
        grid[i][j] = cell.val

cv2.imwrite('astar.png', np.array(grid))
b = cv2.resize(np.array(grid).astype('float'), (1500, 1000), interpolation=cv2.INTER_NEAREST)
cv2.imwrite('astar_big.png', b)
for i in aStar.search(maze[2][1], maze[3][5]):
    print(i.y, i.x)
Example #16
		#map_res = astar_0.astar(g_map, p_from, p_to)
		finder = astar.Astar(g_map)  # renamed so the astar module is not shadowed
		map_res = finder.run(p_from, p_to)
		print(map_res)
		'''
        if map_res is not None and len(map_res) > 0:
            for point in map_res[0:len(map_res) - 1]:
                row = point.y * H_0
                col = point.x * W_0
                img_add_new[row:row + H_0,
                            col:col + W_0] = img_add_new_copy[row:row + H_0,
                                                              col:col + W_0]
            map_res.clear()
        #g_map.showArray2D()
        # create an AStar object with start (0, 0) and goal (9, 0)
        aStar = astar.AStar(g_map, astar.Point(p_from[0], p_from[1]),
                            astar.Point(p_to[0], p_to[1]))
        # start pathfinding
        map_res = aStar.start()
        if map_res is None or len(map_res) == 0:
            print("Path Not Found!")
            continue
        #map_res.reverse()
        last_p = map_res[0]
        for point in map_res:
            #for col,row in map_res[1:]:
            #print(row,col)
            '''
			row = point.y
Example #17
def local_map_approx_search(aaa):
    aaaa = []

    def getAction(pp, dd):
        def recurse(s, d):
            if s.end():
                return s.reward()
            elif d == 0:
                return s.reward()
            else:
                f = -float('inf')
                for a in s.getLegalActions():
                    tempt = recurse(s.generateSuccessor(a), d - 1)
                    if tempt > f:
                        f = tempt
                return f

        f = -float('inf')
        astore = None
        for a in pp.getLegalActions():
            tempt = recurse(pp.generateSuccessor(a), dd - 1)
            if tempt > f:
                f = tempt
                astore = a
        return astore

    obstacles = list(aaa.env.block_area)
    occupancy = astar.DetOccupancyGrid2D(aaa.env.map.height, aaa.env.map.width,
                                         obstacles)
    while not aaa.end():
        pp = ppp()
        pp.env.map.data = aaa.env.local_map(aaa.lmapsize, aaa.lmapsize)
        a = getAction(pp, aaa.lmapsize * aaa.lmapsize - 1)
        X, Y = aaa.env.next_location(a)
        m = aaa.env.entire_map()
        if m[X][Y] == aaa.env.map.VISITED:
            x_init = aaa.env.agent_location()
            x_goal = aaa.env.remaining_nodes()[0]
            Astar = astar.AStar(
                (0, 0), (aaa.env.map.height, aaa.env.map.width), x_init,
                x_goal, occupancy)
            if not Astar.solve():
                print("Not Solve")
            else:
                for j in range(len(Astar.path) - 1):
                    a1, b1 = Astar.path[j]
                    a2, b2 = Astar.path[j + 1]
                    if a2 == a1 - 1 and b1 == b2:
                        aaa.env.step(aaa.env.UP)
                    elif a2 == a1 + 1 and b1 == b2:
                        aaa.env.step(aaa.env.DOWN)
                    elif a2 == a1 and b2 == b1 - 1:
                        aaa.env.step(aaa.env.LEFT)
                    elif a2 == a1 and b2 == b1 + 1:
                        aaa.env.step(aaa.env.RIGHT)
        # aa.append(a.env.agent_location())
        else:
            aaaa.append(aaa.env.agent_location())
            aaa.env.step(a)
        #print(aaaa)
    # aaaa.append(aaa.env.agent_location())
    return aaaa
Example #18
    robot = env.GetRobots()[0]

    # tuck in the PR2's arms for driving
    tuckarms(env, robot)

    with env:
        # the active DOF are translation in X and Y and rotation about the Z axis of the base of the robot.
        robot.SetActiveDOFs([],
                            DOFAffine.X | DOFAffine.Y | DOFAffine.RotationAxis,
                            [0, 0, 1])

        goalconfig = [2.6, -1.3, -pi / 2]
        #### YOUR CODE HERE ####

        #### Implement the A* algorithm to compute a path for the robot's base,
        #### starting from the current configuration of the robot and ending at
        #### goalconfig. The robot's base DOFs have already been set as active.
        #### It may be easier to implement this as a function in a separate
        #### file and call it here.
        goal = np.array([goalconfig[0], goalconfig[1], goalconfig[2]])
        start = robot.GetActiveDOFValues()
        planner = astar.AStar(env, robot)  # renamed so the astar module is not shadowed
        planner.run(start, goal)

        #### Draw your path in the openrave here (see /usr/lib/python2.7/dist-packages/openravepy/_openravepy_0_8/examples/tutorial_plotting.py for examples)

        #### Draw the X and Y components of the configurations explored by A*

        #### Now that you have computed a path, execute it on the robot using
        #### the controller. You will need to convert it into an openrave
        #### trajectory. You can set any reasonable timing for the
        #### configurations in the path. Then, execute the trajectory using
        #### robot.GetController().SetPath(mypath);

        #### END OF YOUR CODE ###
    waitrobot(robot)

    raw_input("Press enter to exit...")
Example #19
    def confirmAllPuzzlesRunOnAllSearches(self):
        # MODEL PANCAKES
        pancake_puzzle = Pancakes.BurntPancakes()
        pancake_puzzle.parseInput("small_pancakes.config")
        init_state = pancake_puzzle.initial_state
        successor_states = pancake_puzzle.getSuccessorStates(init_state)
        successor_state_costs = []
        for elem in successor_states:
            cost = pancake_puzzle.getPathCost(init_state, elem)
            successor_state_costs.append(cost)
        successor_state_heuristics = []
        for elem in successor_states:
            heuristic = pancake_puzzle.getHeuristic(init_state, elem)
            successor_state_heuristics.append(heuristic)


        # MODEL WATERJUGS
        # parse the water jugs data
        jug_puzzle = WJ.WaterJugs()
        jug_puzzle.parseInput("jugs.config")
        #print jug_puzzle.getHeuristic((0,0),(4,2))
        #wj_tests.WaterJugsTests()

        print "\n============ BREADTH FIRST SEARCH ============"
        print "\n ---- WATER JUG BFS ----"
        bfs = bread_first_search.BFS(jug_puzzle)
        bfs.bfs()

        # MODEL PATH-PLANNING
        path_puzzle = PATH.PathPlanning()
        path_puzzle.parseInput("cities.config")
        print "\n --- PATH PLANNING BFS ---"
        arlington_successors = path_puzzle.getSuccessorStates('Arlington')
        berkshire_successors = path_puzzle.getSuccessorStates('Berkshire')
        chelmsford_successors = path_puzzle.getSuccessorStates('Chelmsford')
        print path_puzzle.getHeuristic(('Berkshire', 4), ('Chelmsford', 10))
        bfs_paths = bread_first_search.BFS(path_puzzle)
        bfs_paths.bfs()

        print "\n ---- PANCAKES BFS ----"
        pancake_bfs = bread_first_search.BFS(pancake_puzzle)
        pancake_bfs.bfs()

        print "\n\n\n ============ DEPTH FIRST SEARCH ============"
        print "\n ---- WATER JUG DFS ----"
        dfs_jugs = depth_first_search.DFS(jug_puzzle)
        dfs_jugs.dfs()
        print "\n --- PATH PLANNING DFS ---"
        dfs_paths = depth_first_search.DFS(path_puzzle)
        dfs_paths.dfs()
        print "\n --- BURNT PANCAKES DFS ---"
        dfs_pancakes = depth_first_search.DFS(pancake_puzzle)
        dfs_pancakes.dfs()

        print "\n\n\n ============ ITERATIVE-DEEPENING DEPTH FIRST SEARCH ============"
        print "\n ---- WATER JUG IDDFS ----"
        iddfs_jugs = iterative_deepening_dfs.IDDFS(jug_puzzle, max_depth=1, deepening_constant=1)
        iddfs_jugs.iddfs()
        print "\n --- PATH PLANNING IDDFS ---"
        iddfs_paths = iterative_deepening_dfs.IDDFS(path_puzzle, max_depth=1, deepening_constant=1)
        iddfs_paths.iddfs()
        print "\n --- BURNT PANCAKES IDDFS ---"
        iddfs_pancakes = iterative_deepening_dfs.IDDFS(pancake_puzzle, max_depth=1, deepening_constant=1)
        iddfs_pancakes.iddfs()

        print "\n\n\n ============ UNICOST SEARCH ============"
        print "\n ---- WATER JUG UNICOST ----"
        unicost_jugs = UC.Unicost(jug_puzzle)
        unicost_jugs.unicost()
        print "\n --- PATH PLANNING UNICOST ---"
        unicost_paths = UC.Unicost(path_puzzle)
        unicost_paths.unicost()
        print "\n --- BURNT PANCAKES UNICOST ---"
        unicost_pancakes = UC.Unicost(pancake_puzzle)
        unicost_pancakes.unicost()

        print "\n\n\n ============ GREEDY SEARCH ============"
        print "\n ---- WATER JUG GREEDY ----"
        greedy_jugs = greedy_search.Greedy(jug_puzzle)
        greedy_jugs.greedy()
        print "\n --- PATH PLANNING GREEDY ---"
        greedy_paths = greedy_search.Greedy(path_puzzle)
        greedy_paths.greedy()
        print "\n --- BURNT PANCAKES GREEDY ---"
        greedy_pancakes = greedy_search.Greedy(pancake_puzzle)
        greedy_pancakes.greedy()


        print "\n\n\n ============ A* SEARCH ============"
        print "\n ---- WATER JUG A* ----"
        astar_jugs = astar_search.AStar(jug_puzzle)
        astar_jugs.astar()
        print "\n --- PATH PLANNING A* ---"
        astar_paths = astar_search.AStar(path_puzzle)
        astar_paths.astar()
        print "\n --- BURNT PANCAKES A* ---"
        astar_pancakes = astar_search.AStar(pancake_puzzle)
        astar_pancakes.astar()


        print "\n\n\n ============ Iterative Deepening A* SEARCH ============"
        print "\n ---- WATER JUG Iterative Deepening A* ----"
        idastar_jugs = iterative_deepening_astar.IDAStar(jug_puzzle, max_depth=4, deepening_constant=4)
        idastar_jugs.idastar()
        print "\n --- PATH PLANNING Iterative Deepening A* ---"
        idastar_paths = iterative_deepening_astar.IDAStar(path_puzzle, max_depth=5, deepening_constant=5)
        idastar_paths.idastar()
        print "\n --- BURNT PANCAKES Iterative Deepening A* ---"
        idastar_pancakes = iterative_deepening_astar.IDAStar(pancake_puzzle, max_depth=5, deepening_constant=5)
        idastar_pancakes.idastar()
Example #20
    walls.append((cords_bfs[0][index], cords_bfs[1][index]))
height, width = first_maze.shape
child, parent = bfs(height, width, start, end, walls)
solution = {}
for idx in range(len(child)):
    solution[child[idx]] = parent[idx]

x_s, y_s = start[0][0], start[1][0]
x, y = end[0][0], end[1][0]
path_bfs = [(x, y)]
while (x, y) != (x_s, y_s):
    (x, y) = solution[x, y]
    path_bfs.append((x, y))
del walls
# A-star calculations
a = alg.AStar()
cords_astar = np.where(second_maze == '#')
walls = []
for index in range(len(cords_astar[0])):
    walls.append((cords_astar[0][index], cords_astar[1][index]))
a.init_grid(8, 13, walls, (7, 0), (3, 10))
path = a.solve()
# Solution for first maze
while len(path_bfs) > 0:
    i, j = path_bfs[0]
    first_maze[i][j] = "x"
    del path_bfs[0]
print("Solution for first maze:")
print(first_maze)
# Solution for Second maze
while len(path) > 0:
Example #21
 def path_astar(self):
     astar = astar_search.AStar(self.cities_puzzle)
     astar.astar()
     return
Example #22
import bfs
import dfs
import iddfs
import astar
"""
main.py declares a start state, a goal state and a size of a board.
It then creates 4 objects, one of each class of 4 different searches,
declaring their arguments, as stated above.
It then calls methods in within these classes to perform the searches.
"""
if __name__ == "__main__":
    start_state = [(4, 1), (4, 2), (4, 3), (4, 4)]
    goal_state = [(2, 2), (3, 2), (4, 2), (1, 1)]
    board_size = (4, 4)
    breadth = bfs.BFS(start_state, goal_state, board_size)
    #breadth.bfs_graph()
    #breadth.bfs_tree()
    depth = dfs.DFS(start_state, goal_state, board_size)
    depth.dfs_graph()
    #depth.dfs_tree_not_randomised()
    #depth.dfs_tree_randomised()
    # renamed so the iddfs/astar modules are not shadowed by the instances
    iddfs_search = iddfs.IDDFS(start_state, goal_state, board_size)
    #iddfs_search.iddfs()
    astar_search = astar.AStar(start_state, goal_state, board_size)
    #astar_search.astar('distance')
Example #23
    def solve(self):
        '''Brute-force solver. It iterates over every permutation of the goal
        list and computes the A* shortest path for every element of the
        permutation. As soon as the path length of a permutation is equal to
        or lower than the given best solution, the function terminates and
        returns the best path found.
        '''

        # Initializing values
        best_path_length = 100
        best_path = []
        best_perm = ()
        rotations = []
        final_rotations = []

        # Going through every permutation of the goal list
        for perm in permutations(range(len(self.goal_list))):
            current_step = self.start
            self.reset_rubies(self.tile_list)
            path_list = []
            rotations = []

            # Going through every element of the permutation
            for i in perm:
                self.reset_parents(self.tile_list)
                new_step = self.goal_list[i][0]

                # Calling the A* algorithm
                a = astar.AStar(self.grid_height, self.grid_width, \
                    self.tile_list, current_step, new_step, self.has_hammer, \
                    self.direction)
                astar_solution, self.direction, rot = a.process()
                rotations.extend(rot)
                # Drop the start cell (the A* solution is stored in reverse order)
                if len(astar_solution) > 0:
                    del astar_solution[-1]

                # Reverse the path
                if astar_solution is not None:
                    path_list.extend(reversed(astar_solution))
                else:
                    continue

                # Calculate current path length
                path_length = len(path_list)
                current_step = path_list[-1]

                # Record the path when it matches the given best solution length
                if (path_length == self.best_solution
                        and self.check_list(path_list)):
                    best_path_length = path_length
                    best_perm = perm
                    best_path = path_list.copy()
                    final_rotations = rotations.copy()

            # Terminate once the best path length equals the given best solution
            if best_path_length == self.best_solution:
                break

        final_rotations = list(reversed(final_rotations))
        return best_path, final_rotations, best_perm
Example #24
    if event.keysym == "Up":
        a_star.up()
    elif event.keysym == "Down":
        a_star.down()
    elif event.keysym == "Left":
        a_star.left()
    else:
        a_star.right()


if __name__ == "__main__":
    width, height = 800, 800
    rows, cols = 25, 25

    root = Tk()
    root.geometry("{}x{}".format(width, height))

    canvas = Canvas(root, width=width, height=height)
    canvas.pack()

    a_star = astar.AStar(root, canvas, width, height, rows, cols)

    root.bind("<Up>", event)
    root.bind("<Down>", event)
    root.bind("<Left>", event)
    root.bind("<Right>", event)

    root.after(0, a_star.run)  # pass the callable, not its result
    root.after(0, a_star.keep_window)
    root.mainloop()
Example #25
def main():
    # get number of args passed in via command line
    num_args = len(sys.argv)

    # ensure we have valid input
    if num_args == 1:
        print "Usage: 'python puzzlesolver.py [config_filename] [search_algorithm_name] [optional: heuristic] "
        print "     to run test suite:  'python puzzlesolver.py -t'"
        return
    # check if we are running the test suite
    if sys.argv[1] == "-t":
        # run the test suite
        runTests()
        return

    # if we get this far, then we are running a specific algorithm
    if num_args < 3 or num_args > 4:
        print "Usage: 'python puzzlesolver.py [config_filename] [search_algorithm_name] [optional: heuristic] "
        print "     to run test suite:  'python puzzlesolver.py -t'"
        return


    # otherwise, parse the args, and
    # take 2 input args... plus an optional one....
    # FIRST ARG --> a configuration file
    config_file = sys.argv[1]
    # SECOND ARG --> Keyword to specify which algorithm to use: bfs, dfs, iddfs, unicost, greedy, astar, idastar
    algorithm = sys.argv[2]
    # THIRD ARG --> Heuristic
    if num_args == 4:
        heuristic = sys.argv[3]

    """ parse the config file based on what's in the first line"""
    # do that here.... need to write a function just to get first line and then call the appropriate
    # parse based on what's sent in

    # grab all the data and store it as a single string
    with open(config_file, 'r') as f:
        data_as_string = f.read()

    # split the data we've just read on newline, so we can index into it
    data_array = data_as_string.split("\n")

    # ensure that we actually have pancake data
    puzzle_name = data_array[0]
    puzzle = None
    # check which puzzle we have and open the correct file for it
    if "jugs" in puzzle_name:
        # then it's the jug puzzle
        puzzle = jugs.WaterJugs()
        puzzle.parseInput(config_file)
    elif "pancake" in puzzle_name:
        # then it's burnt pancakes puzzle
        puzzle = pancakes.BurntPancakes()
        puzzle.parseInput(config_file)
    elif "cities" in puzzle_name:
        # then it's path planning
        puzzle = paths.PathPlanning()
        puzzle.parseInput(config_file)
    else:
        # else it's nothing, and we have an invalid file
        print "Invalid data file. Please make sure your file has the correct format."
        return


    # determine which algorithm to initialize
    if algorithm == "bfs":
        print "---- START BFS ----"
        search = breadth_first_search.BFS(puzzle)
        search.bfs()
        print "---- END BFS ----"

    elif algorithm == "dfs":
        print "---- START DFS ----"
        search = depth_first_search.DFS(puzzle)
        search.dfs()
        print "---- END DFS ----"

    elif algorithm == "iddfs":
        print "---- START IDDFS ----"
        search = iterative_deepending_dfs.IDDFS(puzzle, 1, 1)
        search.iddfs()
        print "---- END IDDFS ----"

    elif algorithm == "unicost":
        print "----- START UNICOST ----"
        search = unicost_search.Unicost(puzzle)
        search.unicost()
        print "----- END UNICOST ----"

    elif algorithm == "greedy":
        print "----- START GREEDY -----"
        search = greedy_search.Greedy(puzzle)
        search.greedy()
        print "----- END GREEDY -----"

    elif algorithm == "astar":
        print "----- START ASTAR -----"
        search = astar_search.AStar(puzzle)
        search.astar()
        print "----- END ASTAR -----"

    elif algorithm == "idastar":
        print "----- START IDASTAR -----"
        search = idastar_search.IDAStar(puzzle, 5)
        search.idastar()
        print "----- END IDASTAR -----"
    else:
        print "Invalid algorithm name."


    return
Example #26
 def pancake_astar(self, puzzle):
     astar = astar_search.AStar(puzzle)
     astar.astar()
     return
Example #27
def test1():
    w, h = 6, 3
    start, goal = (0, 0), (3, 2)
    walls = [(2, 1), (3, 1), (4, 1)]
    obj = astar.AStar(w, h, start, goal, walls, astar.DIAGONAL_DIS)
    print(obj.result)
Example #28
def TDlearning(ppp, eps=0.3, iteration=200, max_steps=10000):
    model = Sequential()
    #model.add(LocallyConnected2D(5, (3, 3),
    #       input_shape=(1,5, 5), padding='valid',))
    # model.add(Flatten(input_shape=(1,5, 5)))
    model.add(Dense(50, activation='relu', input_dim=104))
    model.add(Dense(30, activation='relu'))
    model.add(Dense(30, activation='relu'))
    model.add(Dense(30, activation='relu'))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mse', optimizer='adam', metrics=['mae'])
    #model.compile(loss='mse',optimizer=keras.optimizers.SGD(lr=0.0001, momentum=0.9, nesterov=True))
    for i in range(iteration):
        ttttt = list(ppp.env.counter.get_data(100).flatten())
        ttttt.append(ppp.env.agentX)
        ttttt.append(ppp.env.agentY)
        ttttt.append(ppp.env.agent_turns)
        ttttt.append(ppp.env.agent_distance)
        ttttt = np.expand_dims(ttttt, axis=0)
        val = model.predict(ttttt)
        print("i=", i, val)
        temptppp = copy.deepcopy(ppp)
        obstacles = list(temptppp.env.block_area)
        occupancy = astar.DetOccupancyGrid2D(temptppp.env.map.height,
                                             temptppp.env.map.width, obstacles)
        j = 0
        while temptppp.end() != 1 and j < max_steps:
            j += 1
            #print(temptppp.env.counter.data)
            turns = temptppp.env.agent_turns
            distance = temptppp.env.agent_distance
            unvisited = temptppp.env.num_unvisited_nodes()
            if random.random() < eps:
                a = random.choice(temptppp.getLegalActions())
                newppp = temptppp.generateSuccessor(a)
                ttttt = list(newppp.env.counter.get_data(100).flatten())
                ttttt.append(newppp.env.agentX)
                ttttt.append(newppp.env.agentY)
                ttttt.append(newppp.env.agent_turns)
                ttttt.append(newppp.env.agent_distance)
                ttttt = np.expand_dims(ttttt, axis=0)
                val = model.predict(ttttt)
                differturns = newppp.env.agent_turns - turns
                differdistance = 1
                differunvisited = newppp.env.num_unvisited_nodes() - unvisited
                fff = reward = val - differturns * 2 - 2 - differunvisited * 10
            else:
                fff = [[-float('Inf')]]
                for ttt in temptppp.getLegalActions():
                    newppp = temptppp.generateSuccessor(ttt)
                    ttttt = list(newppp.env.counter.get_data(100).flatten())
                    ttttt.append(newppp.env.agentX)
                    ttttt.append(newppp.env.agentY)
                    ttttt.append(newppp.env.agent_turns)
                    ttttt.append(newppp.env.agent_distance)
                    ttttt = np.expand_dims(ttttt, axis=0)
                    val = model.predict(ttttt)
                    differturns = newppp.env.agent_turns - turns
                    differdistance = 1
                    differunvisited = newppp.env.num_unvisited_nodes() - unvisited
                    reward = val - differturns * 2 - 2 - differunvisited * 10
                    if reward[0][0] > fff[0][0]:
                        a = ttt
                        fff = reward
            X, Y = temptppp.env.next_location(a)
            m = temptppp.env.entire_map()
            if m[X][Y] == temptppp.env.map.VISITED:
                x_init = temptppp.env.agent_location()
                x_goal = temptppp.env.remaining_nodes()[0]
                Astar = astar.AStar(
                    (0, 0), (temptppp.env.map.height, temptppp.env.map.width),
                    x_init, x_goal, occupancy)
                Astar.solve()
                a1, b1 = Astar.path[0]
                a2, b2 = Astar.path[1]
                if a2 == a1 - 1 and b1 == b2:
                    a = temptppp.env.UP
                elif a2 == a1 + 1 and b1 == b2:
                    a = temptppp.env.DOWN
                elif a2 == a1 and b2 == b1 - 1:
                    a = temptppp.env.LEFT
                elif a2 == a1 and b2 == b1 + 1:
                    a = temptppp.env.RIGHT
                newppp = temptppp.generateSuccessor(a)
                differturns = newppp.env.agent_turns - turns
                differdistance = 1
                differunvisited = newppp.env.num_unvisited_nodes() - unvisited
                fff = reward = val - differturns * 2 - 2 - differunvisited * 10
            target = fff
            ttttt = list(temptppp.env.counter.get_data(100).flatten())
            ttttt.append(temptppp.env.agentX)
            ttttt.append(temptppp.env.agentY)
            ttttt.append(temptppp.env.agent_turns)
            ttttt.append(temptppp.env.agent_distance)
            ttttt = np.expand_dims(ttttt, axis=0)
            model.fit(ttttt, target, epochs=1, verbose=0)
            temptppp.env.step(a)
        ttttt = list(temptppp.env.counter.get_data(100).flatten())
        ttttt.append(temptppp.env.agentX)
        ttttt.append(temptppp.env.agentY)
        ttttt.append(temptppp.env.agent_turns)
        ttttt.append(temptppp.env.agent_distance)
        ttttt = np.expand_dims(ttttt, axis=0)
        # if temptppp.end()==1:
        #while model.predict(ttttt)>1:
        model.fit(ttttt, [[0.0]], epochs=5, verbose=0)
        ttttt = list(temptppp.env.counter.get_data(100).flatten())
        ttttt.append(temptppp.env.agentX)
        ttttt.append(temptppp.env.agentY)
        ttttt.append(temptppp.env.agent_turns)
        ttttt.append(temptppp.env.agent_distance)
        ttttt = np.expand_dims(ttttt, axis=0)
        print('end', model.predict(ttttt))

    j = 0
    while ppp.end() != 1 and j < 500:
        j += 1  #
        fff = -float('Inf')
        turns = ppp.env.agent_turns
        distance = ppp.env.agent_distance
        unvisited = ppp.env.num_unvisited_nodes()
        for ttt in ppp.getLegalActions():
            newppp = ppp.generateSuccessor(ttt)
            ttttt = list(newppp.env.counter.get_data(100).flatten())
            ttttt.append(newppp.env.agentX)
            ttttt.append(newppp.env.agentY)
            ttttt.append(newppp.env.agent_turns)
            ttttt.append(newppp.env.agent_distance)
            ttttt = np.expand_dims(ttttt, axis=0)
            val = model.predict(ttttt)
            differturns = newppp.env.agent_turns - turns
            differdistance = 1
            differunvisited = newppp.env.num_unvisited_nodes() - unvisited
            reward = val - differturns * 2 - 2 - differunvisited * 10
            if reward > fff:
                a = ttt
                fff = reward
        X, Y = ppp.env.next_location(a)
        m = ppp.env.entire_map()
        if m[X][Y] == ppp.env.map.VISITED:
            x_init = ppp.env.agent_location()
            x_goal = ppp.env.remaining_nodes()[0]
            Astar = astar.AStar(
                (0, 0), (ppp.env.map.height, ppp.env.map.width), x_init,
                x_goal, occupancy)
            Astar.solve()
            a1, b1 = Astar.path[0]
            a2, b2 = Astar.path[1]
            if a2 == a1 - 1 and b1 == b2:
                a = ppp.env.UP
            elif a2 == a1 + 1 and b1 == b2:
                a = ppp.env.DOWN
            elif a2 == a1 and b2 == b1 - 1:
                a = ppp.env.LEFT
            elif a2 == a1 and b2 == b1 + 1:
                a = ppp.env.RIGHT
        ppp.env.step(a)
        print(a)
    print(a, turns, distance, unvisited)
    print(ppp.env.counter.data)
    print(ppp.env.agent_turns, ppp.env.agent_distance)
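The five-line feature-vector construction in TDlearning is repeated verbatim eight times. A sketch of the helper it could be factored into; the name featurize is hypothetical, and it assumes the same env attributes the snippet reads (100 counter values plus 4 scalars, matching input_dim=104 above).

import numpy as np

def featurize(state):
    # Flatten the visit counter and append the four scalar features that the
    # snippet feeds to the value network.
    feats = list(state.env.counter.get_data(100).flatten())
    feats += [state.env.agentX, state.env.agentY,
              state.env.agent_turns, state.env.agent_distance]
    return np.expand_dims(feats, axis=0)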