Example #1
def handle_merge_check(co, handle, point, txn):
    change = handle_last_modified(co, co.contents, handle, point, txn)
    if change is None:
        return None

    hcache = {}
    cDFS = DFS(_merge_check_deps, [co, handle, txn, hcache])
    cDFS.search(change)
    ordering = cDFS.result()

    for point in ordering:
        hinfo = hcache[point]

        if not hinfo.has_key('handle') and len(hinfo['precursors']) > 1:
            raise HistoryError, 'cannot automatically merge changes'

    return
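The DFS class in this example (and in the sync_history and handle_contents_at_point examples below) is constructed with a dependency callback plus a list of extra arguments; search(change) then walks the change graph and result() returns the visited points with precursors ahead of the changes that depend on them. As a rough illustration of that interface only, and not the project's actual implementation, a minimal sketch might look like the following (the deps_func(node, *args) call shape is an assumption):

# Minimal sketch of the DFS interface these examples rely on; an
# illustration under assumptions, not the real implementation.
class DFS(object):

    def __init__(self, deps_func, args):
        # deps_func(node, *args) is assumed to return the precursor points
        # that must be ordered before node.
        self.deps_func = deps_func
        self.args = args
        self.seen = set()
        self.ordering = []

    def search(self, start):
        # Iterative post-order walk, so long histories cannot exhaust the
        # recursion limit.
        stack = [(start, False)]
        while stack:
            node, expanded = stack.pop()
            if expanded:
                self.ordering.append(node)
                continue
            if node in self.seen:
                continue
            self.seen.add(node)
            stack.append((node, True))
            for dep in self.deps_func(node, *self.args):
                if dep not in self.seen:
                    stack.append((dep, False))

    def result(self):
        # Precursors appear before the points that depend on them.
        return self.ordering

An ordering with precursors first is exactly what the contents-replay example further down assumes when it reads cache[pre]['info'] for each precursor before filling in the current point.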
Example #2
def sync_history(co, point, txn, cache=dict()):
    named, modified, manifest = [], [], {}

    sync_dfs = DFS(_history_deps, [co, txn, cache])
    sync_dfs.search(point)
    points = sync_dfs.result()

    for npoint in points:
        named, modified, unnamed, unmodified = \
               _sync_history(co, npoint, txn, cache=cache)
        unchecked = dict.fromkeys(unnamed)
        for handle in named:
            if handle in unchecked:
                continue
            _verify_manifest(co, handle, npoint, txn)
        co.changesdb.put(binascii.hexlify(npoint), '', txn=txn)

    return named, modified
Example #3
def main():
    for line_number, element in enumerate(get_input("input.txt")):
        print(element)
        board_size = int(element[0])
        board_puzzle_config = element[3]
        board_initial_depth = 1
        max_depth = int(element[1])
        max_length = int(element[2])

        board = Board(board_size, board_puzzle_config, None, 0,
                      board_initial_depth)

        # DFS
        dfs = DFS(board, max_depth, line_number)
        dfs.search()

        # BFS
        bfs = BFS(board, max_length, line_number)
        bfs.search()

        # A*
        astar = ASTAR(board, max_length, line_number)
        astar.search()
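get_input() is not shown in this example; judging purely from how main() indexes each element, a hypothetical stand-in (an assumption, not the project's reader) would split each line of input.txt into whitespace-separated fields:

# Hypothetical stand-in for get_input(), inferred from the indexing above:
# fields[0] = board size, fields[1] = DFS max depth,
# fields[2] = max search path length for BFS/A*, fields[3] = puzzle string.
# The project's actual reader may differ.
def get_input(filename):
    with open(filename) as handle:
        for line in handle:
            fields = line.split()
            if fields:  # skip blank lines
                yield fields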
Example #4
def depthFirstSearch(self):
    print("\nStarting Depth First Search")
    print("------------------------------------------")
    dfs = DFS(self.listNodes, self.searched)
    dfs.search()
Example #5
N.B. a/b corresponds to the number of nodes where the setup in <a> has

    domain_values = ['knight', 'king']
    
and the setup in <b> has

    domain_values = ['king', 'knight']. 



"""

print("Grid:\n", Setup.grid)
print(f"(Size {Setup.grid_size})")

dfs = DFS()
node = dfs.search()

grid = Grid(np.ndarray((Setup.grid_size, Setup.grid_size), dtype=str))
for point, value in node.state.assignment.items():
    grid[point] = ['O', 'K'][value == 'knight']

print("Grid:\n", grid)
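The a/b note in the docstring is about value ordering: a backtracking search tries domain values in list order, so ['knight', 'king'] and ['king', 'knight'] commit to different branches first and can visit different numbers of nodes before reaching a solution. A toy backtracker (not the Setup/DFS/Grid classes used above) shows where that difference comes from:

# Toy CSP backtracker illustrating the a/b note: the order of domain_values
# decides which value each variable tries first. An illustration only, not
# the example's DFS/Setup code.
def backtrack(assignment, variables, domain_values, consistent, stats):
    stats['nodes'] += 1                      # count every visited node
    if len(assignment) == len(variables):
        return dict(assignment)              # every variable assigned
    var = variables[len(assignment)]         # next unassigned variable
    for value in domain_values:              # list order = branch order
        assignment[var] = value
        if consistent(assignment):
            result = backtrack(assignment, variables, domain_values,
                               consistent, stats)
            if result is not None:
                return result
        del assignment[var]
    return None

Running the same problem once with each ordering and comparing stats['nodes'] gives the kind of a/b difference the docstring refers to.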
Example #6
def handle_contents_at_point(co, handle, point, txn, dcache=None, replayfunc=replay):
    if dcache is None:
        dcache = {}
    #staticinfo = bdecode(co.staticdb.get(handle, txn=txn))
    staticinfo = db_get(co, co.staticdb, handle, txn)
    if staticinfo['type'] != 'file':
        raise ValueError, 'Expected type \"file\", got type \"%s\"' % \
              (staticinfo['type'],)

    change = handle_last_modified(co, co.contents, handle, point, txn)
    if change is None:
        return None

    hcache = {}
    cache = _mini_dag_refcount(co, handle, change, txn, info_cache=hcache)
    hfile = open(path.join(co.cpath, binascii.hexlify(handle)), 'rb')
    #hfile = open(path.join(co.cpath, 'diffs'), 'rb')

    cDFS = DFS(_content_deps, [hcache])
    cDFS.search(change)
    ordering = cDFS.result()

    for point in ordering:
        hinfo = hcache[point]

        if hinfo['handle'].has_key('delete'):
            # Pick contents of an ancestor, doesn't really matter
            cache[point]['info'] = cache[hinfo['precursors'][0][0]].copy()
            cache[point]['info']['delete'] = hinfo['handle']['delete']

        # put together the precursor list and decrement refcounts
        precursors = []
        for pre, foo in hinfo['precursors']:
            precursors.append(cache[pre]['info'])

            cache[pre]['refcount'] -= 1
            if cache[pre]['refcount'] == 0:
                del cache[pre]

        if hinfo['handle'].has_key('delete'):
            # This has to be done after decrementing refcounts, whereas the
            # content setting has to be done before.
            continue

        # read the diff
        if dcache.has_key(point):
            diff = dcache[point]
        else:
            diff = _read_diff(hinfo, hfile)

        if diff is None:
            if len(hinfo['precursors']) > 1:
                raise HistoryError, 'cannot automatically merge changes'
            raise HistoryError, 'change with no diff'

        diff = bdecode(zlib.decompress(diff))

        # finally, get the contents
        cache[point]['info'] = _handle_contents_at_point(point, hinfo,
                                                         precursors, diff,
                                                         replayfunc=replayfunc)

    hfile.close()

    cache[change]['info']['type'] = staticinfo['type']
    return cache[change]['info']
Example #7
puzzle = Puzzle(input_puzzle)

# Check if puzzle is solvable
if len(puzzle.puzzle) < 2:
    raise Exception(
        "Invalid puzzle size: Puzzles must be at least two tiles large.\nExiting Program..."
    )
if not puzzle.is_tiled_correctly():
    raise Exception(
        "Incorrect tile values were provided. \nExiting Program...")
puzzle.goal_gen()
if not puzzle.is_puzzle_solvable():
    option_continue = input(
        "This puzzle may not be solvable, do you want to continue? (Y/N)\n")
    if option_continue == 'N' or option_continue == 'n':
        print("Exiting Program...")
        exit()
puzzle.set_rows_and_columns()

if option == "1":
    dfs = DFS(puzzle)
    dfs.search()
elif option == "2":
    bfs = BFS(puzzle)
    bfs.search()
elif option == "3":
    a_star = AStar(puzzle)
    a_star.search()
else:
    raise Exception("Invalid option. Option must be (1,2,3)")
Example #8
	solution=""
	while(1):
		print("\nplease choose the searching method:\nb :BFS\nd :DFS\nu :UCS")
		print("g_1 :Greedy best first search (heuristic 1)") 
		print("g_2 :Greedy best first search (heuristic 2)")
		print("a_1 :A* search (heuristic 1)")
		print("a_2 :A* search (heuristic 2)")
		print("Others :quit")
     
		instr=raw_input(":")
		if instr=="b":
			bfs=BFS(root)
			solution=bfs.search()
		elif instr=="d":
			dfs=DFS(root)
			solution=dfs.search()
		elif instr=="u":
			ucs=UCS(root)
			solution=ucs.search()
		elif instr=="g_1":
			gbfs=GreedyBFS(root,1)  # initialize with heuristic 1
			solution=gbfs.search()
		elif instr=="g_2":
			gbfs=GreedyBFS(root,2)  # initialize with heuristic 2
			solution=gbfs.search(2)
		elif instr=="a_1":
			a_star=A_star(root,1)
			solution=a_star.search()
		elif instr=="a_2":
			a_star=A_star(root,2)
			solution=a_star.search(2)
Example #9
def main():
    goal_state = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 0]]
    state = []

    matrix_length = int(input("enter matrix length : "))

    for i in range(matrix_length):
        x = []
        y = input().split(' ')
        for j in range(len(y)):
            x.append(int(y[j]))
        state.append(x)

    # ida_linear_time_start = time()
    # ida_star_linear = IDA(root=Node(state), goal=goal_state)
    # output = ida_star_linear.search(linear_conflict)
    # print("IDA with linear conflict expanded nodes : {}".format(ida_star_linear.expanded_nodes))
    # print("A star with linear conflict visited nodes : {}".format(ida_star_linear.visited_nodes))
    # print("A star with linear conflict execute time : {}\n".format(time() - ida_linear_time_start))

    # ida_manhattan_time_start = time()
    # ida_star_manhattan = IDA(root=Node(state), goal=goal_state)
    # output = ida_star_manhattan.search(manhattan_distance)
    # print("IDA with linear conflict expanded nodes : {}".format(ida_star_manhattan.expanded_nodes))
    # print("A star with linear conflict visited nodes : {}".format(ida_star_manhattan.visited_nodes))
    # print("A star with linear conflict execute time : {}\n".format(time() - ida_manhattan_time_start))

    # a_star_linear = AStar(root=Node(state=state), goal=goal_state)
    # a_star_linear_time_start = time()
    # output = a_star_linear.search(linear_conflict)
    # print("A star with linear conflict expanded nodes : {}".format(a_star_linear.expanded_nodes))
    # print("A star with linear conflict visited nodes : {}".format(a_star_linear.visited_nodes))
    # print("A star with linear conflict execute time : {}\n".format(time() - a_star_linear_time_start))

    # a_star_manhattan = AStar(root=Node(state=state), goal=goal_state)
    # a_star_manhattan_time_start = time()
    # output = a_star_manhattan.search(manhattan_distance)
    # print("A star with manhattan expanded nodes : {}".format(a_star_manhattan.expanded_nodes))
    # print("A star with manhattan visited nodes : {}".format(a_star_manhattan.visited_nodes))
    # print("A star with manhattan execute time : {}\n".format(time() - a_star_manhattan_time_start))

    # bidirectional = BDS(root=Node(state), goal=Node(goal_state))
    # bds_time = time()
    # output = bidirectional.search()
    # print("BDS expand nodes : {} \n".format(bidirectional.expanded_nodes))
    # print("BDS execute time : {}\n".format(time() - bds_time))

    # bfs = BFS(root=Node(state), goal=goal_state)
    # bfs_time = time()
    # output = bfs.search()
    # print("bfs expanded nodes : {}".format(bfs.expanded_nodes))
    # print("bfs visited nodes: {}".format(bfs.visited_nodes))
    # print("bfs execute time: {}\n".format(time() - bfs_time))

    dfs = DFS(root=Node(state), goal=goal_state)
    dfs_time = time()
    output = dfs.search()
    print("dfs expanded nodes : {}".format(dfs.expanded_nodes))
    print("dfs visited nodes: {}".format(dfs.visited_nodes))
    print("dfs execute time: {}\n".format(time() - dfs_time))

    # ucs = UCS(UcsNode(state=state, cost_up=5, cost_down=1, cost_left=1, cost_right=2), goal=goal_state)
    # ucs_time = time()
    # output = ucs.search()
    # print("ucs expanded nodes : {}".format(ucs.expanded_nodes))
    # print("ucs visited nodes: {}".format(ucs.visited_nodes))
    # print("ucs execute time: {}\n".format(time() - ucs_time))

    final_file = open('final.txt', 'w')
    final_file.write(f'{len(state)}\n')

    for out in output:
        for key in out:
            print(key)
        print()
        final_file.write(standard_printer(out))
        final_file.write('\n')
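The DFS class used here takes a root Node and a goal state and exposes search(), expanded_nodes and visited_nodes. As a simplified stand-in for that interface (plain list-of-lists states instead of the project's Node class, and an uninformed graph search), a sketch could look like this:

# Simplified stand-in for the DFS interface above (counters included); not
# the project's Node-based implementation.
class SlidingPuzzleDFS(object):

    def __init__(self, root, goal):
        self.root = root            # start state as a list of lists
        self.goal = goal            # goal state as a list of lists
        self.expanded_nodes = 0     # states whose successors were generated
        self.visited_nodes = 0      # states popped off the stack

    def _successors(self, state):
        # Slide the blank (0) up, down, left and right.
        n = len(state)
        r, c = next((i, j) for i in range(n) for j in range(n)
                    if state[i][j] == 0)
        for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nr, nc = r + dr, c + dc
            if 0 <= nr < n and 0 <= nc < n:
                nxt = [row[:] for row in state]
                nxt[r][c], nxt[nr][nc] = nxt[nr][nc], nxt[r][c]
                yield nxt

    def search(self):
        # Depth-first graph search; returns the list of states from the
        # root to the goal, or None if the goal is unreachable.
        stack = [(self.root, [self.root])]
        seen = {tuple(map(tuple, self.root))}
        while stack:
            state, path = stack.pop()
            self.visited_nodes += 1
            if state == self.goal:
                return path
            self.expanded_nodes += 1
            for nxt in self._successors(state):
                key = tuple(map(tuple, nxt))
                if key not in seen:
                    seen.add(key)
                    stack.append((nxt, path + [nxt]))
        return None

Uninformed depth-first search can wander very deep on the 15-puzzle, which is why a depth bound or one of the informed searches left commented out above is usually the practical choice; the sketch only shows where the counters come from.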
Example #10
def handle_contents_at_point(co,
                             handle,
                             point,
                             txn,
                             dcache=None,
                             replayfunc=replay):
    if dcache is None:
        dcache = {}
    #staticinfo = bdecode(co.staticdb.get(handle, txn=txn))
    staticinfo = db_get(co, co.staticdb, handle, txn)
    if staticinfo['type'] != 'file':
        raise ValueError, 'Expected type \"file\", got type \"%s\"' % \
              (staticinfo['type'],)

    change = handle_last_modified(co, co.contents, handle, point, txn)
    if change is None:
        return None

    hcache = {}
    cache = _mini_dag_refcount(co, handle, change, txn, info_cache=hcache)
    hfile = open(path.join(co.cpath, binascii.hexlify(handle)), 'rb')
    #hfile = open(path.join(co.cpath, 'diffs'), 'rb')

    cDFS = DFS(_content_deps, [hcache])
    cDFS.search(change)
    ordering = cDFS.result()

    for point in ordering:
        hinfo = hcache[point]

        if hinfo['handle'].has_key('delete'):
            # Pick contents of an ancestor, doesn't really matter
            cache[point]['info'] = cache[hinfo['precursors'][0][0]].copy()
            cache[point]['info']['delete'] = hinfo['handle']['delete']

        # put together the precursor list and decrement refcounts
        precursors = []
        for pre, foo in hinfo['precursors']:
            precursors.append(cache[pre]['info'])

            cache[pre]['refcount'] -= 1
            if cache[pre]['refcount'] == 0:
                del cache[pre]

        if hinfo['handle'].has_key('delete'):
            # This has to be done after decrementing refcounts, whereas the
            # content setting has to be done before.
            continue

        # read the diff
        if dcache.has_key(point):
            diff = dcache[point]
        else:
            diff = _read_diff(hinfo, hfile)

        if diff is None:
            if len(hinfo['precursors']) > 1:
                raise HistoryError, 'cannot automatically merge changes'
            raise HistoryError, 'change with no diff'

        diff = bdecode(zlib.decompress(diff))

        # finally, get the contents
        cache[point]['info'] = _handle_contents_at_point(point,
                                                         hinfo,
                                                         precursors,
                                                         diff,
                                                         replayfunc=replayfunc)

    hfile.close()

    cache[change]['info']['type'] = staticinfo['type']
    return cache[change]['info']