Example 1
def initialize_buffer_with_single_test_tuple(size):
    grid = Grid(filename="levels/" + str(size) + "x" + str(size) +
                "/grid_100.txt")
    result = set()
    states = grid.generate_all_states()
    found = False
    print("Start state: ")
    print(grid.start_state.spaces)
    for state in states:
        q_state = QState(state)
        for action in range((size - 1) * 4):

            # Decode the flat action index into a (color, direction) pair.
            color = action // 4 + 1
            direction = action % 4
            action_tu = (color, direction)

            new_state, reward, terminal = q_state.step(action_tu)

            if terminal:
                sars = (q_state, action, reward, new_state, terminal)
                print("Old state: ")
                print(q_state.state.spaces)
                print("New state")
                print(new_state.state.spaces)

                print("Action: ", action_tu)
                print("Action index: ", action)
                print("Old state winning: ", q_state.is_winning())
                print("New state winning: ", new_state.is_winning())
                result.add(sars)
                found = True
                break
        if found:
            break
    return list(result)
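Several of these examples decode a flat action index into a (color, direction) pair via action // 4 + 1 and action % 4, with (size - 1) colors and 4 directions. A minimal sketch of the round trip (the helper names are illustrative, not from the source):

def encode_action(color, direction):
    # Inverse of the decoding used above: colors are 1-based, directions are 0-3.
    return (color - 1) * 4 + direction

def decode_action(action):
    return action // 4 + 1, action % 4

# Round-trip check over the whole action space of a 4x4 grid (3 colors).
size = 4
for a in range((size - 1) * 4):
    assert encode_action(*decode_action(a)) == a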
Example 2
 def __init__(self, grid_size, max_generation):
     self.grid_size = grid_size
     self.grid_size_x, self.grid_size_y = grid_size
     self.generation = 0
     self.max_generation = max_generation
     self.active_cells = 0
     self.game_board = Grid(grid_size, Cell)
     window_size = tuple([x * 5 for x in grid_size])
     self.view = GameView(window_size)
     self.empty_grid = Grid(self.grid_size, Cell)
Example 3
def generate_random_grid(size=4, num_colors=3):
    spaces, start_coords = random_start(size, num_colors)
    grid = Grid.create(spaces, num_colors, start_coords, end_coords=dict())

    state = grid.start_state
    searching = True
    won = False
    while searching:
        for col in range(1, num_colors + 1):

            possible_actions = state.possible_actions(check_end_tips=False)
            if len(possible_actions) == 0:
                searching = False
                break

            actions_for_color = filter_actions_by_color(possible_actions, col)
            if len(actions_for_color) == 0:
                continue

            random_action = random.choice(actions_for_color)

            state = state.next_state(random_action, check_end_tips=False)

            if state.is_winning(check_end_tips=False):
                print("State won!")
                searching = False
                won = True
                break

    if won:
        state.set_to_start_with_current_tips()
        return state
    else:
        return None
Example 4
def main():
    # Grab the command line options.
    options, args = get_options()

    # Instantiate the Flow Free grid.
    grid = Grid(filename="levels/" + options.level)

    # Initialize the renderer.
    renderer = GridRenderer(options.level)

    print("Algorithm: ", options.algorithm)

    value_function, policy = (policy_iteration(grid)
                              if options.algorithm == "policy_iteration"
                              else value_iteration(grid))
    print("Completed iteration!")

    # Now let's try out our policy!
    state = grid.start_state
    while True:
        state = state.next_state(policy[state])

        # Draw the grid to the screen.
        renderer.render(state)

        # Break if we're in the winning state.
        if state.is_winning():
            break

    # Close the window.
    renderer.tear_down()
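value_iteration and policy_iteration themselves are not shown in these examples. A minimal value-iteration sketch over the state API visible here (generate_all_states, possible_actions, next_state, is_winning); the reward scheme, gamma, and theta are assumptions, it assumes every next_state is included in generate_all_states(), and Example 8 uses a variant that also accepts a warm-start policy and value:

def value_iteration(grid, gamma=0.9, theta=1e-6):
    states = grid.generate_all_states()
    value = {s: 0.0 for s in states}
    while True:
        delta = 0.0
        for s in states:
            if s.is_winning():
                continue
            # One-step lookahead; assumed reward: 1.0 for reaching the winning state.
            candidates = [
                (1.0 if s.next_state(a).is_winning() else 0.0)
                + gamma * value[s.next_state(a)]
                for a in s.possible_actions()
            ]
            if not candidates:
                continue
            best = max(candidates)
            delta = max(delta, abs(best - value[s]))
            value[s] = best
        if delta < theta:
            break
    # Extract the greedy policy from the converged value function.
    policy = {}
    for s in states:
        actions = [] if s.is_winning() else s.possible_actions()
        if actions:
            policy[s] = max(actions, key=lambda a: value[s.next_state(a)])
    return value, policy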
Example 5
def load_grids(size):
    print("Loading grids....")
    grids = list()
    for i in range(1, 900):
        grid = Grid(filename="levels/" + str(size) + "x" + str(size) +
                    "/grid_" + str(i) + ".txt")
        grids.append(grid)
    return grids
Example 6
 def _init_grid(self):
     self.grid: Grid = Grid(width=int(self.canvas_width / BOX_SIZE),
                            height=int(self.canvas_height / BOX_SIZE))
     self.grid.init_grid()
     for x in range(int(self.canvas_width / BOX_SIZE)):
         for y in range(int(self.canvas_height / BOX_SIZE)):
             if self.canvas_boxes.get(x) is None:
                 self.canvas_boxes[x] = {}
             # sleep(0.1)
             self.canvas_boxes[x][y] = BoxCanvas(self.grid.boxes[x][y],
                                                 self.canvas)
Example 7
    def __init__(self, layer_nodes: List[int],
                 processing_config: ProcessingConfig,
                 importance_data: ImportanceDataHandler = None,
                 processed_nn: ProcessedNNHandler = None):
        logging.info("Prepare network processing for network of size: %s" % layer_nodes)
        self.layer_nodes: List[int] = layer_nodes
        self.layer_distance: float = processing_config["layer_distance"]
        self.layer_width: float = processing_config["layer_width"]

        logging.info("Create network model...")
        self.network: NetworkModel = NetworkModel(self.layer_nodes, self.layer_width, self.layer_distance,
                                                  importance_data, processed_nn, processing_config["prune_percentage"])
        self.sample_length: float = self.network.layer_width / processing_config["sampling_rate"]
        self.grid_cell_size: float = self.sample_length / 3.0
        self.sample_radius: float = self.sample_length * 2.0

        RenderShaderHandler().set_classification_number(self.network.num_classes)
        ComputeShaderHandler().set_classification_number(self.network.num_classes)

        self.node_advection_status: AdvectionProgress = AdvectionProgress(self.network.average_node_distance,
                                                                          processing_config["node_bandwidth_reduction"],
                                                                          self.grid_cell_size * 2.0)
        self.edge_advection_status: AdvectionProgress = AdvectionProgress(self.network.average_edge_distance,
                                                                          processing_config["edge_bandwidth_reduction"],
                                                                          self.grid_cell_size * 2.0)
        self.edge_importance_type: int = processing_config["edge_importance_type"]

        logging.info("Create grid...")
        self.grid: Grid = Grid(Vector3([self.grid_cell_size, self.grid_cell_size, self.grid_cell_size]),
                               self.network.bounding_volume, self.layer_distance)

        logging.info("Prepare node processing...")
        self.node_processor: NodeProcessor = NodeProcessor(self.network)
        self.node_renderer: NodeRenderer = NodeRenderer(self.node_processor, self.grid)

        logging.info("Prepare edge processing...")
        self.edge_processor: EdgeProcessor = EdgeProcessor(self.sample_length,
                                                           edge_importance_type=self.edge_importance_type)
        self.edge_processor.set_data(self.network)
        if not self.edge_processor.sampled:
            self.edge_processor.init_sample_edge()
        self.edge_renderer: EdgeRenderer = EdgeRenderer(self.edge_processor, self.grid)

        logging.info("Prepare grid processing...")
        self.grid_processor: GridProcessor = GridProcessor(self.grid, self.node_processor, self.edge_processor, 10000.0)
        self.grid_processor.calculate_position()
        self.grid_renderer: GridRenderer = GridRenderer(self.grid_processor)

        self.action_finished: bool = False
        self.last_action_mode: NetworkProcess = NetworkProcess.RESET

        self.edge_smoothing: bool = processing_config["smoothing"]
        self.edge_smoothing_iterations: int = processing_config["smoothing_iterations"]
        self.bar: Optional[ProgressBar] = None
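The keys that processing_config must provide can be read off the constructor above; a sketch of such a config, where the values are placeholders and not defaults from the project:

processing_config = {
    "layer_distance": 1.0,        # spacing between network layers
    "layer_width": 1.0,           # width of each layer
    "prune_percentage": 0.0,      # fraction of the network to prune
    "sampling_rate": 10.0,        # samples per layer width
    "node_bandwidth_reduction": 0.9,
    "edge_bandwidth_reduction": 0.9,
    "edge_importance_type": 0,
    "smoothing": True,
    "smoothing_iterations": 8,
}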
Example 8
def main():
    # Grab the command line options.
    options, args = get_options()

    # Train by iterating over all grids in the training set, while using the
    # previous iteration's value function as the starting point for the next
    # iteration's training. The idea is that different boards of the same size
    # would be able to share states, and this would allow us to generalize
    # better to unseen boards.
    if options.mode == "train":
      algorithm = policy_iteration if options.algorithm == "policy_iteration" else value_iteration
      value = dict()
      policy = dict()
      for i in range(1,900):
        print("Training iteration " + str(i))
        grid = Grid(filename="levels/4x4/grid_" + str(i) + ".txt")
        new_value, policy = algorithm(grid, policy, value)
        value = new_value.copy()

      # Save the result of training to a pickle file.
      with open("policies/" + str(options.file) + ".pickle", 'wb') as handle:
        pickle.dump(policy, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # If we're in test mode, then we load up an existing policy, and have it play
    # boards numbered 900-999. We count up how many of those 100 boards it wins.
    elif options.mode == "test":
      # Load up the pickle file we saved to during training.
      with open("policies/" + str(options.file) + ".pickle", 'rb') as handle:
        policy = pickle.load(handle)

      print("Num keys: ", len(policy.keys()))

      num_wins = 0
      for i in range(900, 1000):
        print("Playing game " + str(i))
        grid = Grid(filename="levels/4x4/grid_" + str(i) + ".txt")
        num_wins += play_game(grid, policy)
      print("Testing won " + str(num_wins) + "out of 100 games!")
Example 9
def main():
    # Grab the command line options.
    options, args = get_options()

    # Instantiate the Flow Free grid.
    grid = Grid(filename="levels/" + options.level)

    # Initialize the renderer.
    renderer = GridRenderer(options.level)

    # Draw the grid to the screen.
    renderer.render(grid.start_state)

    # Close the window.
    renderer.tear_down()
Example 10
def initialize_replay_buffer_with_single_grid(size, mlp):
    print("Initialize replay buffer with single board")
    result = set()
    grid = Grid(filename="levels/" + str(size) + "x" + str(size) +
                "/grid_100.txt")
    state = QState(grid.start_state)
    for i in range(starting_items_in_replay):
        action, q_sa = mlp.get_next_action(state.get_feature_vector(),
                                           grad=False,
                                           exploration_rate=1.0)
        new_state, reward, terminal = state.step(action)
        sars = (state, action, reward, q_sa, new_state, terminal)
        result.add(sars)
        state = new_state if not terminal else QState(grid.start_state)
    return result
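starting_items_in_replay is a module-level constant this snippet assumes is defined elsewhere; any positive integer works, for example (the value is a guess, not from the source):

starting_items_in_replay = 10000  # assumed constant; the actual value is not shown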
Example 11
def initialize_buffer_with_all_tuples(size, mlp):
    grid = Grid(filename="levels/" + str(size) + "x" + str(size) +
                "/grid_1.txt")
    result = set()
    states = grid.generate_all_states()
    for state in states:
        q_state = QState(state)

        # Don't store the winning state in the replay buffer.
        if q_state.is_winning():
            print("Don't include me!")
            continue

        for action in range((size - 1) * 4):

            color = action // 4 + 1
            direction = action % 4
            action_tu = (color, direction)

            new_state, reward, terminal = q_state.step(action_tu)
            sars = (q_state, action, reward, new_state, terminal)
            result.add(sars)
    print("Initial replay buffer size: ", len(result))
    return list(result)
Example 12
 def test_upward_diagonal_win(self):
     o_token = OToken()
     grid = Grid(3, 3)
     grid_controller = GridController()
     grid_controller.assign_grid(grid)
     grid.add_token_position(o_token, 0, 2)
     grid.add_token_position(o_token, 1, 1)
     grid.add_token_position(o_token, 2, 0)
     winner = grid_controller.check_for_win()
     assert winner == o_token
Example 13
 def test_horizontal_win(self):
     x_token = XToken()
     grid = Grid(3, 3)
     grid_controller = GridController()
     grid_controller.assign_grid(grid)
     grid.add_token_position(x_token, 0, 0)
     grid.add_token_position(x_token, 0, 1)
     grid.add_token_position(x_token, 0, 2)
     winner = grid_controller.check_for_win()
     assert winner == x_token
Example 14
 def test_downward_diagonal_win(self):
     x_token = XToken()
     grid = Grid(3, 3)
     grid_controller = GridController()
     grid_controller.assign_grid(grid)
     grid.add_token_position(x_token, 0, 0)
     grid.add_token_position(x_token, 1, 1)
     grid.add_token_position(x_token, 2, 2)
     winner = grid_controller.check_for_win()
     assert winner == x_token
Example 15
def play(file, Q, size):
	grid = Grid(filename=file)
	print("Playing ", file)

	epsilon = 0.001
	action_size = 4 * (size-1) # number of colors is (size-1), number of directions is 4.

	state = grid.start_state
	won = False

	turns = 0

	while turns < 100000:
		if random.uniform(0, 1) < epsilon or state not in Q:
			action = random.randint(0, action_size - 1)
		else:
			action = np.argmax(Q[state])

		color = action // 4 + 1
		direction = action % 4
		action_tu = (color, direction)

		turns += 1

		if not state.is_viable_action(action_tu):
			continue

		# Advance to the next state.
		state = state.next_state(action_tu)

		# Break if we're in the winning state.
		if state.is_winning():
			won = True
			break
	renderer = Renderer("Play")
	renderer.render(state)
	renderer.tear_down()

	print ("Took " + str(turns) + " turns!")
	return won
Example 16
def play(mlp, size=4, index=1):
    grid = Grid(filename="levels/" + str(size) + "x" + str(size) + "/grid_" +
                str(index) + ".txt")

    # Wrap the states as QStates to get functionality
    # specifically needed for Q-learning.
    state = QState(grid.start_state)
    won = False
    turns = 0
    while turns < 1000:
        # Grab the feature vector for the given QState.
        features = state.get_feature_vector()

        # Get best action from the MLP.
        action, _ = mlp.get_next_action(features,
                                        grad=False,
                                        exploration_rate=0.05)
        #print("Take action: ", action)

        turns += 1
        if not state.is_viable_action(action):
            continue

        # Advance to the next state.
        state = state.next_state(action)

        # Break if we're in the winning state.
        if state.is_winning():
            won = True
            break

    # if won:
    # 	renderer = GridRenderer("Q-Learning")
    # 	renderer.render(state.state)
    # 	renderer.tear_down()

    return won
Example 17
    def test_entire_game_with_two_AI(self):
        new_game = Game()
        new_game.game_mode = 2
        token_1 = XToken()
        token_2 = OToken()
        tokens = (token_1, token_2)
        new_game.add_token_options(tokens)

        game_controller = GameController()
        game_controller.assign_game(new_game)

        player_one = Player()
        player_two = Player()
        players = [player_one, player_two]
        game_controller.add_players_to_game(players)

        player_one_controller = AIPlayerController()
        player_two_controller = AIPlayerController()
        player_one_controller.assign_player(player_one)
        player_two_controller.assign_player(player_two)
        player_controllers = [player_one_controller, player_two_controller]

        player_one_controller.determine_name()
        player_two_controller.determine_name()

        random.shuffle(player_controllers)

        game_controller.assign_tokens(player_controllers)

        new_game.print_players()

        grid = Grid(3, 3)
        grid_controller = GridController()
        grid_controller.assign_grid(grid)
        while not new_game.game_over:
            game_controller.play_round(player_controllers, grid_controller)
Example 18
 def test_no_winner_board_full(self):
     x_token = XToken()
     o_token = OToken()
     grid = Grid(3, 3)
     grid_controller = GridController()
     grid_controller.assign_grid(grid)
     grid.add_token_position(x_token, 0, 0)
     grid.add_token_position(x_token, 0, 1)
     grid.add_token_position(x_token, 1, 2)
     grid.add_token_position(x_token, 2, 0)
     grid.add_token_position(x_token, 2, 1)
     grid.add_token_position(o_token, 0, 2)
     grid.add_token_position(o_token, 1, 0)
     grid.add_token_position(o_token, 1, 1)
     grid.add_token_position(o_token, 2, 2)
     winner = grid_controller.check_for_win()
     assert winner is None
     assert grid_controller.grid_is_full()
Example 19
class Game():
    def __init__(self, grid_size, max_generation):
        self.grid_size = grid_size
        self.grid_size_x, self.grid_size_y = grid_size
        self.generation = 0
        self.max_generation = max_generation
        self.active_cells = 0
        self.game_board = Grid(grid_size, Cell)
        window_size = tuple([x * 5 for x in grid_size])
        self.view = GameView(window_size)
        self.empty_grid = Grid(self.grid_size, Cell)

    def run_game(self, frames_per_second=100, display_after_end_of_game=True):
        """
        Begins running the game at an optional delay and persists the window unless display_after_end_of_game is False
        """
        keep_displaying_window = True
        while (self.generation <
               self.max_generation) and keep_displaying_window:
            self.update_generation()
            keep_displaying_window = self.view.update_screen(self.generation)
            time.sleep(1 / frames_per_second)

        while (keep_displaying_window and display_after_end_of_game):
            keep_displaying_window = self.view.update_screen(self.generation)

        sys.exit()

    def update_generation(self):
        """Checks each cell to see if it survives to the next generation"""
        next_generation = copy.deepcopy(
            self.empty_grid
        )  # create new "dead" grid using deepcopy to avoid added iteration in the grid.__init__()

        self.view.redraw_screen()  # reset screen

        row_number = 0
        for row in self.game_board:
            cell_number = 0
            for cell in row:
                number_neighbors = self.check_neighboring_living_cells(
                    (row_number, cell_number))
                if cell.is_living():
                    if number_neighbors == 2 or number_neighbors == 3:
                        next_generation.getGridItem(
                            row_number, cell_number).toggle_living(
                            )  # carry over living cell to next generation
                        self.view.draw_cell(
                            (row_number, cell_number))  # draw cell
                else:
                    if number_neighbors == 3:
                        next_generation.getGridItem(
                            row_number, cell_number
                        ).toggle_living(
                        )  # non-living cell becomes living in the next generation
                        self.view.draw_cell(
                            (row_number, cell_number))  # draw cell
                cell_number += 1
            row_number += 1

        self.game_board = next_generation  # This generation has ended; copy the next generation's grid into the current grid
        self.generation += 1

    def check_neighboring_living_cells(self, coordinates):
        """Check neighboring cells to see if they are living and return the number of living cells"""
        x, y = coordinates
        num_neighbors = 0
        for neighbor_coordinates in NEIGHBORING_CELLS:
            neighbor_x, neighbor_y = neighbor_coordinates
            neighbor_x = (neighbor_x + x) % self.grid_size_x
            neighbor_y = (neighbor_y + y) % self.grid_size_y
            if self.game_board.getGridItem(neighbor_x, neighbor_y).is_living():
                num_neighbors += 1

        return num_neighbors

    def load_coordinates_into_grid(self, coordinate_list):
        """Takes a list of coordinates, a tuple(int x, int y), and sets the corresponding cell to living.
           Raises an IndexError if the coordinates are not within the grid bounds.
        """
        for coordinate in coordinate_list:
            x, y = coordinate
            if 0 <= x < self.grid_size_x and 0 <= y < self.grid_size_y:
                self.game_board.getGridItem(x, y).toggle_living()
            else:
                raise IndexError

    def get_game_board(self):
        """Returns the current generation's game board"""
        return self.game_board
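check_neighboring_living_cells iterates over a NEIGHBORING_CELLS constant that is not shown above. Given that each entry is unpacked into an (x, y) offset and wrapped modulo the grid size, a definition consistent with Conway's rules is the eight Moore-neighborhood offsets (a sketch, not copied from the source):

# The eight (dx, dy) offsets surrounding a cell, excluding the cell itself.
NEIGHBORING_CELLS = [(dx, dy)
                     for dx in (-1, 0, 1)
                     for dy in (-1, 0, 1)
                     if (dx, dy) != (0, 0)]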
Example 20
 def __init__(self, state):
     self.grid = Grid(state)
Example 21
def train(file, size, Q=dict(), gamma=0.9, num_epochs=3):
	print("Train ", file)
	print("Epochs: ", num_epochs)
	grid = Grid(filename=file)
	epsilon = 1.0

	print("Generate all states!")
	all_states = grid.generate_all_states()
	state_size = len(all_states)
	print("All states: ", state_size)

	lr = 0.5
	winning_states = 0
	action_size = 4 * (size-1) # number of colors is (size-1), number of directions is 4.

	iteration = 0
	for epoch in range(num_epochs):
		print("Epoch: ", epoch)
		for state in all_states:
			for action in range(action_size):

				if iteration % 1000 == 0:
					print("iteration ", iteration)

				if state not in Q:
					Q[state] = np.zeros((action_size))

				color = action // 4 + 1
				direction = action % 4
				action_tu = (color, direction)

				def get_next_tuple():
					if state.is_viable_action(action_tu):
						new_state = state.next_state(action_tu)
						if new_state.is_winning():
							reward = 1000000000
							return new_state, reward
						else:
							flows = new_state.completed_flow_count()
							zeroes = new_state.num_zeroes_remaining()
							reward = -5 * zeroes
							reward += 1000 * flows
							return new_state, reward
					else:
						reward = -1000000
						new_state = state
						return new_state, reward

				new_state, reward = get_next_tuple()

				if new_state not in Q:
					Q[new_state] = np.zeros((action_size))

				Q[state][action] = Q[state][action] + lr * (reward + gamma * np.max(Q[new_state]) - Q[state][action])

				if new_state.is_winning():
					print("Winning State!")
					winning_states += 1
				iteration += 1

	print("Number of winning states: ", winning_states)
	return Q
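A minimal sketch of how this tabular train and the play function from Example 15 might be combined; the paths, board indices, and counts are illustrative:

Q = dict()
for i in range(1, 10):  # train on a few boards of the same size
    Q = train("levels/4x4/grid_" + str(i) + ".txt", 4, Q=Q)

wins = sum(play("levels/4x4/grid_" + str(i) + ".txt", Q, 4)
           for i in range(10, 20))  # evaluate on held-out boards
print("Won", wins, "of 10 games")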
Example 22
            player_two_controller = AIPlayerController()
        else:
            player_one_controller = HumanPlayerController()
            player_two_controller = HumanPlayerController()
        player_one_controller.assign_player(player_one)
        player_two_controller.assign_player(player_two)
        player_controllers = [player_one_controller, player_two_controller]

        print(data.PLAYER_ONE_FIRST_MESSAGE)
        player_one_controller.determine_name()
        print(data.PLAYER_TWO_NEXT_MESSAGE)
        player_two_controller.determine_name()

        print(data.RANDOMLY_SELECT_FIRST_PLAYER)
        random.shuffle(player_controllers)

        game_controller.assign_tokens(player_controllers)

        new_game.print_players()

        grid = Grid(data.GRID_ROWS, data.GRID_COLUMNS)
        grid_controller = GridController()
        grid_controller.assign_grid(grid)

        while not new_game.game_over:
            game_controller.play_round(player_controllers, grid_controller)

        play_again = input(data.PLAY_AGAIN_MESSAGE)
        if str.lower(play_again) not in ["yes", "y"]:
            break
Example 23
snake_pos_x = 10
snake_pos_y = 10
snake_dir = 'east'

# Game variables
gameOver = False
gameStarted = False
score = 0
highscore = 0

pygame.init()

pygame.display.set_caption("Snake Game")
window = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))

grid = Grid(window, SCREEN_WIDTH, SCREEN_HEIGHT, SQUARE_SIZE)
snake = Snake(snake_pos_x, snake_pos_y, snake_dir, SQUARE_SIZE)
food = Food(15, 15, SCREEN_WIDTH / SQUARE_SIZE, SCREEN_HEIGHT / SQUARE_SIZE,
            SQUARE_SIZE)
menu = Menu(window, SCREEN_WIDTH, SCREEN_HEIGHT)

clock = pygame.time.Clock()

while True:
    pygame.time.delay(40)
    clock.tick(10)

    if not gameStarted:
        menu.draw(score, highscore)

    if not gameOver and gameStarted:
Example 24
def main():
    win = pygame.display.set_mode((540, 600))
    pygame.display.set_caption("Sudoku")
    board = Grid(9, 9, 540, 540, win)
    key = None
    run = True
    start = time.time()
    strikes = 0
    while run:

        play_time = round(time.time() - start)

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_1:
                    key = 1
                if event.key == pygame.K_2:
                    key = 2
                if event.key == pygame.K_3:
                    key = 3
                if event.key == pygame.K_4:
                    key = 4
                if event.key == pygame.K_5:
                    key = 5
                if event.key == pygame.K_6:
                    key = 6
                if event.key == pygame.K_7:
                    key = 7
                if event.key == pygame.K_8:
                    key = 8
                if event.key == pygame.K_9:
                    key = 9
                if event.key == pygame.K_DELETE:
                    board.clear()
                    key = None

                if event.key == pygame.K_SPACE:
                    board.solve_gui()

                if event.key == pygame.K_RETURN:
                    i, j = board.selected
                    if board.cubes[i][j].temp != 0:
                        if board.place(board.cubes[i][j].temp):
                            print("Success")
                        else:
                            print("Wrong")
                            strikes += 1
                        key = None

                        if board.is_finished():
                            print("Game over")

            if event.type == pygame.MOUSEBUTTONDOWN:
                pos = pygame.mouse.get_pos()
                clicked = board.click(pos)
                if clicked:
                    board.select(clicked[0], clicked[1])
                    key = None

        if board.selected and key is not None:
            board.sketch(key)

        redraw_window(win, board, play_time, strikes)
        pygame.display.update()
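redraw_window is called above but not defined in this snippet. A sketch consistent with its call signature and the 540x600 window, assuming the Grid exposes a draw() method; the font, colors, and layout coordinates are assumptions:

def redraw_window(win, board, play_time, strikes):
    # Clear the window, then draw the timer, the strike count, and the board.
    win.fill((255, 255, 255))
    font = pygame.font.SysFont("comicsans", 40)
    time_text = font.render("Time: " + str(play_time) + "s", True, (0, 0, 0))
    win.blit(time_text, (380, 555))
    strikes_text = font.render("X " * strikes, True, (255, 0, 0))
    win.blit(strikes_text, (20, 555))
    board.draw()  # assumed Grid method that renders the cubes and grid lines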