def move(self, environment):
    """Return the action predicted by the genetic-evolution-trained DNN."""
    # Lazy-load the trained network on the first move so that constructing
    # the model object stays cheap.
    if self.model is None:
        self.model = DeepNeuralNetModel(Constants.MODEL_DIRECTORY + "/dnn_genetic_evolution/")
    BaseGameModel.move(self, environment)
    return self._predict(environment, self.model)
def move(self, environment):
    """Return the planned action unless it would reverse the snake's heading."""
    BaseGameModel.move(self, environment)
    current_action = environment.snake_action
    if self.action is None:
        return current_action
    # The planned move reverses the snake when either component equals the
    # negation of the corresponding component of the current heading.
    is_reverse = (-self.action[0] == current_action[0]
                  or -self.action[1] == current_action[1])
    return current_action if is_reverse else self.action
def move(self, environment):
    """Choose the next action by depth-first search toward the fruit.

    A previously computed path is reused from the transposition table when
    one is available.  NOTE: despite the variable naming, DFS does not
    guarantee a *shortest* path — it returns the first path it finds.
    Falls back to the snake's current action when no path exists.
    """
    BaseGameModel.move(self, environment)
    cached_move = self._path_move_from_transposition_table(
        self.starting_node, self.fruit_node)
    if cached_move:
        return cached_move
    stack = Stack([self.starting_node])
    visited_nodes = set([self.starting_node])
    shortest_path = []
    while stack.stack:
        current_node = stack.pop()
        if current_node == self.fruit_node:
            shortest_path = self._recreate_path_for_node(current_node)
            # Fix: stop as soon as a path is found instead of continuing to
            # expand the rest of the search space (the original kept going
            # and overwrote the result with later finds).
            break
        for move in environment.possible_actions_for_current_action(
                current_node.action):
            child_node_point = Point(current_node.point.x + move[0],
                                     current_node.point.y + move[1])
            neighbor = environment.tiles[child_node_point.y][child_node_point.x]
            if neighbor == Tile.empty or neighbor == Tile.fruit:
                child_node = Node(child_node_point)
                child_node.action = move
                child_node.previous_node = current_node
                if child_node not in visited_nodes and child_node not in stack.stack:
                    # Fix: mark the CHILD as visited (the original added
                    # current_node, which was already visited, so expanded
                    # nodes could be re-pushed and re-explored).
                    visited_nodes.add(child_node)
                    stack.push(child_node)
    if shortest_path:
        self.transposition_table[self.fruit_node] = shortest_path
        # The recreated path appears ordered fruit -> head, so the node at
        # index -2 is the first step away from the head — TODO confirm
        # against _recreate_path_for_node.
        first_point = shortest_path[-2]
        return first_point.action
    return environment.snake_action
def move(self, environment):
    """Follow the longest path from the snake's head to the fruit.

    Returns the action leading out of the starting node along the computed
    path, or the snake's current action when no usable path exists.
    """
    BaseGameModel.move(self, environment)
    longest_path = self.longest_path(self.starting_node, self.fruit_node,
                                     environment)
    # Walk consecutive pairs; the successor of the starting node is the move.
    # Fix: the original indexed longest_path[index + 1] and would raise
    # IndexError if the starting node were the path's final element; pairing
    # with zip makes that case fall through to the safe fallback instead.
    for node, successor in zip(longest_path, longest_path[1:]):
        if node == self.starting_node:
            return successor.action
    return environment.snake_action
def __init__(self, long_name, short_name, abbreviation):
    """Set up the DDQN model and a clean model directory for training output."""
    BaseGameModel.__init__(self, long_name, short_name, abbreviation)
    self.model_path = self.model_dir_path + Constants.DQN_MODEL_NAME
    model_dir = os.path.dirname(self.model_path)
    # Start from a clean slate: wipe any previous model directory, then
    # recreate it empty.
    if os.path.exists(model_dir):
        shutil.rmtree(model_dir, ignore_errors=True)
    os.makedirs(model_dir)
    self.action_space = len(Action.possible())
    self.ddqn = DDQNModel(self.model_input_shape, self.action_space).model
    self._load_model()
def move(self, environment):
    """Move along the BFS shortest path to the fruit, caching it for reuse."""
    BaseGameModel.move(self, environment)
    cached_move = self._path_move_from_transposition_table(
        self.starting_node, self.fruit_node)
    if cached_move:
        return cached_move
    path = self.shortest_path(environment, self.starting_node, self.fruit_node)
    if not path:
        # No route to the fruit: keep going in the current direction.
        return environment.snake_action
    self.transposition_table[self.fruit_node] = path
    # The path appears ordered fruit -> head, so index -2 is the first step
    # away from the head — TODO confirm against shortest_path.
    return path[-2].action
def move(self, environment):
    """Follow the Hamiltonian path; abort when stuck in a fruitless cycle."""
    BaseGameModel.move(self, environment)
    if environment.is_in_fruitless_cycle():
        print("Infinite fruitless cycle - game over at: " + str(environment.reward()))
        return environment.snake_action
    hamilton_path = self._hamilton_path(environment)
    for index, node in enumerate(hamilton_path):
        if index + 1 == len(hamilton_path):
            # End of the path: wrap around to the first node's action.
            return hamilton_path[0].action
        if node == self.starting_node:
            return hamilton_path[index + 1].action
    # Empty path: keep the current heading.
    return environment.snake_action
def __init__(self):
    """Register this model under its BFS shortest-path display names."""
    BaseGameModel.__init__(self, "Shortest Path BFS", "shortest_path_bfs", "spb")
def move(self, environment):
    """Lazily load the trained DNN, then run the base per-move bookkeeping.

    NOTE(review): unlike the sibling move() implementations this returns
    None — confirm callers ignore the trainer's return value.
    """
    if self.model is None:
        self.model = DeepNeuralNetModel(Constants.MODEL_DIRECTORY + "dnn/")
    BaseGameModel.move(self, environment)
def __init__(self, long_name, short_name, abbreviation):
    """Forward the model's display names to the base initializer."""
    BaseGameModel.__init__(self, long_name, short_name, abbreviation)
def __init__(self):
    """Register this model under its DFS shortest-path display names."""
    BaseGameModel.__init__(self, "Shortest Path DFS", "shortest_path_dfs", "spd")
def __init__(self):
    """Register this model under its random-mover display names."""
    BaseGameModel.__init__(self, "Random", "random", "r")
def __init__(self):
    """Register this model under its longest-path display names."""
    BaseGameModel.__init__(self, "Longest Path", "longest_path", "lp")
def __init__(self):
    """Register this model under its human-player display names."""
    BaseGameModel.__init__(self, "Human", "human", "hu")
def __init__(self, runs=1000):
    """Create the Monte Carlo model.

    runs: number of simulation runs (presumably rollouts per move — confirm
    against the class's move implementation). Defaults to 1000.
    """
    BaseGameModel.__init__(self, "Monte Carlo", "monte_carlo", "mc")
    self.runs = runs
def move(self, environment):
    """Run the base model's per-move bookkeeping only; returns None."""
    BaseGameModel.move(self, environment)
def __init__(self):
    """Register this trainer under its genetic-evolution DNN display names."""
    BaseGameModel.__init__(self,
                           "Deep Neural Net GE",
                           "deep_neural_net_genetic_evolution_trainer",
                           "dnnget")
def move(self, environment):
    """Pick a uniformly random action among those currently legal."""
    BaseGameModel.move(self, environment)
    legal_actions = environment.possible_actions_for_current_action(
        environment.snake_action)
    return random.choice(legal_actions)
def __init__(self):
    """Register this model under its Hamiltonian-cycle display names."""
    BaseGameModel.__init__(self, "Hamilton", "hamilton", "ha")