def main():
    """Play NUM_OF_GAMES rounds on one shared Game, resetting between rounds."""
    game = Game(MAP_NAME)
    rounds_left = NUM_OF_GAMES
    while rounds_left > 0:
        play(game)
        game.reset()
        rounds_left -= 1
# Dump the current weight matrices: left-justified columns 7 characters
# wide with 6 decimal places, followed by a section marker.
for weight_row in W2:
    np.savetxt(outfile, weight_row, fmt='%-7.6f')
outfile.write("# new\n".encode())

# Evolve the population for 30 generations.  Every ordered pair of
# distinct agents plays one match to 10 points; the first agent's
# fitness grows by its own final score.
for generation in range(0, 30):
    pop_size = len(world.population)
    for i in range(0, pop_size):
        for j in range(0, pop_size):
            if i == j:
                continue
            pygame.init()
            match = Game(world.population[i], world.population[j], pygame)
            while True:
                match.update()
                if match.player1.score == 10 or match.player2.score == 10:
                    world.population[i].score += match.player1.score
                    pygame.display.quit()
                    break
    # save weights of best agents of generation in text file
    save_weights()
    best = world.select(world.fitness, 4)
    print(generation)
    world.new_generation(4, best)
def learn(self):
    '''
    Perform a simple Q-learning algorithm based on a Q-table.

    Iterate over the episodes; for each episode take up to
    ``self.sub_steps`` steps chosen from the Q-values in ``q_table``
    (plus noise that decays with the episode index) and update the
    Q-values along the way.  Escape an episode when status is 'D'
    (dead) or 'F' (finished).
    '''
    r_list = []  # per-episode scores (currently only collected locally)
    for i in range(self.n_episodes):
        # Initialize a new game for each episode.
        game = Game()
        if i == 0:
            self.empty_board = game.board
        # BUG FIX: reset the reward accumulator at the start of every
        # episode.  Previously it was initialized once before the loop,
        # so r_list stored running totals rather than episode scores.
        episode_reward = 0
        j = 0
        s = [game.start_x, game.start_y]
        while j < self.sub_steps:
            j += 1
            # state is a 1d index into the Q-table
            s = self._convert_pos(s)
            # Choose the action with the max Q-value for this state,
            # distorted by random noise that shrinks as episodes pass
            # (exploration early, exploitation later).
            a = np.argmax(
                self.q_table[s, :]
                + np.random.randn(1, len(self.action_list)) * (1.0 / (i + 1)))
            # take a step on the board
            status = game.update_board(self.action_list[a])
            # New game state (x and y position of the player).
            s1 = [game.position_x, game.position_y]
            # reward for the resulting status
            r = self.r_table[status]
            # Update Q(s, a) from the reward and the best Q-value of the
            # successor state, controlled by the learning rate (lr) and
            # the discount factor (y).  If s1 falls outside the Q-table,
            # its value is taken as -1.
            if self._convert_pos(s1) < len(self.q_table):
                # BUG FIX: the standard Q-learning target is
                #     r + y * max(Q[s1, :]) - Q[s, a]
                # The original placed "- Q[s, a]" inside np.max(...), so
                # the subtraction was scaled by the discount factor y
                # (the else-branch below already had it right).
                self.q_table[s, a] = self.q_table[s, a] + self.lr * (
                    r + self.y * np.max(self.q_table[self._convert_pos(s1), :])
                    - self.q_table[s, a])
            else:
                self.q_table[s, a] = self.q_table[s, a] + self.lr * (
                    r + self.y * (-1) - self.q_table[s, a])
            # update the reward for this episode
            episode_reward += r
            # new state -> old state
            s = s1
            # break out when dead or finished
            if status in ('D', 'F'):
                self.final_status = status
                break
        # append episode score
        r_list.append(episode_reward)
    # Garbage collection
    gc.collect()
def main(stdscr):
    # Entry point for the curses minesweeper UI: set up color pairs, then
    # loop playing games until the player answers "n"/"N" at the end screen.
    curses.start_color()
    curses.use_default_colors()
    # Color pairs used by the board and messages (-1 keeps the terminal's
    # default background).  NOTE(review): pairs 3, 9 and 10 are all red on
    # default — presumably distinct roles in print_board/print_help; confirm.
    curses.init_pair(1, curses.COLOR_WHITE, -1)
    curses.init_pair(2, curses.COLOR_CYAN, -1)
    curses.init_pair(3, curses.COLOR_RED, -1)
    curses.init_pair(4, curses.COLOR_WHITE, curses.COLOR_RED)
    curses.init_pair(5, curses.COLOR_BLUE, -1)
    curses.init_pair(7, curses.COLOR_GREEN, -1)
    curses.init_pair(8, curses.COLOR_YELLOW, -1)
    curses.init_pair(9, curses.COLOR_RED, -1)
    curses.init_pair(10, curses.COLOR_RED, -1)
    curses.init_pair(16, curses.COLOR_GREEN, curses.COLOR_WHITE)
    y = "y"  # play-again answer; anything but "n"/"N" starts another game
    while y not in ("n", "N"):
        curr_x = 0
        curr_y = 0
        stdscr.clear()
        # Ask for a board size until it is at least 4.  ("rows" comes from
        # module scope — not visible in this chunk.)
        size = welcome(stdscr, rows)
        while size < 4:
            size = welcome(stdscr, rows)
        curses.setsyx(curr_y, curr_x)
        g = Game(n=size)
        g.generate_bombs()
        stdscr.clear()
        print_help(stdscr, g)
        start = time.time()  # game timer starts once the board is shown
        g.print_board(stdscr)
        # Main input loop: one keypress per iteration until the game ends,
        # is won, or the player quits with "q"/"Q".
        while not g.is_game_over() and not g.win():
            c = stdscr.getkey()
            if c in ("q", "Q"):
                y = "N"  # quitting also declines a rematch
                break
            curr_x, curr_y = do_something(curr_x, curr_y, c, g)
            curses.setsyx(curr_y, curr_x)
            print_help(stdscr, g)
            g.print_board(stdscr)
            stdscr.refresh()
        end = time.time()
        length = round(end - start)  # elapsed seconds, for the end screen
        g.print_board(stdscr)
        stdscr.refresh()
        print_help(stdscr, g)
        # Show the outcome banner unless the player quit mid-game.
        if y not in ("n", "N"):
            if g.win():
                print_help(stdscr, g)
                g.print_board(stdscr)
                stdscr.refresh()
                stdscr.addstr(
                    13, g.high + 2 + len("You won!"), "You won!",
                    curses.color_pair(5) + curses.A_BOLD + curses.A_BLINK)
            elif g.loose():
                # Reveal and mark every bomb before the "Game Over!" banner.
                for r, c in g.bomb_locations:
                    g.board[r][c].open(True)
                    g.board[r][c].exploded()
                g.print_board(stdscr)
                print_help(stdscr, g)
                stdscr.refresh()
                stdscr.addstr(13, g.high + 2 + len("Game Over!"),
                              "Game Over!",
                              curses.A_BOLD + curses.color_pair(3))
            # Ask whether to play again; returns "n"/"N" to stop.
            y = print_end(stdscr, length, g)
from ai.TreeStepAI import TreeStepAI
from game.Game import Game
from logic.StandardLogic import StandardLogic
from nerve.EvaluateCore import EvaluateCore
from ui.TkinterUI import TkinterIndicator, TkinterView

# Run one game driven by the tree-search AI, displayed through Tkinter.
core = EvaluateCore("../graph/core3", "core")
view = TkinterView()
indicator = TkinterIndicator(view)
ai_player = TreeStepAI(core, 1e4)
session = Game(StandardLogic(), indicator, ai_player)
session.process()
import tkinter
from Board.board import Board
from game.Game import Game
from gui.Gui import Gui
from gui.GuiGame import guigame
from player.Computer import Computer
from player.Human import Human
from strategy.No_strategy import NoStrategy

# Console match: a no-strategy computer player versus a human, sharing
# one board.
board = Board()
strategy = NoStrategy()
computer_player = Computer("1", board, strategy)
human_player = Human("2", board)
match = Game(board, computer_player, human_player)
match.play()
print("bye")

# Alternative Tkinter front end, kept disabled:
# board = Board()
# strategy = NoStrategy()
# player1 = Computer("1", board, strategy)
# player2 = Human("2", board)
# game = guigame(player2, player1, board)
# root = tkinter.Tk()
# my_gui = Gui(root, game)
# root.mainloop()
import argparse
from game.Game import Game

if __name__ == "__main__":
    # Command-line options: who controls the snake and how fast it moves.
    parser = argparse.ArgumentParser()
    parser.add_argument('--game_mode', dest='game_mode', type=str,
                        default='user', help="'bot' or 'user'")
    parser.add_argument('--latency', dest='latency', type=int,
                        default=100, help='Latency of the snake')
    cli_args = parser.parse_args()
    Game(cli_args.game_mode, cli_args.latency).start()
# Dispatch on the requested CLI operation.  NOTE(review): this chunk looks
# truncated — args/Season/Game/dtparser/construct_all_features come from
# elsewhere, and the 'predict_game' branch appears to continue past it.
if args.operation == 'construct_all_features' and args.season:
    season = Season(args.season)
    # Rolling-window size for feature construction; defaults to 20.
    if args.window:
        window = args.window
    else:
        window = 20
    construct_all_features(season, window_size=window)
if args.operation == 'predict_game' and args.season and args.game_id:
    season = Season(args.season)
    game = Game(args.game_id)
    # Date range defaults to the full season when not given explicitly.
    if args.start_date:
        start_date = dtparser.parse(args.start_date)
    else:
        start_date = season.start_date
    if args.end_date:
        end_date = dtparser.parse(args.end_date)
    else:
        end_date = season.end_date
    # Presumably the date string format used downstream — confirm.
    str_format = '%Y-%m-%d'
    if args.input_file:
        input_file = args.input_file
import pygame
from game.Game import Game

if __name__ == '__main__':
    # Window and icon setup.
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    pygame.display.set_caption('Zombie Kill')
    pygame.display.set_icon(pygame.image.load('icon.png'))
    # The intro screen decides whether the player wants to start at all.
    running = Game().intro(screen)
    level = 1
    ret_val = None
    # Each iteration plays one level; the result string drives progression.
    while running:
        ret_val = Game().main(screen, level)
        if ret_val == "next_level":
            level += 1
        if ret_val in ("dead", "you win"):
            # The decision screen asks whether to restart from level 1.
            running = Game().decision(screen, ret_val)
            level = 1
    pygame.quit()
    quit()
from ai.OneStepAI import OneStepAI
from ai.TreeStepAI import TreeStepAI
from game.Game import Game
from logic.StandardLogic import StandardLogic
from nerve.EvaluateCore import EvaluateCore
from nerve.utils import train_core

# Shared evaluation network used by both AI players.
core = EvaluateCore("../graph/core3", "core")
one_step_game = Game(StandardLogic(), OneStepAI(core))
tree_game = Game(StandardLogic(), TreeStepAI(core, 128))

# Phase 1: many cheap games with the one-step AI.
train_core(core, one_step_game, train_count=3e3, group_size=100,
           learning_rate=0.003)
# Phase 2: fewer, deeper games with the tree-search AI at a lower rate.
train_core(core, tree_game, train_count=200, group_size=10,
           learning_rate=0.0001)
import os
from game.Game import Game

if __name__ == "__main__":
    g = Game()
    while True:
        os.system('cls')  # clear the console (Windows-only command)
        g.print_state()
        g.check_state()
        if g.game_over:
            break
        # List what can be moved from the bank we are currently on.
        current_bank = [g.first_brqg, g.second_brqg][g.current_brqg]
        help_str = "".join(
            "{} - {}\n".format(element.id, element.name)
            for element in current_bank)
        print("В момента сме на бряг номер {}. \n Можем да преместим:\n{}".
              format(g.current_brqg + 1, help_str))
        # Re-prompt until the input is one of the accepted choices.
        while True:
            user_input = input()
            if user_input in ["0", "1", "2", "3"]:
                break
            print('Моля проверете въведената стойност')
        g.move(user_input)
import json
from game.Game import Game
from static import config_path

# Board dimensions come from the JSON field description.
with open(config_path("field.json"), "r") as config_file:
    field_config = json.load(config_file)

game = Game(field_config["W_IN_CELLS"], field_config["H_IN_CELLS"],
            "py_game")
print("")
game.start()
from game.Game import Game import pygame game = Game(loading=True) game.players[0].isAi = True game.players[0].isTraining = False training = False for i in range(len(game.players)): if game.players[i].isTraining: training = True game.players[i - 1].isAi = True game.players[i - 1].agentAI = game.players[i].agentAI while game.isRunning: if game.isPlaying: game.move_player() game.move_ball() if training: for event in pygame.event.get(): if event.type == pygame.QUIT: for i in range(len(game.players)): game.players[i].save('game/q_best/q_weights%d' % i, 'game/q_best/q_biases%d' % i) print(game.trainingIterations) game.isRunning = False else:
import pygame
from game.Game import Game
from search import a_star

# Grid dimensions and drawing parameters for the path-finding demo.
rows = 50
columns = 50
cell_radius = 7
# NOTE(review): alpha is unused in this chunk — presumably a blending
# factor used elsewhere; confirm.
alpha = 0.8
pathGame = Game(cell_radius, rows, columns)


def draw(game):
    # Repaint the board: white background, then one circle per graph node.
    game.gameDisplay.fill((255, 255, 255))
    for dnode in game.graph.nodes:
        pygame.draw.circle(game.gameDisplay, dnode.color, dnode.pos,
                           game.node_radius, dnode.width)


def check(mousePos, game):
    # While no search is running, hit-test the click against every node and
    # mark the first free node hit as the start point.
    # NOTE(review): this function looks truncated (no goal/wall branches),
    # and it iterates the global pathGame's nodes while sizing rects from
    # the "game" parameter — confirm that is intentional.
    if not game.findingPath:
        for node in pathGame.graph.nodes:
            left, top = node.x - game.node_radius, node.y - game.node_radius
            nodeRect = pygame.Rect((left, top),
                                   (2 * game.node_radius,
                                    2 * game.node_radius))
            # NOTE(review): pygame's Rect.contains expects another Rect; if
            # mousePos is an (x, y) point, nodeRect.collidepoint(mousePos)
            # is likely what was intended — confirm mousePos's type.
            if mousePos.contains(nodeRect):
                if not (node.isStart or node.isGoal or node.isWall):
                    if game.choosingStart:
                        node.isStart = True
                        game.choosingStart = False