def __init__(self, size=9):
    """Create a Go game.

    The game owns the goban (the playing board), built with the size
    given as a parameter; the two player slots start unassigned and the
    turn counter starts at zero.
    """
    self.tour = 0
    self.goban = Goban(size)
    self.player1 = None
    self.player2 = None
def randomGame():
    """Play one game between two players and append the outcome to data.json.

    NOTE(review): `player1class` is not defined inside this function — it
    presumably comes from module scope (mirroring the
    `importlib.import_module(classNames[1])` call below used for player 2);
    confirm it is bound before this runs.
    """
    players = []
    player1 = player1class.myPlayer()
    player1.newGame(Goban.Board._BLACK)
    players.append(player1)
    player2class = importlib.import_module(classNames[1])
    player2 = player2class.myPlayer()
    player2.newGame(Goban.Board._WHITE)
    players.append(player2)
    json_data = {}
    b = Goban.Board()
    nextplayer = 0
    nextplayercolor = Goban.Board._BLACK
    nbmoves = 1
    wrongmovefrom = 0
    # NOTE(review): ints are immutable in Python, so playgame() cannot
    # mutate the local `wrongmovefrom`; as written, the `wrongmovefrom > 0`
    # branch below can never trigger. Verify playgame's contract — it may
    # be expected to return the offending colour instead.
    json_data["list_of_moves"] = playgame(b, nextplayer, nextplayercolor,
                                          nbmoves, wrongmovefrom, players)
    result = b.result()
    if wrongmovefrom > 0:
        # An illegal move loses the game for the side that played it.
        if wrongmovefrom == b._WHITE:
            json_data["winner"] = "black"
        elif wrongmovefrom == b._BLACK:
            json_data["winner"] = "white"
        else:
            print("ERROR")
    elif result == "1-0":
        json_data["winner"] = "white"
    elif result == "0-1":
        json_data["winner"] = "black"
    else:
        # Neither side won; record nothing for this game.
        return
    # Appends a comma-terminated JSON object, so data.json is not valid
    # JSON on its own — the consumer presumably wraps it in a list.
    with open('data.json', 'a') as f:
        f.write(json.JSONEncoder().encode(json_data))
        f.write(',')
def __init__(self):
    """Initialise the player: empty board, AlphaBeta search, opening sets."""
    self._board = Goban.Board()
    self._mycolor = None          # no colour assigned yet
    self._behavior = AlphaBeta()  # search strategy used to choose moves
    self._nbCoup = 0              # number of moves played so far
    # Hand-picked board points — presumably candidate opening moves tried
    # by the play logic; confirm how the two sets are used.
    self._movePossible1 = {"B2", "C3", "G3", "H2", "A5", "J5", "B8", "H8", "D9", "F8"}
    self._movePossible2 = {"D2", "F2", "B5", "H5", "C7", "G7", "E8", "A4", "J4"}
    self._time = 0                # presumably cumulative thinking time — confirm
def __init__(self, model_champion1, model_champion2):
    """Set up a fresh board and register the two champion players."""
    self.b = Goban.Board()
    self.nextplayercolor = Goban.Board._BLACK  # Black always opens
    self.nbmoves = 0
    # One move list per colour, plus a flat chronological list.
    self.list_moves_colors = [[], []]
    self.list_of_moves = []
    self.player = []
    self.initialize_player(model_champion1, model_champion2)
def __init__(self):
    """Initialise the player and load its saved Keras evaluation model."""
    self._board = Goban.Board()
    self._mycolor = None  # no colour assigned yet
    # `my_metric` must be registered as a custom object so the saved
    # model can be deserialised.
    self._model = tf.keras.models.load_model(
        'myplayer_model.h5', custom_objects={'my_metric': my_metric})
    self._last_opponent_move = None
    self._last_move = None
    self._turn = 0
    self._timer = 0  # presumably accumulated thinking time — confirm
def __init__(self):
    """Initialise the player state and iterative-deepening bookkeeping."""
    self._board = Goban.Board()
    self._mycolor = None
    self._black_goban = []  # moves actually played by Black
    self._white_goban = []  # moves actually played by White
    self._count = 0  # turn number
    # Used by the iterative-deepening search
    self._last_best_move = 0
    self._start = 0
    self._end = 0
def influence(self, col, lgn):
    """Return the change in territory count for this player if they
    play at column `col`, line `lgn`.

    Copies the current goban, places the candidate stone, recomputes
    the territories, and returns
    (territories after) - (territories before) for this player's number.
    """
    # Territories before the candidate move.
    territories = detect_territory(self.game.goban)
    # Deep-copy the grid and place the candidate stone.
    new_cell = [list(old_lines) for old_lines in self.game.goban.cell]
    new_cell[lgn][col] = self.joueur.number
    # Evaluate the hypothetical position on a scratch goban.
    new_goban = Goban(self.game.goban.taille)
    new_goban.cell = new_cell
    new_territories = detect_territory(new_goban)
    return (len(new_territories[self.joueur.number])
            - len(territories[self.joueur.number]))
def influence(self, col, lgn):
    """Territory delta obtained by playing at (`col`, `lgn`).

    Simulates the move on a copied goban and returns the difference in
    this player's territory count: positive means the move gains
    territory, negative means it loses some.
    """
    # Count territories in the current position.
    before = detect_territory(self.game.goban)
    # Clone the board cells and drop the candidate stone in.
    cells = [list(row) for row in self.game.goban.cell]
    cells[lgn][col] = self.joueur.number
    # Re-run territory detection on a throwaway goban.
    hypothetical = Goban(self.game.goban.taille)
    hypothetical.cell = cells
    after = detect_territory(hypothetical)
    return len(after[self.joueur.number]) - len(before[self.joueur.number])
def __init__(self):
    """Initialise the player: model in inference mode, search limits,
    history sizing, and the board tensors."""
    self._mycolor = None
    self.model = AlphaGoZero()
    self.device = "cpu"
    self.model.eval()  # inference only
    self.max_depth: int = 100
    self.simulations_per_play: int = 100
    # Number of boards to keep for history
    self.len_history: int = 7
    # 2 planes per history step plus one extra plane — presumably one
    # per colour plus side-to-move; confirm against the encoder.
    self.tensor_size: int = 2 * self.len_history + 1
    self.board: Goban.Board = Goban.Board()
    # Fix: `np.float` was removed in NumPy 1.20+; it was an alias for
    # the builtin float, i.e. float64.
    self.np_array: np.ndarray = np.zeros((9, 9), dtype=np.float64)
    self.torch_board = torch.from_numpy(self.np_array)
def build_goban_from_moves(moves):
    """Replay a list of flat move indices onto a fresh board.

    Returns (True, board) on success, or (False, None) if any push
    raises KeyError (e.g. playing inside an eye). `None` entries in
    the list are skipped.
    """
    board = Goban.Board()
    for move in moves:  # was enumerate(); the index was never used
        if move is not None:  # PEP 8: identity test against None
            # Round-trip flat -> name -> coord -> flat kept from the
            # original: it normalises the move before pushing.
            name = Goban.Board.flat_to_name(move)
            coord = Goban.Board.name_to_coord(name)
            # Check that pushing the move does not raise (e.g. playing in an eye)
            try:
                board.push(Goban.Board.flatten(coord))
            except KeyError:
                return False, None
    return True, board
def play_a_game():
    """Play one full game between two gnugo-backed players.

    Returns (states, actions, rewards):
      - states: board snapshots (numpy copies) taken before each move,
      - actions: flat indices of the moves played,
      - rewards: alternating +1/-1 per move keyed to the winner
        (all zeros when neither "1-0" nor "0-1" is the result),
    or (None, None, None) if a player produced an illegal move.
    """
    player1 = gnugoPlayer.myPlayer()
    player2 = gnugoPlayer.myPlayer()
    player1.newGame(Goban.Board._BLACK)
    player2.newGame(Goban.Board._WHITE)
    players = [player1, player2]
    b = Goban.Board()
    nextplayer = 0
    nextplayercolor = Goban.Board._BLACK
    states = []
    actions = []
    while not b.is_game_over():
        # Save the board as the state seen by the player to move.
        states.append(np.copy(b._board))
        legals = b.legal_moves()
        otherplayer = (nextplayer + 1) % 2
        othercolor = Goban.Board.flip(nextplayercolor)
        move = players[nextplayer].getPlayerMove()
        # Hoisted: name_to_flat was computed three times per iteration.
        flat_move = Goban.Board.name_to_flat(move)
        # Save the move as the chosen action (recorded even if illegal,
        # as in the original — the whole game is discarded below).
        actions.append(flat_move)
        if flat_move not in legals:
            # Illegal move: abort the game.
            return None, None, None
        b.push(flat_move)
        players[otherplayer].playOpponentMove(move)
        nextplayer = otherplayer
        nextplayercolor = othercolor
    # Plain loop (not a list comprehension) for the side effect.
    for p in players:
        p.endGame(None)
    result = b.result()
    if result == "1-0":
        winner = 1
    elif result == "0-1":
        winner = 0
    else:
        winner = -1  # draw / undecided
    # Reward: (-1)**(n + winner) alternates sign so every move is scored
    # from its player's perspective relative to the winner.
    if winner != -1:
        rewards = [(-1) ** (n + winner) for n in range(len(actions))]
    else:
        rewards = [0] * len(actions)
    return states, actions, rewards
def __init__(self, model, device):
    """MCTS driver around a policy/value model (used in eval mode)."""
    self.model = model
    self.device = device
    self.model.eval()  # inference only
    # Max Game duration
    self.T: int = 200
    self.max_depth: int = 100
    self.v_resign: float = -0.99  # resign threshold on the value head
    self.simulations_per_play: int = 100
    # Number of moves after which temperature is set to something close
    # to 0 (defined empirically/proportionally).
    self.moves_after_low_temperature: int = 7
    # Number of boards to keep for history
    self.len_history: int = 7
    self.tensor_size: int = 2 * self.len_history + 1
    self.logger: logging.Logger = logging.getLogger("MCTS")
    self.board: Goban.Board = Goban.Board()
    # Fix: `np.float` was removed in NumPy 1.20+; float64 is the dtype
    # it used to alias.
    self.np_array: np.ndarray = np.zeros((9, 9), dtype=np.float64)
    self.torch_board = torch.from_numpy(self.np_array)
    if VISUALIZE:
        self.gnugo = GnuGo(9)
        self.moves = self.gnugo.Moves(self.gnugo)
def get_sample_probs(sample, gnugo):
    """Query gnugo for a move-probability vector of a recorded position.

    Replays `sample['list_of_moves']`, asks gnugo for its top moves and
    their scores, and returns an 82-entry probability vector (81 board
    points + pass slot), or None when gnugo cannot answer.
    """
    moves = gnugo.Moves(gnugo)
    # Replay the sample's moves inside gnugo.
    for move in sample['list_of_moves']:
        moves.playthis(move)
    status, _ = moves._gnugo.query("experimental_score " + moves._nextplayer)
    if status != "OK":
        return None
    status, top_moves = moves._gnugo.query("top_moves " + moves._nextplayer)
    # gnugo answers with alternating "move score move score ..." tokens.
    top_moves = top_moves.strip().split()
    if not top_moves:
        return None
    # Even positions are the best moves, odd positions their scores
    # (slicing replaces the enumerate/modulo filtering).
    best_moves = top_moves[0::2]
    scores = np.array([float(score) for score in top_moves[1::2]])
    assert len(best_moves) == len(scores)
    probs = scores / scores.sum()
    sample_probs = np.zeros(82)  # 81 intersections + 1 pass
    for move, prob in zip(best_moves, probs):
        # NOTE(review): elsewhere in this file the helper is
        # Goban.Board.name_to_flat — confirm Goban.name_to_flat exists.
        sample_probs[Goban.name_to_flat(move)] = prob
    gnugo.query("clear_board")
    return sample_probs
def encoder(data, h=5, liberties=0):
    """Encode the last `h` positions of a recorded game as network tensors.

    Returns (planes, proba_move, proba_pass, proba_win), or None when the
    game is shorter than `h` moves or contains an unplayable move (the
    global `rejected` counter is incremented in that case).
    """
    global rejected
    board = Goban.Board()
    moves = data["list_of_moves"]
    if len(moves) < h:
        return None
    # (2 + liberties) planes per kept position, plus one final plane.
    b = np.zeros((9, 9, h * (2 + liberties) + 1))
    for i, move in enumerate(moves):  # was range(len(moves))
        try:
            board.push(board.flatten(board.name_to_coord(move)))
        except Exception:
            # Deliberate best-effort: drop the sample, count the rejection.
            rejected += 1
            return None
        if i >= len(moves) - h:
            # One of the last h positions: copy its planes into its slot.
            tmp = board_encoding(board, liberties)
            start = (2 + liberties) * (i - len(moves) + h)
            b[:, :, start:start + (2 + liberties)] = tmp[:, :, :2 + liberties]
    if len(moves) % 2 == 1:
        # Presumably marks the side to move — confirm against training code.
        b[:, :, -1] = 1
    proba_move = np.array(data["proba_next_move"][:-1]).reshape((9, 9))
    proba_pass = data["proba_next_move"][-1]
    # Map the win probability from [0, 1] to [-1, 1].
    proba_win = 2 * data["proba_win"] - 1
    return b, proba_move, proba_pass, proba_win
def __init__(self):
    """Start with a fresh board and no assigned colour."""
    self._mycolor = None
    self._board = Goban.Board()
from io import StringIO
import sys


def fileorpackage(name):
    # Strip a trailing ".py" so a filename can be used as a module name.
    if name.endswith(".py"):
        return name[:-3]
    return name


# Resolve the two player module names from the command line, defaulting
# to the local 'myPlayer' module for any missing argument.
if len(sys.argv) > 2:
    classNames = [fileorpackage(sys.argv[1]), fileorpackage(sys.argv[2])]
elif len(sys.argv) > 1:
    classNames = [fileorpackage(sys.argv[1]), 'myPlayer']
else:
    classNames = ['myPlayer', 'myPlayer']

b = Goban.Board()
players = []
# Import each player module dynamically and start its game with the
# colour it will play (player 1 is Black, player 2 is White).
player1class = importlib.import_module(classNames[0])
player1 = player1class.myPlayer()
player1.newGame(Goban.Board._BLACK)
players.append(player1)
player2class = importlib.import_module(classNames[1])
player2 = player2class.myPlayer()
player2.newGame(Goban.Board._WHITE)
players.append(player2)
totalTime = [0, 0]  # total real time for each player
nextplayer = 0
nextplayercolor = Goban.Board._BLACK
def __init__(self):
    self._board = Goban.Board()
    self._mycolor = None  # no colour assigned yet
    # Search tree rooted at the current (empty) board.
    self.tree = MCTS_TREE(self._board)
reward = table['black_wins'] / table['rollouts'] else: # white plays next reward = table['white_wins'] / table['rollouts'] gnugo.query("clear_board") return probs, reward history_size = 7 samples = [] gnugo = GnuGo(9) tables = get_raw_data_go() board = Goban.Board() count = 0 for idx, table in enumerate(tqdm(tables)): assert table['depth'] == len(table['list_of_moves']) vector = np.zeros((2 * history_size + 1, 9, 9), dtype=np.float64) next_to_play = 0 skip = False for move in table['list_of_moves'][:-history_size]: try: board.push(Goban.Board.name_to_flat(move)) except Exception: skip = True
def __init__(self):
    """Fresh player: empty board, no colour, zeroed counters."""
    self._board = Goban.Board()
    self._mycolor = None
    # Bookkeeping counters, both start at zero.
    self.nbmoves = 0
    self.total_time = 0
def reset(self):
    """Reset and init the current game: fresh board, zeroed input tensor."""
    self.board: Goban.Board = Goban.Board()
    # (tensor_size, 9, 9) integer plane stack fed to the model.
    self._vector = torch.zeros((self.tensor_size, 9, 9), dtype=int)
def __init__(self):
    """New player: empty board, no colour yet, default search depth."""
    self._board = Goban.Board()
    self._mycolor = None
    self._turn = 0
    self._depth = 2  # search depth
def __init__(self):
    """Player setup: board, NN helper, timing bookkeeping, a library of
    opening sequences (from games.json when present, otherwise a
    hard-coded fallback), and the learnt heuristic model from disk."""
    self._board = Goban.Board()
    self._NNboard = NNboard(self._board)
    self._mycolor = None
    self._turn = 0
    # Starting depth
    self._depth = 2
    # Importants steps in the game (for MinMAx and AlphaBeta strategies)
    self._mid_game = 20
    self._late_game = 40
    self._very_late_game = 50
    # Maximal time for a move (for Iterative Deepening)
    self._timeout = 5
    # T0 of a move
    self._t0 = None
    # List storing time taken by the player at each move
    self._time_history = []
    # Openings
    self._opening_length = 5  # Number of opening moves the player will play
    self._opening = None  # Will contain an opening
    self._opening_index = 0  # To keep track of where to start checking opening moves
    self._openings = []
    # Model
    self._model = None
    if os.path.exists('games.json'):
        with open('games.json') as json_file:
            data = json.load(json_file)
            for g in data:
                self._openings.append(g['moves'])
    else:
        # Fallback: hard-coded opening sequences.
        self._openings = [
            ["C6", "F4", "D4", "F6", "H5", "D3", "C3", "D5", "C4", "E4", "G7", "F7",
             "G8", "H4", "G5", "G4", "D6", "F8", "G6", "D2", "C2", "D8", "C8", "E5",
             "J4", "J3", "J5", "C5", "B5", "B7", "B6", "C7", "H3", "A6", "D7", "B8",
             "E8", "C9", "F2", "G3", "H2", "G2", "F9", "E2", "E9", "E7", "G9", "D9",
             "F3", "H1", "J2", "C1", "D1", "B1", "B2", "E1", "A4", "A7"],
            ["E5", "E7", "E3", "E2", "D2", "G6", "D7", "D8", "D6", "C8", "G5", "H5",
             "H6", "G4", "G7", "F5", "F6", "E4", "G5", "F3", "D3", "G6", "E8", "D4",
             "C4", "D5", "E6", "C5", "B4", "G5", "F2", "G2", "C6", "B5", "B6", "A6",
             "A7", "A5", "B8", "B2", "C2", "F1"],
            ["E5", "E7", "E3", "E2", "D2", "G6", "G5", "H5", "D7", "D8", "C7", "E6",
             "F5", "D6", "F6", "G7", "H4", "C6", "B5", "D3", "D4", "C3", "C2", "C4",
             "C5", "D5", "E4", "B6", "B3", "A5", "B4", "H6", "F7", "F8", "J4", "J5",
             "A4", "A6"],
            ["E5", "E7", "E3", "E2", "D2", "F3", "E4", "G5", "G4", "G3", "D7", "D8",
             "C7", "C8", "F7", "E6", "F6", "F8", "D6", "E8", "B8", "H4", "F5", "H7",
             "H5", "F4", "H6", "H8", "G6", "E1", "H2", "C2", "D3", "D1", "G2", "F2",
             "H3", "J4", "C3", "G4", "B2", "G1", "H1", "J2", "C1", "J3", "F1", "B7",
             "B6", "G1", "G7", "G8", "F1", "B9", "J5", "G1", "C9", "J1", "A9", "D9",
             "J7", "B9", "A7", "A8", "B7", "J8", "A9", "C9", "A8", "J6", "PASS", "J7"],
            ["E5", "G5", "G4", "C6", "C5", "D5", "D4", "D6", "C4", "H4", "F4", "E6",
             "H5", "F5", "H6", "E4", "E3", "H3", "G7", "G2", "E8", "D8", "E7", "E2",
             "D2", "F2", "D7", "C7", "E5", "B5", "B4", "E4", "F3", "B8", "B6", "B7",
             "A5", "E9", "H8", "F9", "G8", "D1", "E1", "F1", "C2", "J4", "C9", "D3",
             "E5", "D9", "B9", "E4", "C3", "G3", "E5", "A7", "A9", "E4", "D3", "A6",
             "B5", "B2", "B1", "A3", "C1", "A1", "E5"],
            ["E4", "D6", "E6", "E7", "F6", "D4", "D3", "D5", "E3", "F7", "G7", "G8",
             "D7", "G6", "H7", "D8", "C7", "C8", "B6", "B7", "C6", "B5", "A6", "B8",
             "B4", "C4", "B3", "G5", "F4", "G4", "G3", "H3", "H2", "G2", "F2", "F3",
             "F8", "E8", "G3", "A5"],
            ["G4", "D5", "D3", "E7", "G7", "F5", "G5", "E3", "D4", "E4", "C5", "C6",
             "D6", "E5", "B6", "C7", "B4", "D2", "B7", "G8", "H8", "F7", "H6", "C3",
             "C4", "C8", "C2", "F2", "D1", "E1", "E2", "H7", "J7", "D2", "G2", "G3",
             "H3", "B2", "E2", "H2", "F1", "G1", "F8", "E1", "D2", "E8", "G9", "H4",
             "B8", "B9", "H5", "J3", "F4", "F3", "J5", "E9", "F9", "J4", "F6", "E6",
             "A8", "F1", "G6"],
            ["G4", "C6", "E7", "E5", "E3", "G5", "G7", "F4", "F3", "H7", "H4", "G6",
             "C7", "D6", "D7", "B7", "B8", "B5", "A7", "A6", "B6", "G2", "G3", "B7",
             "F6", "F5", "B6", "D2", "A5", "A4", "D3", "C2", "C3", "B3", "C5", "A6",
             "D5", "E6", "A5"],
            ["E5", "E7", "F7", "F3", "E6", "D7", "F8", "C6", "C5", "B5", "B4", "C4",
             "D5", "B6", "C3", "G5", "C8", "D4", "E3", "B3", "E4", "A4", "G3", "G6",
             "H7", "G7", "G8", "H8", "H9", "G2", "G4", "F4", "F2", "F5", "H2", "E2",
             "G1", "D3", "D2", "C2", "E8", "D1"],
            ["G4", "D5", "D3", "G6", "D7", "F7", "B7", "C3", "C2", "D2", "E2", "D4",
             "E3", "B2", "D1", "B4", "E8", "H4", "H5", "G5", "H3", "F4", "G3", "F8",
             "F9", "B6", "G8", "B8", "H6", "G7", "H7", "H8", "G9", "C7", "C8", "A7",
             "C6", "D6", "B7", "H9", "E9", "C7", "E7", "E6", "B7"],
            ["C5", "G5", "F5", "G6", "F4", "G4", "F6", "F7", "E7", "F3", "G7", "F8",
             "E8", "E3", "H7", "D5", "D4", "C4", "D6", "D3", "E5", "B4", "H6", "H5"],
            ["E5", "E7", "E3", "E2", "F2", "C6", "C5", "B5", "D6", "C4", "D5", "C7",
             "D7", "F3", "F4", "G3", "D2", "G2", "E1", "D8", "H5", "F8", "B3", "B4",
             "G7", "G8", "H8", "F6", "G6", "H9", "J8", "C3", "B2", "C2", "C1", "G4",
             "F5", "G5", "F7", "H4", "J6", "E8", "E6", "H2", "J3", "J4", "G1", "H1",
             "J2", "F1", "C8", "E2", "B8", "B6", "F2", "B1", "E2", "D1", "D9", "E4",
             "D4", "D3", "C1", "H6", "A1", "J5", "H7", "J7", "E9", "C9", "B9", "G9",
             "C9"],
            ["E5", "E3", "F3", "F4", "E4", "G3", "F2", "D3", "C4", "G2", "G4", "F5",
             "F6", "G5", "G6", "H4", "H5", "G4", "C6", "C3", "B4", "C7", "B7", "D7",
             "B8", "H7", "H6", "F8", "H8", "G8", "G7", "H9", "J8", "E6", "E7", "D6",
             "D5", "D8", "F7", "E8", "G9", "J5", "J7", "J4", "J6", "B6", "C5", "C8",
             "C9", "B9", "A9", "D9", "F9", "B9", "A6", "A8", "B5", "B3", "A4", "E9",
             "G9"],
            ["E6", "D4", "F4", "C6", "D3", "G7", "C4", "D5", "E3", "C3", "B3", "F5",
             "F6", "C2", "B5", "B6", "B2", "E4", "G4", "F3", "G3", "F2", "D2", "G5",
             "G6", "H5", "H6", "G2", "D7", "J6", "J7", "J5", "H7", "C8"],
            ["E6", "E4", "D4", "D3", "D5", "C3", "F4", "F3", "G4", "G3", "E7", "B5",
             "B4", "C4", "B6", "C5", "C6", "E5", "F5", "D6", "D7", "H4", "H5", "F6",
             "G5", "H6"],
            ["E5", "E7", "C6", "E3", "G4", "G6", "F7", "F6", "E6", "F8", "D7", "G7",
             "C3", "C2", "B2", "D3", "C4", "F4", "G2", "F5", "H5", "D8", "C8", "C9",
             "B9", "D9", "B8", "F2", "G3", "H6", "C1", "D2", "J2", "B1", "A1", "D1",
             "B3", "B1", "A2", "G1", "H1", "F1", "J6", "J7", "J5", "G5", "H4", "D4",
             "D5", "C1"],
            ["G4", "D5", "D3", "F6", "C7", "C5", "F8", "C3", "F3", "D2", "E2", "D4",
             "E3", "G5", "E7", "G8", "B6", "F7", "E8", "H5", "H4", "J4", "J3", "J5",
             "H2", "B5", "G9", "H8", "C2", "B2", "D1", "B8", "B7", "C8", "A8", "B1",
             "B3", "C4", "H9", "F9", "E9", "A6", "D8", "B9", "H7", "G7", "E6", "D6",
             "E5", "E4", "F5", "H6", "D7", "F4", "A2"]
        ]
    # Retrieve learnt model saved on disk (for heuristic)
    # load json and create model — `with` replaces the manual
    # open/read/close so the file is closed even if read() raises.
    with open('./model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    self._model = tf.keras.models.model_from_json(loaded_model_json)
    # load weights into new model
    self._model.load_weights("./model.h5")
    # evaluate loaded model on test data
    self._model.compile(loss='mae', optimizer='adam', metrics=['accuracy'])
def __init__(self):
    self._board = Goban.Board()
    # External gnugo engine sized to the board, plus its move interface.
    self._gnugo = GnuGo.GnuGo(Goban.Board._BOARDSIZE)
    self._moves = self._gnugo.Moves(self._gnugo)
    self._mycolor = None  # no colour assigned yet