Code example #1
0
    'numIterations': 9,
    'numEpisodes': 25,
    'tempThreshold': 15,
    'updateThreshold': 0.6,
    'maxlenOfQueue': 200000,
    'numMCTSSims': 25,
    'arenaCompare': 40,
    'cpuct': 1,
}

# Checkpoint / persistence settings. The checkpoint directory name is derived
# from the model hyper-parameters so each configuration gets its own folder.
loadSaveArgs = {
    'checkpoint': './checkpoints/tictactoe/keras/' + args_to_filename(model_params),
    'load_model': False,
    'load_folder_file': ('models/tictactoe/keras/' + args_to_filename(model_params), 'best.pth.tar'),
    'numItersForTrainExamplesHistory': 10,
}

# Merge hyper-parameters and persistence settings into one attribute-accessible
# config object (later keys win, matching the original {**a, **b} merge order).
_merged = dict(model_params)
_merged.update(loadSaveArgs)
args = dotdict(_merged)

if __name__ == "__main__":

    # Build a 3x3 tic-tac-toe game, its Keras network wrapper, and the
    # self-play coach that ties them together.
    game = TicTacToeGame(3)
    nnet = keras_tictactoe_neuralnet(game)
    c = Coach(game, nnet, args)

    # Make sure the checkpoint directory exists before anything is written to it.
    pathlib.Path(args.checkpoint).mkdir(parents=True, exist_ok=True)
    if args.load_model:
        # NOTE(review): if this folder did not already exist, it was just created
        # empty, so the load_checkpoint call below cannot find 'best.pth.tar'
        # — confirm this mkdir is intended as a convenience for later saves.
        pathlib.Path(args.load_folder_file[0]).mkdir(parents=True, exist_ok=True)

        nnet.load_checkpoint(args.load_folder_file[0], args.load_folder_file[1])

        print("Load trainExamples from file")
        c.loadTrainExamples()
Code example #2
0
from tictactoe.TicTacToeGame import TicTacToeGame
from tictactoe.TicTacToePlayers import HumanPlayer, RandomPlayer, KerasNeuralNetPlayer
from alpha_zero.Arena import Arena
from alpha_zero.utils import dotdict
"""
use this script to play any two agents against each other, or play manually with
any agent.
"""

# Shared 3x3 game instance used by every player below.
game = TicTacToeGame(3)

# all players
random_player1 = RandomPlayer(game, "Random1")
# Fix: the second random player was also labelled "Random1" (copy-paste slip);
# give it a unique name so the two agents can be told apart in the results.
random_player2 = RandomPlayer(game, "Random2")
human_player = HumanPlayer(game, "Human")
# MCTS search settings used by the neural-net player.
args1 = dotdict({'numMCTSSims': 25, 'cpuct': 1.0})

# nnet players

# The directory name encodes the training hyper-parameters of this checkpoint.
checkpoints_dir = "./checkpoints/tictactoe/keras/3numIterations-25numEpisodes-15tempThreshold-0.6updateThreshold-200000maxlenOfQueue-25numMCTSSims-40arenaCompare-1cpuct-"
neural_net_player1 = KerasNeuralNetPlayer(game, args1, "Neural_1")
neural_net_player1.load_brain(checkpoints_dir, 'best.pth.tar')

if __name__ == '__main__':

    # Fix: `display` was referenced below but never defined or imported in this
    # module, so running the script raised NameError. In the upstream
    # alpha-zero-general layout the board-rendering helper lives in the game
    # module, which this file already imports from.
    # NOTE(review): confirm `display` is actually exported by
    # tictactoe.TicTacToeGame in this fork; otherwise pass the fork's own
    # board-printing function here instead.
    from tictactoe.TicTacToeGame import display

    # arena = Arena(neural_net_player1, neural_net_player2, game, display=display)
    # Pit the human against the trained network for two games, rendering boards.
    arena = Arena(human_player, neural_net_player1, game, display=display)
    results = arena.play_games(2, verbose=True)
    results_format = 'Results {0}'.format(results)
    print(results_format)
    print("")
Code example #3
0
from .TicTacToeNNet import TicTacToeNNet as onnet

"""
NeuralNet wrapper class for the TicTacToeNNet.

Author: Evgeny Tyurin, github.com/evg-tyurin
Date: Jan 5, 2018.

Based on (copy-pasted from) the NNet by SourKream and Surag Nair.
"""

# Training hyper-parameters for the tic-tac-toe Keras network.
_hyperparams = {
    'learningRate': 0.001,  # optimizer step size
    'dropout': 0.3,         # dropout rate used inside the net
    'epochs': 10,           # training epochs per call to train()
    'batch_size': 64,
    'cuda': True,           # presumably toggles GPU use in the net — TODO confirm
    'num_channels': 512,    # convolution filter count
}
# dotdict exposes the entries as attributes (e.g. args.epochs).
args = dotdict(_hyperparams)

class NNetWrapper(NeuralNet):
    def __init__(self, game):
        """Build the underlying Keras net and cache the game's geometry."""
        # Module-level `args` supplies the training hyper-parameters.
        self.nnet = onnet(game, args)
        board_dims = game.getBoardSize()
        self.board_x, self.board_y = board_dims
        self.action_size = game.getActionSize()

    def train(self, examples:List[Tuple[Board,List[float],List[int]]]):
        """
        examples: list of examples, each example is of form (board, pi, v)
        """
        input_boards, target_pis, target_vs = list(zip(*examples))
Code example #4
0
File: NNet.py — Project: blekinge/alpha-zero-general
import math
import sys

from alpha_zero.NeuralNet import NeuralNet
from alpha_zero.utils import dotdict

sys.path.append('..')


import argparse
from .OthelloNNet import OthelloNNet as onnet

# Network training configuration for the Othello net.
_training_config = {
    'lr': 0.001,         # learning rate
    'dropout': 0.3,      # dropout rate used inside the net
    'epochs': 10,        # epochs per call to train()
    'batch_size': 64,
    'cuda': False,       # presumably disables GPU use here — TODO confirm
    'num_channels': 512, # convolution filter count
}
# Wrap in dotdict so fields read as attributes (args.lr, args.epochs, ...).
args = dotdict(_training_config)

class NNetWrapper(NeuralNet):
    def __init__(self, game):
        """Instantiate the Othello network and record board/action dimensions."""
        # Module-level `args` supplies the training hyper-parameters.
        self.nnet = onnet(game, args)
        nx, ny = game.getBoardSize()
        self.board_x = nx
        self.board_y = ny
        self.action_size = game.getActionSize()

    def train(self, examples):
        """
        examples: list of examples, each example is of form (board, pi, v)
        """
        input_boards, target_pis, target_vs = list(zip(*examples))