Example #1
def create_dataset(path, player=rdeep.Bot(), games=2000, phase=1):

    data = []
    target = []

    # For progress bar
    bar_length = 30
    start = time.time()

    for g in range(games):

        # For progress bar
        if g % 10 == 0:
            percent = 100.0 * g / games
            sys.stdout.write('\r')
            sys.stdout.write("Generating dataset: [{:{}}] {:>3}%".format(
                '=' * int(percent / (100.0 / bar_length)), bar_length,
                int(percent)))
            sys.stdout.flush()

        # Randomly generate a state object starting in specified phase.
        state = State.generate(phase=phase)

        state_vectors = []

        while not state.finished():

            # Give the state a signature if in phase 1, obscuring information that a player shouldn't see.
            given_state = (state.clone(signature=state.whose_turn())
                           if state.get_phase() == 1 else state)

            # Add the features representation of a state to the state_vectors array
            state_vectors.append(features(given_state))

            # Advance to the next state
            move = player.get_move(given_state)
            state = state.next(move)

        winner, score = state.winner()

        for state_vector in state_vectors:
            data.append(state_vector)

            if winner == 1:
                result = 'won'

            elif winner == 2:
                result = 'lost'

            target.append(result)

    with open(path, 'wb') as output:
        pickle.dump((data, target), output, pickle.HIGHEST_PROTOCOL)

    # For printing newline after progress bar
    print("\nDone. Time to generate dataset: {:.2f} seconds".format(time.time() - start))

    return data, target
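
For reference, a minimal sketch of how the pickled dataset produced above could be loaded back and inspected; the file name is a placeholder, not taken from the example:

# Sketch (assumed usage): load the pickled dataset and check its size and labels.
import pickle
from collections import Counter

with open('dataset.pkl', 'rb') as fin:   # 'dataset.pkl' is a placeholder path
    data, target = pickle.load(fin)

print(len(data), "state vectors,", len(data[0]), "features each")
print(Counter(target))                   # counts of 'won' vs 'lost' labels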
Example #2
def create_dataset(path,
                   player=rdeep.Bot(),
                   good_games=0,
                   bad_games=5000,
                   phase=1):  # adjust the good/bad game counts here
    """Create a dataset that can be used for training the ML bot model.
    The dataset is created by having the player (bot) play games against itself.
    The games parameter indicates how many games will be started.
    
    Each game will be played and the game situations will be stored.
    Then, the game ends and it is recorded whether the game situations resulted in a win or loss for player 1.
    In other words, each game situation is stored with the corresponding class label (won/lost).

    Keyword arguments
    path -- the pathname where the dataset is to be stored
    player -- the player which will play against itself, default the rand Bot
    games -- the number of games to play, default 2000
    phase -- wheter to start the games in phase 1, the default, or phase 2
    """

    data = []
    target = []

    # For progress bar
    bar_length = 30
    start = time.time()

    for g in range(good_games):

        # For progress bar
        if g % 10 == 0:
            percent = 100.0 * g / good_games
            sys.stdout.write('\r')
            sys.stdout.write("Generating good dataset: [{:{}}] {:>3}%".format(
                '=' * int(percent / (100.0 / bar_length)), bar_length,
                int(percent)))
            sys.stdout.flush()

        # Randomly generate a state object starting in specified phase.
        state = State.generate(phase=phase)

        state_vectors = []

        while not state.finished():
            # Give the state a signature if in phase 1, obscuring information that a player shouldn't see.
            given_state = (state.clone(signature=state.whose_turn())
                           if state.get_phase() == 1 else state)

            # Add the features representation of a state to the state_vectors array
            state_vectors.append(features(given_state))

            # Advance to the next state
            move = player.get_move(given_state)
            state = state.next(move)

        winner, score = state.winner()

        for state_vector in state_vectors:
            data.append(state_vector)

            if winner == 1:
                result = 'won'
            elif winner == 2:
                result = 'lost'

            target.append(result)
        print(" This is the result %s" % result)

    for g in range(bad_games):  # added section: games with randomly assigned labels

        # For progress bar
        if g % 10 == 0:
            percent = 100.0 * g / bad_games
            sys.stdout.write('\r')
            sys.stdout.write("Generating bad dataset: [{:{}}] {:>3}%".format(
                '=' * int(percent / (100.0 / bar_length)), bar_length,
                int(percent)))
            sys.stdout.flush()

        # Randomly generate a state object starting in specified phase.
        state = State.generate(phase=phase)

        state_vectors = []

        while not state.finished():
            # Give the state a signature if in phase 1, obscuring information that a player shouldn't see.
            given_state = (state.clone(signature=state.whose_turn())
                           if state.get_phase() == 1 else state)

            # Add the features representation of a state to the state_vectors array
            state_vectors.append(features(given_state))

            # Advance to the next state
            move = player.get_move(given_state)
            state = state.next(move)

        winner, score = state.winner()

        for state_vector in state_vectors:
            data.append(state_vector)

            # The label is drawn at random regardless of the actual winner,
            # producing deliberately mislabelled training data.
            result = random.choice(['won', 'lost'])

            target.append(result)

    with open(path, 'wb') as output:
        pickle.dump((data, target), output, pickle.HIGHEST_PROTOCOL)

    # For printing newline after progress bar
    print("\nDone. Time to generate dataset: {:.2f} seconds".format(time.time() - start))

    return data, target
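
With both good_games and bad_games available, it can be useful to sanity-check how the random relabelling affects the class balance. The call below is only a sketch, assuming the same imports as the function above; the path and game counts are chosen for illustration:

# Sketch (assumed usage): build a small mixed dataset and inspect the label balance.
from collections import Counter

data, target = create_dataset('mixed-dataset.pkl',   # placeholder path
                              player=rdeep.Bot(),
                              good_games=100,
                              bad_games=100)
print(Counter(target))   # the random labels pull the split towards roughly 50/50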
    dest="overwrite",
    action="store_true",
    help=
    "Whether to create a new dataset regardless of whether one already exists at the specified path."
)

parser.add_argument("--no-train",
                    dest="train",
                    action="store_false",
                    help="Don't train a model after generating dataset.")

options = parser.parse_args()

if options.overwrite or not os.path.isfile(options.dset_path):
    create_dataset(options.dset_path,
                   player=rdeep.Bot(),
                   good_games=0,
                   bad_games=5000)

if options.train:

    # Play around with the model parameters below

    # HINT: Use tournament fast mode (-f flag) to quickly test your different models.

    # The following tuple specifies the sizes of the hidden layers in the neural
    # network, and, implicitly through its length, the number of layers.
    # You can set any number of hidden layers, even just one. Experiment and see what works.
    hidden_layer_sizes = (64, 32)

    # The learning rate determines how fast we move towards the optimal solution.
parser.add_argument("-d",
                    dest="create_dataset",
                    action="store_true",
                    help="Whether to create a new dataset regardless of whether one already exists")

parser.add_argument("--no-train",
                    dest="train",
                    action="store_false",
                    help="Don't train a model after generating dataset.")


options = parser.parse_args()

if options.create_dataset or not os.path.isfile(options.path):
    create_dataset(options.path, player=rdeep.Bot(), games=10000)

if options.train:

    # Play around with the model parameters below

    # HINT: Use tournament fast mode (-f flag) to quickly test your different models.

    # The following tuple specifies the sizes of the hidden layers in the neural
    # network, and, implicitly through its length, the number of layers.
    # You can set any number of hidden layers, even just one. Experiment and see what works.
    hidden_layer_sizes = (64, 32)

    # The learning rate determines how fast we move towards the optimal solution.
    # A low learning rate will converge slowly, but a large one might overshoot.
    learning_rate = 0.0001
Example #5
parser.add_argument("-o", "--overwrite",
                    dest="overwrite",
                    action="store_true",
                    help="Whether to create a new dataset regardless of whether one already exists at the specified path.")

parser.add_argument("--no-train",
                    dest="train",
                    action="store_false",
                    help="Don't train a model after generating dataset.")


options = parser.parse_args()

if options.overwrite or not os.path.isfile(options.dset_path):
    create_dataset(options.dset_path, player=rdeep.Bot(), games=700000)

if options.train:

    # Play around with the model parameters below

    # HINT: Use tournament fast mode (-f flag) to quickly test your different models.

    # The following tuple specifies the sizes of the hidden layers in the neural
    # network, and, implicitly through its length, the number of layers.
    # You can set any number of hidden layers, even just one. Experiment and see what works.
    hidden_layer_sizes = (64, 32)

    # The learning rate determines how fast we move towards the optimal solution.
    # A low learning rate will converge slowly, but a large one might overshoot.
    learning_rate = 0.0001
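
The hidden_layer_sizes and learning_rate values above are presumably fed into the model construction further down in the script, which is not shown here. As a hedged sketch, assuming the model is a scikit-learn MLPClassifier trained on the generated dataset (the classifier choice and the remaining parameters are assumptions):

# Sketch (assumed): fit a neural network on the dataset using the parameters above.
from sklearn.neural_network import MLPClassifier

clf = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes,  # e.g. (64, 32)
                    learning_rate_init=learning_rate,       # e.g. 0.0001
                    verbose=True)
clf.fit(data, target)   # data/target as returned by create_dataset or loaded from the pickle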
Example #6
from bots.kbbot import kbbot
from bots.rdeep import rdeep
from bots.projectbot import projectbot

from bots.ml.ml import features

#from bots.unsupervised.unsupervised import features
# How many games to play
GAMES = 1000

# Which phase the game starts in
PHASE = 1

# The player we'll observe
#player = rand.Bot()
player1 = rdeep.Bot()
player2 = projectbot.Bot()
is_player = False

data = []
target = []

for g in range(GAMES):

    # Randomly generate a state object starting in specified phase.
    state = State.generate(phase=PHASE)
    # Select which bot plays this game based on the is_player flag.
    if not is_player:
        player = player1
        is_player = True
    else:
        player = player2