Example #1
def data_generator_for_CRNN(score_to_begin, score_to_win, batch_size):
    # Endlessly play games with the ExpectiMax agent and yield batches of
    # (board, direction) training pairs shaped for a CRNN, collecting samples
    # only after the game score reaches score_to_begin.
    datas = []
    labels = []
    cnt = 0
    while 1:
        game = Game(score_to_win = score_to_win, random = False)
        agent = ExpectiMaxAgent(game)
        while game.end == 0:
            step = agent.step()
            if game.score >= score_to_begin:
                board = board2array(game)
                board1 = np.swapaxes(board, 1, 2)
                board2 = np.swapaxes(board1, 0, 1).reshape((16, 4, 4, 1))
                
                datas.append(board2)
                labels.append(step2array(step))
                cnt += 1
            game.move(step)
            if cnt == batch_size:
                cnt = 0
                datas = np.array(datas)
                labels = np.array(labels)
                yield (datas, labels)
                datas = []
                labels = []
def testAgent(n_tests, game_size, score_to_win, model, max_iter=1000):
    # Play n_tests games, advancing the board with the model's greedy move,
    # and return how often that move agrees with the ExpectiMax agent.
    acc, total = 0, 0
    for i in trange(n_tests):
        game, n_iter = Game(game_size, score_to_win), 0
        target = ExpectiMaxAgent(game)
        while n_iter < max_iter and not target.game.end:
            dir_ = Greedy_Action(game, model)
            target_dir = target.step()
            n_iter += 1
            total += 1
            if dir_ == target_dir:
                acc += 1
            target.game.move(dir_)
    return acc / total
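
# --- Hypothetical helper sketches (not part of the original snippet) ---
# board2array, step2array and Greedy_Action are used above but never defined
# here; the versions below are reconstructions based on the shapes the code
# expects, not the author's implementations.
def board2array(game):
    # Presumed: one-hot encode each tile by its log2 exponent into (4, 4, 16).
    board = np.array(game.board)
    exponents = np.zeros(board.shape, dtype=int)
    mask = board > 0
    exponents[mask] = np.log2(board[mask]).astype(int)
    return np.eye(16)[exponents]

def step2array(step):
    # Presumed: one-hot encode the direction (0..3) as a length-4 label.
    label = np.zeros(4)
    label[step] = 1.0
    return label

def Greedy_Action(game, model):
    # Presumed: encode the board like the training data and take the argmax
    # of the model's predicted direction probabilities.
    x = board2array(game)[np.newaxis, ...]
    return int(np.argmax(model.predict(x)[0]))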
Example #3
def data_generator_for_CNN(score_to_begin, score_to_win, batch_size):
    # Same idea as the CRNN generator, but boards are kept in the layout
    # returned by board2array, without any axis reshuffling.
    datas = []
    labels = []
    cnt = 0
    while 1:
        game = Game(score_to_win = score_to_win, random = False)
        agent = ExpectiMaxAgent(game)
        while game.end == 0:
            step = agent.step()
            if game.score >= score_to_begin:
                datas.append(board2array(game))
                labels.append(step2array(step))
                cnt += 1
            game.move(step)
            if cnt == batch_size:
                cnt = 0
                datas = np.array(datas)
                labels = np.array(labels)
                yield (datas, labels)
                datas = []
                labels = []
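
# Minimal usage sketch (an assumption, not from the original snippet): the
# generator can be handed straight to Keras once a model with a matching input
# shape exists. The tiny network below assumes board2array yields (4, 4, 16)
# one-hot boards.
from tensorflow import keras  # or `import keras`, as in the other snippets

cnn_model = keras.Sequential([
    keras.layers.Conv2D(128, (2, 2), activation='relu', input_shape=(4, 4, 16)),
    keras.layers.Flatten(),
    keras.layers.Dense(4, activation='softmax'),
])
cnn_model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
train_gen = data_generator_for_CNN(score_to_begin=0, score_to_win=2048,
                                   batch_size=128)
cnn_model.fit(train_gen, steps_per_epoch=500, epochs=10)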
Example #4
def data_generator(batch_size):
    # Yield batches where each sample is the scaled board stacked side by side
    # with its transpose, labelled with the ExpectiMax agent's direction.
    datas = []
    labels = []
    cnt = 0
    while 1:
        game = Game(score_to_win = 2048, random = False)
        agent = ExpectiMaxAgent(game)
        while game.end == 0:
            step = agent.step()
            board = game.board / 11
            board1 = board.T
            datas.append(np.hstack((board, board1)))
            labels.append(step2array(step))
            cnt += 1
            game.move(step)
            if cnt == batch_size:
                cnt = 0
                datas = np.array(datas)
                labels = np.array(labels)
                yield (datas, labels)
                datas = []
                labels = []
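
# Quick shape check (hypothetical batch size): each sample is the board stacked
# side by side with its transpose, so models consuming this generator need a
# (4, 8) input; the label shape assumes step2array returns a length-4 one-hot.
gen = data_generator(batch_size=32)
xb, yb = next(gen)
print(xb.shape, yb.shape)   # expected: (32, 4, 8) and (32, 4)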
Example #5
from game2048.game import Game
from game2048.agents import ExpectiMaxAgent
from game2048.agents import MyAgent
from game2048.displays import Display
import csv
import os

game_size = 4
score_to_win = 2048
iter_num = 3000

game = Game(game_size, score_to_win)
board = game.board
agenta = ExpectiMaxAgent(game, Display())
agentb = MyAgent(game, Display())
directiona = agenta.step()
directionb = agentb.step()
board = game.move(directionb)

i = 0
dic = {}
idx = 0

# save file
filename = '/home/olivia/PycharmProjects/2048/game2048/data/traindata10.csv'
if os.path.exists(filename):
    start = True
else:
    start = False
    os.mknod(filename)
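
# Hypothetical continuation (not in the original snippet): play the game out,
# labelling each board the student agent sees with the ExpectiMax direction
# and appending the rows to the CSV created above. The 16-cells-plus-direction
# row layout is an assumption.
with open(filename, 'a', newline='') as f:
    writer = csv.writer(f)
    while not game.end:
        directiona = agenta.step()    # label from the ExpectiMax agent
        directionb = agentb.step()    # move actually played, as above
        writer.writerow(list(game.board.reshape(16)) + [directiona])
        game.move(directionb)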
Example #6
NUM_CLASSES = 4
NUM_EPOCHS = 20

display1 = Display()
display2 = IPythonDisplay()
model = keras.models.load_model('model.h5')

image = []
label = []
for i in range(0, 10):
    game = Game(4, score_to_win=2048, random=False)
    agent = ExpectiMaxAgent(game, display=display1)

    while game.end == False:

        direction = agent.step()
        image.append(game.board)
        label.append(direction)
        game.move(direction)

    display1.display(game)
# run 10 games and record the boards and directions
x_train = np.array(image)
y_train = np.array(label)

x_train = np.log2(x_train + 1)
x_train = np.trunc(x_train)
x_train = keras.utils.to_categorical(x_train, 12)

print(x_train.shape)
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)
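
# A hypothetical continuation: fine-tune the loaded model on the freshly
# collected pairs. Whether model.h5 actually expects the (4, 4, 12) one-hot
# boards built above is an assumption.
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=NUM_EPOCHS, batch_size=128)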
Example #7
        game = Game(4, score_to_win=2048, random=False)
        agent1 = ExpectiMaxAgent(game, display=display1)

        while game.end == False:
            if np.sum(game.board) > 384:
                break
            a = np.array(game.board)
            a = np.log2(a + 1)
            a = np.trunc(a)
            a = keras.utils.to_categorical(a, board_class)
            a = a.reshape(1, 4, 4, board_class)
            prediction = model.predict(a, batch_size=128)
            b = prediction[0]
            b = b.tolist()
            direction2 = b.index(max(b))
            direction1 = agent1.step()

            boards.append(game.board)
            directions.append(direction1)
            game.move(direction2)
        display1.display(game)
        if np.amax(game.board) == 1024:
            count += 1

    if count > 98:
        break
    else:
        boards = np.array(boards)
        directions = np.array(directions)

        x_train, x_test, y_train, y_test = train_test_split(boards,
Example #8
#             for j in range(4):
#                 #f.write(game.board[i,j])
#                 print(game.board[i, j], file = f2)
#         print(direction, file = f2)
#         #f.write(direction)

#         game.move(direction)

#     #f.write('\n')

for i in range(300):
    print("i = ", i)
    game = Game(size=GAME_SIZE, score_to_win=SCORE_TO_WIN0)
    agent = ExpectiMaxAgent(game=game)
    while True:
        direction = agent.step()
        if (game.end != 0):
            break
#        print (game.board)
#        print ("direction: ", direction)
        if game.board.max() < 256:

            for i in range(4):
                for j in range(4):
                    #f.write(game.board[i,j])
                    print(game.board[i, j], file=f1)
            print(direction, file=f1)

        elif game.board.max() < 512:

            for i in range(4):
Example #9
results = []
direction = []
while i < num:
    game = Game(4, score_to_win=2048, random=False)
    agent_exp = ExpectiMaxAgent(game)
    agent = MyAgent(game)
    while (game.score <= 1024) and (not game.end):
        A = game.board
        A[A == 0] = 1
        A = np.log2(A)
        A = np.int32(A)
        A = A.reshape(16)
        dir = agent.step()
        # you can change the condition to get different data
        if game.score >= 512:
            dir_exp = agent_exp.step()
            results.append(A)
            direction.append(dir_exp)
        game.move(dir)
    if 0 == i % 100:
        # save the result every 100 games
        results = np.array(results)
        direction = np.array(direction)
        final_results = np.c_[results, direction]
        final_results = pd.DataFrame(final_results)
        final_results.to_csv("data/data_online_1024.csv",
                             index=False,
                             header=False,
                             mode='a+')

        results = []
Example #10
image = []
label = []

display1 = Display()
display2 = Display()

stop_number = 2048
size = int(np.log2(stop_number)) + 1    # number of one-hot encoding bits needed to reach stop_number

for i in range(0, 500):   # play 500 games; each stops once stop_number is reached
    game = Game(4, score_to_win=2048, random=False)
    agent = ExpectiMaxAgent(game, display=display1)  # use the strong (ExpectiMax) agent
    
    while game.end==False:
        a=np.array(game.board)
        
        direction=agent.step()
        image.append(game.board)
        label.append(direction)
        game.move(direction)
        if np.amax(a)==stop_number:
            break
       
    display1.display(game)
    
image = np.array(image)   # convert the collected data and labels to numpy arrays
label=np.array(label)


# split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(image, label, test_size = 0.1, random_state= 30)
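
# Hypothetical continuation using the `size` computed above: turn raw tiles
# into log2 exponents, one-hot encode them, and one-hot the direction labels
# (keras and numpy are assumed to be imported as in the other snippets).
x_train = keras.utils.to_categorical(np.trunc(np.log2(x_train + 1)), size)
x_test = keras.utils.to_categorical(np.trunc(np.log2(x_test + 1)), size)
y_train = keras.utils.to_categorical(y_train, 4)
y_test = keras.utils.to_categorical(y_test, 4)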
Example #11
def board_log2(board):
    for i in range(16):
        if board[i] != 0:
            board[i] = np.log2(board[i])
    return board


# Set the needed number of games
# Limit the max number of steps given by Expectimax each game
NUM_GAMES = 10000
MAX_ITER = 1e3

# This loop takes really a long time
# Use trange to monitor the execution status
for i in trange(NUM_GAMES):
    game = Game(size=4, score_to_win=2048)
    target = ExpectiMaxAgent(game)
    n_iter = 0
    data = np.zeros((0, 17), dtype=float)
    # To avoid frequent I/O operation,
    # data is written into .csv after an entire game rather than one step
    while n_iter < MAX_ITER and not target.game.end:
        dir_ = target.step()
        x = board_log2(np.reshape(target.game.board, newshape=(16, )))
        item = np.hstack((x, dir_))
        data = np.vstack([data, item])
        target.game.move(dir_)
        n_iter += 1
    df = pd.DataFrame(data, columns=None, index=None)
    df.to_csv('./Data_Compressed.csv', mode='a', index=False, header=False)
# print(df)
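
# A minimal read-back sketch (assumption): each CSV row holds the 16
# log2-encoded cells followed by the ExpectiMax direction.
df = pd.read_csv('./Data_Compressed.csv', header=None)
X = df.iloc[:, :16].to_numpy(dtype=float)
y = df.iloc[:, 16].to_numpy(dtype=int)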
Example #12
from game2048.displays import Display, IPythonDisplay
from game2048.agents import Agent, RandomAgent, ExpectiMaxAgent, YourOwnAgent, SimpleNet
import numpy as np
import pandas as pd

display1 = Display()
display2 = IPythonDisplay()

%%time

for i in range(0,3000):
    print(i)
    game = Game(4, score_to_win=2048, random=False)
    #display2.display(game)
    agent = ExpectiMaxAgent(game, display=display2)
    max_iter=np.inf
    n_iter = 0
    
    while (n_iter < max_iter) and (not game.end):
        tmp1 = game.board.reshape(1,16)
        direction = np.array(agent.step())
        tmp3 = direction.reshape(1,1)

        a = np.hstack((tmp1,tmp3))
        
        
        n_iter += 1
        agent.game.move(direction)
        df1 = pd.DataFrame((a))
        df1.to_csv('lastdata.xls',index=0,mode='a',header=0)
Example #13
model_512 = load_model('myAgent_512.h5')
model_1024 = load_model('myAgent_1024.h5')

boards_256 = []
boards_512 = []
boards_1024 = []
directions_256 = []
directions_512 = []
directions_1024 = []

for i in range(30000):
    print('i = ', i)
    game = Game(size=4)
    expectiMaxAgent = ExpectiMaxAgent(game=game)
    while True:  # one turn
        rightDirection = expectiMaxAgent.step()
        if (game.end == True):  # game over
            break
        maxNum = 0
        for p in range(4):
            for q in range(4):
                if game.board[p, q] > maxNum:
                    maxNum = game.board[p, q]
        if maxNum == 2048:  # start the next turn
            break
        inputboard = np.zeros((1, 4, 4, 16))
        for p in range(4):
            for q in range(4):
                num = game.board[p, q]
                if num == 0:
                    inputboard[0, p, q, 0] = 1