Example #1
# Tail of the Connect-Four trainer whose head appears in Example #17:
# Connect, Agent, agent, model, m, n, and nb_epoch are defined there.
import numpy as np
from keras.models import model_from_json


def self_play(_round=50):
    for i in range(_round):
        print("self play round {}".format(i))
        with open('model.json', 'r') as json_file:
            stable_model = model_from_json(json_file.read())
        stable_model.load_weights('c4.hdf5')
        stable_agent = Agent(model=stable_model)
        mirror_game = Connect(m, n)

        def opposite():
            mirror_game.board = np.array(c4.get_state())
            for i in range(m):
                for j in range(n):
                    if mirror_game.board[i][j] == 1:
                        mirror_game.board[i][j] = 2
                    elif mirror_game.board[i][j] == 2:
                        mirror_game.board[i][j] = 1
            return stable_agent.predict(mirror_game)

        c4 = Connect(m, n, opposite=opposite)
        agent.train(c4, batch_size=10, nb_epoch=nb_epoch, epsilon=.1)
        print('saving')
        model.save_weights('c4.hdf5')
Example #2
from model import model
from qlearning4k import Agent
from flappy_bird import FlappyBird

game = FlappyBird(frame_rate=10000, sounds=False)
agent = Agent(model, memory_size=100000)
agent.train(game,
            epsilon=[0.01, 0.00001],
            epsilon_rate=0.3,
            gamma=0.99,
            nb_epoch=1000000,
            batch_size=32,
            checkpoint=250)
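
Example #12 below replays this agent from a saved weights file; a minimal save step after training, assuming the same model module, could be:

# Persist the trained weights under the file name Example #12 expects.
model.save_weights('weights.dat', overwrite=True)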
Example #3
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.optimizers import sgd
from qlearning4k.games import Snake
from qlearning4k import Agent

nb_frames = 1
grid_size = 10
hidden_size = 100

model = Sequential()
model.add(Flatten(input_shape=(nb_frames, grid_size, grid_size)))
model.add(Dense(hidden_size, activation='relu'))
model.add(Dense(hidden_size, activation='relu'))
model.add(Dense(5))  # five outputs: one Q-value per Snake action
model.compile(sgd(lr=.01), "mse")

game = Snake(grid_size)
agent = Agent(model)
agent.train(game, nb_epoch=5000)

agent.play(game)
Example #4
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.optimizers import sgd
from qlearning4k.games import Snake
from qlearning4k import Agent

nb_frames = 1
grid_size = 10
hidden_size = 100

game = Snake(grid_size)

model = Sequential()
model.add(Flatten(input_shape=(nb_frames, grid_size, grid_size)))
model.add(Dense(hidden_size, activation='relu'))
model.add(Dense(hidden_size, activation='relu'))
model.add(Dense(game.nb_actions, activation='softmax'))
model.compile(sgd(lr=.2), "mse")

agent = Agent(model)
agent.train(game)
agent.play(game)
Example #5
from keras.models import Sequential
from keras.layers import Flatten, Dense
from qlearning4k.games import Catch
from keras.optimizers import *
from qlearning4k import Agent

grid_size = 10
hidden_size = 100
nb_frames = 1

model = Sequential()
model.add(Flatten(input_shape=(nb_frames, grid_size, grid_size)))
model.add(Dense(hidden_size, activation='relu'))
model.add(Dense(hidden_size, activation='relu'))
model.add(Dense(3))  # three outputs: one Q-value per Catch action
model.compile(sgd(lr=.2), "mse")

catch = Catch(grid_size)
agent = Agent(model=model)
agent.train(catch, batch_size=10, nb_epoch=1000, epsilon=.1)
agent.play(catch)
Example #6
from keras.models import Sequential
from keras.layers import Flatten, Dense
from qlearning4k.games import Catch
from keras.optimizers import *
from keras.models import model_from_json

from qlearning4k import Agent

grid_size = 10
hidden_size = 100
nb_frames = 1

# load the model architecture saved by Example #14
with open('C:/temp/py/fariz/catch' + '.json', 'r') as json_file:
    model = model_from_json(json_file.read())

# load weights into new model
model.load_weights('C:/temp/py/fariz/catch' + '.h5')
print("Loaded model")

# compile the loaded model before playing
model.compile(sgd(lr=.2), "mse")

catch = Catch(grid_size)
agent = Agent(model=model)
agent.play(catch)
Example #7
# Truncated snippet: the imports and the car_game construction are not
# shown; the imports below are assumed from the calls that follow.
import time

from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.optimizers import RMSprop
from qlearning4k import Agent

time.sleep(1)

nb_frames = 10
nb_actions = len(car_game.get_possible_actions())
linput = len(car_game.get_state())

print("get_state()", car_game.get_state())
print("get_possible_actions()", car_game.get_possible_actions())

activation_method = 'sigmoid'

print("nb_actions", nb_actions)
model = Sequential()
model.add(Flatten(input_shape=(nb_frames, linput)))
model.add(Dense(512, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.7))
model.add(Dense(512, activation='relu'))
model.add(Dense(512, activation=activation_method))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.7))
model.add(Dense(512, activation='relu'))
model.add(Dense(512, activation=activation_method))
model.add(Dense(nb_actions))
model.compile(RMSprop(), 'MSE')

agent = Agent(model=model, memory_size=-1, nb_frames=nb_frames)
agent.train(car_game, batch_size=128, nb_epoch=5000, gamma=0.4)
agent.play(car_game)
Example #8
from keras.models import Sequential
from keras.layers import *
from keras.optimizers import *
from qlearning4k import Agent

#------------------------------------
from server import Game
#------------------------------------

from keras import backend as K
K.set_image_dim_ordering('th')

grid_size = 21
nb_frames = 1
nb_actions = 5

model = Sequential()
# The game frame is (grid_size, grid_size, 3), so the per-sample input
# (nb_frames, grid_size, grid_size, 3) is 4-D, which Conv2D rejects.
# Assumed fix: fold the frame and RGB axes into one channels_first axis.
model.add(Permute((1, 4, 2, 3),
                  input_shape=(nb_frames, grid_size, grid_size, 3)))
model.add(Reshape((nb_frames * 3, grid_size, grid_size)))
model.add(Conv2D(16, (3, 3), activation='relu'))
model.add(Conv2D(32, (4, 4), activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(nb_actions))
model.compile(RMSprop(), 'MSE')

#------------------------------------
game = Game()
#------------------------------------

agent = Agent(model=model, memory_size=-1, nb_frames=nb_frames)
agent.train(game, batch_size=1, nb_epoch=10000, gamma=0.8)
agent.play(game)
Example #9
from keras.models import model_from_json
from keras.layers import *
from keras.optimizers import *
from qlearning4k import Agent
from my2048 import mGame

grid_size = 4
nb_frames = 1
nb_actions = 4

with open('2048_model.json', 'r') as f:
    model = model_from_json(f.read())
print(model.input_shape)

m2048 = mGame(vis=True)
#m2048 = mGame()
print(m2048.get_frame().shape)

agent = Agent(model=model, memory_size=65536, nb_frames=nb_frames)
model.load_weights('2048_weights.h5')
model.summary()
print(model.get_weights())

agent.play(m2048, epsilon=0.1, visualize=False)
Example #10
from keras.models import Sequential
from keras.layers import *
from qlearning4k.games import Snake
from keras.optimizers import *
from qlearning4k import Agent

from keras import backend as K
K.set_image_dim_ordering('th')

grid_size = 10
nb_frames = 4
nb_actions = 5

model = Sequential()
model.add(
    Conv2D(16, (3, 3),
           activation='relu',
           input_shape=(nb_frames, grid_size, grid_size)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(nb_actions))
model.compile(RMSprop(), 'MSE')

snake = Snake(grid_size)

agent = Agent(model=model, memory_size=-1, nb_frames=nb_frames)
agent.train(snake, batch_size=64, nb_epoch=2000, gamma=0.8)
agent.play(snake)
Example #11
# Truncated snippet: the imports, the DF data-format constant, and the
# opening Conv2D are cut off. Assumed head (filter count and input shape
# are guesses, not from the source); Tron is a custom game class defined
# elsewhere in this project.
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Flatten
from keras.optimizers import Adam
from qlearning4k import Agent

DF = 'channels_first'  # assumed; matches the data_format kwarg below

model = Sequential()
model.add(Conv2D(16, (3, 3), data_format=DF, padding='same',
                 input_shape=(1, 21, 21),  # placeholder; not in the source
                 activation='relu'))
model.add(Conv2D(32, (3, 3), data_format=DF, padding='same',
                 activation='relu'))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(4, activation='linear'))

#print(model.summary())

model.compile(optimizer=Adam(1e-2), loss='mse')

enemy_model = Sequential.from_config(model.get_config())
enemy_model.compile(optimizer=Adam(1e-2), loss='mse')

tron = Tron(enemy_model)
agent = Agent(model=model)

print('Initial phase')

agent.train(game=tron,
            epsilon=(1.0, 0.1),
            epsilon_rate=0.5,
            batch_size=32,
            nb_epoch=10000,
            gamma=0.9,
            checkpoint=500)
agent.play(tron, nb_epoch=1)
tron.update_ai_model(agent.model)

for i in range(10):
    print('Phase #', i + 1)
    # ... truncated; each phase presumably repeats the train / play /
    # update_ai_model cycle from the initial phase above.
Example #12
from model import model
from qlearning4k import Agent
from flappy_bird import FlappyBird

game = FlappyBird(frame_rate=30, sounds=True)
model.load_weights('weights.dat')
agent = Agent(model)
agent.play(game, nb_epoch=100, epsilon=0.01, visualize=False)
Example #13
# Truncated snippet: the imports, the input tensor x_in, the advantage
# stream adv, and the head of this first value-stream convolution are cut
# off. Assumed imports and filter count (not from the source):
from keras.models import Model
from keras.layers import Convolution2D, Flatten, Dense, RepeatVector, merge
from qlearning4k import Agent

# x_in, adv, val, nb_actions, nb_frames, and ql4kgame are defined earlier
# in the file (not shown).
val = Convolution2D(64,
                    3,
                    3,
                    subsample=(2, 2),
                    activation='relu',
                    dim_ordering='th')(val)
val = Convolution2D(128,
                    3,
                    3,
                    subsample=(2, 2),
                    activation='relu',
                    dim_ordering='th')(val)
val = Convolution2D(256,
                    3,
                    3,
                    subsample=(2, 2),
                    activation='relu',
                    dim_ordering='th')(val)
val = Flatten()(val)
val = Dense(1)(val)
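# Dueling-style head (assumed intent): broadcast the scalar state value
# across all actions, then sum it with the advantage stream.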
val = RepeatVector(nb_actions)(val)
val = Flatten()(val)

y_out = merge([adv, val], mode='sum')

model = Model(input=x_in, output=y_out)
model.compile(optimizer='rmsprop', loss='mse')

agent = Agent(model=model, memory_size=-1, nb_frames=nb_frames)
agent.train(ql4kgame, batch_size=64, nb_epoch=300, gamma=0.7)
model.save("dqn_model.h5")
agent.play(ql4kgame)
Example #14
from keras.models import Sequential
from keras.layers import Flatten, Dense
from qlearning4k.games import Catch
from keras.optimizers import *
from qlearning4k import Agent

grid_size = 10
hidden_size = 100
nb_frames = 1

model = Sequential()
model.add(Flatten(input_shape=(nb_frames, grid_size, grid_size)))
model.add(Dense(hidden_size, activation='relu'))
model.add(Dense(hidden_size, activation='relu'))
model.add(Dense(3))  # three outputs: one Q-value per Catch action
model.compile(sgd(lr=.2), "mse")

catch = Catch(grid_size)
agent = Agent(model=model)
agent.train(catch, batch_size=10, nb_epoch=500, epsilon=.1)
agent.play(catch)

# serialize model to JSON
model_json = model.to_json()
with open('C:/temp/py/fariz/catch' + '.json', 'w') as json_file:
    json_file.write(model_json)

# serialize weights to HDF5
model.save_weights('C:/temp/py/fariz/catch' + '.h5')
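# Example #6 above reloads this JSON/HDF5 pair for playback.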
Example #15
# Truncated snippet: the imports and grid size are not shown. The imports
# below are assumed from the calls that follow; grid_size = 4 matches the
# companion loader in Example #9.
from keras.models import Sequential
from keras.layers import BatchNormalization, Dense, Flatten
from keras.optimizers import SGD
from qlearning4k import Agent
from my2048 import mGame

grid_size = 4
nb_frames = 1
nb_actions = 4

model = Sequential()
model.add(BatchNormalization(axis=1, mode=2, input_shape=(nb_frames, grid_size, grid_size)))
#model.add(Convolution2D(256, nb_row=4, nb_col=4, input_shape=(nb_frames, grid_size, grid_size)))
#model.add(Activation('relu'))
#model.add(Convolution2D(64, nb_row=2, nb_col=2, activation='relu'))
#model.add(Convolution2D(32, nb_row=4, nb_col=4, activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(nb_actions))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)
#model.compile(RMSprop(), 'MSE')
with open('2048_model.json', 'w') as f:
    f.write(model.to_json())
print(model.input_shape)

m2048 = mGame()
print(m2048.get_frame().shape)

agent = Agent(model=model, memory_size=65536, nb_frames=nb_frames)
agent.train(m2048, batch_size=512, nb_epoch=10, epsilon=(.9,.05), gamma=0.1)
model.save_weights('2048_weights.h5', overwrite=True)
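# Example #9 above reloads this model/weights pair for playback.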

#agent.play(m2048)
Example #16
from keras.models import Sequential
from keras.layers import *
from qlearning4k.games import Snake
from keras.optimizers import *
from qlearning4k import Agent

grid_size = 10
nb_frames = 4
nb_actions = 5

model = Sequential()
model.add(BatchNormalization(axis=1, input_shape=(nb_frames, grid_size, grid_size)))
model.add(Convolution2D(16, nb_row=3, nb_col=3, activation='relu'))
model.add(Convolution2D(32, nb_row=3, nb_col=3, activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(nb_actions))
model.compile(RMSprop(), 'MSE')

snake = Snake(grid_size)

agent = Agent(model=model, memory_size=-1, nb_frames=nb_frames)
agent.train(snake, batch_size=64, nb_epoch=10000, gamma=0.8)
agent.play(snake)
Example #17
# Head of the Connect-Four trainer whose self_play continues in Example #1.
# Truncated snippet: the imports and board constants are not shown; the
# imports below are assumed, and m, n, nb_epoch are defined earlier in the
# file. Connect is a custom game class from elsewhere in this project.
from keras.models import Sequential, model_from_json
from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.optimizers import RMSprop
from qlearning4k import Agent

model = Sequential()
model.add(
    Conv2D(32, (3, 3),
           activation='relu',
           input_shape=(nb_frames, m, n),
           data_format="channels_first"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n))
model.compile(RMSprop(), "mse")

agent = Agent(model=model)


def random_play(_round=20):
    for i in range(_round):
        print("random play round {}".format(i))
        c4 = Connect(m, n)  #, opposite=opposite)
        agent.train(c4, batch_size=10, nb_epoch=nb_epoch, epsilon=.1)
        print('saving')
        model.save_weights('c4.hdf5')


def self_play(_round=50):
    for i in range(_round):
        print("self play round {}".format(i))
        with open('model.json', 'r') as json_file:
            stable_model = model_from_json(json_file.read())
        # ... truncated; the full body of self_play appears in Example #1.
Example #18
# Truncated snippet: the imports are not shown; those below are assumed
# from the calls that follow.
import numpy as np
from keras.models import load_model
from qlearning4k.games import Snake
from qlearning4k import Agent

grid_size = 10
nb_frames = 4
nb_actions = 5

snake = Snake(grid_size)

model = load_model('models/snake.hdf5')
#model = Sequential()
#model.add(Conv2D(16, (3, 3), activation='relu', input_shape=(nb_frames, grid_size, grid_size)))
#model.add(Conv2D(32, (3, 3), activation='relu'))
#model.add(Flatten())
#model.add(Dense(256, activation='relu'))
#model.add(Dense(nb_actions))
#model.compile(RMSprop(), 'MSE')

agent = Agent(model=model, memory_size=-1, nb_frames=nb_frames)
#model.save('/tmp/snake1.hdf5')
#agent.train(snake, batch_size=64, nb_epoch=10000, gamma=0.8)
#model.save('/tmp/snake2.hdf5')
#agent.play(snake)

snake.reset()
agent.clear_frames()
S = agent.get_game_data(snake)
game_over = False
frames = list()
frames.append(S[0])
while not game_over:
    q = model.predict(S)[0]
    possible_actions = snake.get_possible_actions()
    q = [q[i] for i in possible_actions]
    # ... truncated; a plausible completion, following the pattern of
    # qlearning4k's Agent.play (assumed, not from the source):
    action = possible_actions[int(np.argmax(q))]
    snake.play(action)
    S = agent.get_game_data(snake)
    frames.append(S[0])
    game_over = snake.is_over()
Example #19
from keras.models import Sequential
from keras.layers import *
from qlearning4k.games import Frogger
from keras.optimizers import *
from qlearning4k import Agent

rows = 10
cols = 10
hidden_size = 100
nb_frames = 1

model = Sequential()
model.add(Flatten(input_shape=(nb_frames, rows, cols)))
model.add(Dense(hidden_size, activation='relu'))
model.add(Dense(hidden_size, activation='relu'))
model.add(Dense(5, activation='softmax'))
model.compile(sgd(lr=.2), "mse")

game = Frogger(rows, cols)
agent = Agent(model=model)
agent.train(game, batch_size=50, nb_epoch=10000, epsilon_rate=0.2)
agent.play(game, nb_epoch=10)