Example no. 1
def startGame(screen):
    # set title and icon
    pygame.display.set_caption("2048")
    icon = pygame.image.load("img/2048_logo.png")
    pygame.display.set_icon(icon)

    # fill the window with the brownish background colour
    surf = pygame.Surface((SCREEN_WIDTH, SCREEN_HEIGHT), pygame.SRCALPHA)
    surf.fill(color["over"])
    screen.blit(surf, (0, 0))

    # welcome text shown to the user
    text = 'Welcome to 2048!\n\n' \
           'Your goal is to create the number 2048 by sliding the numbers on the grid.\n' \
           'The commands are:\n' \
           'W to Move Up\n' \
           'S to Move Down\n' \
           'A to Move Left\n' \
           'D to Move Right\n' \
           'If you want to quit, press Q.\n\n' \
           'Good Luck! :)\n'

    # print on screen
    blit_text(screen, text, (70, 20), myfont)
    pygame.display.update()
    sleep(3)

    # initialize the game and run it
    board = Game(screen)
    board.playGame()
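The snippet relies on a module-level blit_text helper (plus myfont, color, the screen-size constants and the Game class) that is not shown here. Since pygame.font.Font.render ignores newline characters, the helper presumably splits the welcome message on '\n' itself; a minimal sketch under that assumption (not the original implementation):

import pygame

def blit_text(surface, text, pos, font, text_color=(255, 255, 255)):
    # Hypothetical helper: render each newline-separated line of `text`
    # on its own row, starting at `pos`, because font.render() ignores '\n'.
    x, y = pos
    line_height = font.get_linesize()
    for line in text.split('\n'):
        if line:
            surface.blit(font.render(line, True, text_color), (x, y))
        y += line_height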
Example no. 2
def play_game(neuralNetwork, visual=False):
    game2048 = Game()
    moves = [['a', 's'], ['d', 'w']]

    while not game2048.is_stale():

        if game2048.stale_count == 0:
            # the previous move changed the board, so let the network pick
            state = game2048.get_state()
            inputs = []
            for num in state:
                # feed each tile as its log2 exponent (0 for an empty cell)
                val = 0 if num == 0 else math.log(num, 2)
                inputs += trans(int(val))
            outputs = neuralNetwork.eval(inputs)
            game2048.process_move(moves[outputs[0]][outputs[1]], visual)
        else:
            # the board did not change last time: fall back to a random move
            game2048.process_move(
                moves[random.randint(0, 1)][random.randint(0, 1)], visual)

    return game2048.score
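trans is not defined in the snippet; it apparently expands each tile's log2 exponent into a fixed-width vector so the network receives several binary inputs per cell, and the two outputs of neuralNetwork.eval index the 2x2 moves table. A plausible sketch of such an encoder, assuming a one-hot layout over 16 possible exponents (the width and the helper's behaviour are guesses, not the original code):

def trans(exponent, size=16):
    # Hypothetical one-hot encoding of a tile's log2 exponent
    # (0 for an empty cell); each board cell contributes `size` inputs.
    return [1 if i == exponent else 0 for i in range(size)]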
Example no. 3
    def eval(self):
        # play 10 games with the current network and return the squared average score
        _map = ['w', 's', 'a', 'd']

        scores = []

        for a in range(10):
            g = Game()
            while not g.is_stale():
                state_ = g.get_state()
                # encode each tile by its log2 exponent scaled by 1/8 (empty cells stay 0)
                state = [0 for a in range(16)]
                for i in range(len(state_)):
                    if state_[i] != 0:
                        state[i] = math.log(state_[i], 2) / 8.0

                in_ = np.array(state)
                out = self.graph.evaluate(in_)[0]
                g.process_move(_map[out])
            scores += [g.get_score()]

        return (sum(scores) / 10.0) ** 2
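The math.log(value, 2) / 8.0 scaling feeds the network each tile's exponent divided by 8, so empty cells stay 0.0, a 256 tile maps to exactly 1.0, and larger tiles go slightly above 1. A quick check of that encoding:

import math

for tile in (0, 2, 16, 256, 2048):
    value = 0.0 if tile == 0 else math.log(tile, 2) / 8.0
    print(tile, value)   # 0 0.0, 2 0.125, 16 0.5, 256 1.0, 2048 1.375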
Example no. 4
class qNet2048(object):
    EPOCH = 2000

    def __init__(self):
        self.net = Net(20, 50, 1)
        self.net.setEpoch(1)
        self.gamma = 0.8
        self.main()

    def main(self):
        self.train()
        self.playGame()

    def playGame(self):
        self.initNewGame()
        i = 0
        while self.gameRunning:
            print(' Move:', i)
            i += 1
            self.game.Print()
            (action, bestValue) = self.getMaxQ()
            self.game.Move(action)
        print(' Total moves:', i)
        self.game.Print()

    def train(self):
        for i in range(self.EPOCH):
            print('Game Epoch:', i + 1, '/', self.EPOCH, end='\r')
            self.initNewGame()
            while self.gameRunning:
                state = self.gridToVector()
                # explore with a random move and observe its reward
                action = random.choice(list(DIRECTION))
                self.game.Move(action)
                # Q-learning target: reward + gamma * best Q-value of the new state;
                # the training input pairs the previous state with the move actually taken
                (bestAction, bestValue) = self.getMaxQ()
                inValue = state + self.directionToVector(action)
                newQ = self.game.GetLastMoveScore() + self.gamma * bestValue
                self.net.Train([inValue], [[newQ]])
            print('\nScore: ', self.game.GetTotalScore())
        print()

    def getMaxQ(self):
        # simulate every direction and return the best one with its predicted Q-value
        directions = self.simDirections()
        best = max(directions, key=directions.get)
        return (best, directions[best][0])

    def simDirections(self):
        gridVector = self.gridToVector()
        result = {}
        for direction in DIRECTION:
            inputArray = gridVector[:] + self.directionToVector(direction)
            result[direction] = self.net.Sim(inputArray)
        return result

    def directionToVector(self, direction):
        if direction == DIRECTION.LEFT:
            return [1.0, 0.0, 0.0, 0.0]
        if direction == DIRECTION.RIGHT:
            return [0.0, 1.0, 0.0, 0.0]
        if direction == DIRECTION.UP:
            return [0.0, 0.0, 1.0, 0.0]
        if direction == DIRECTION.DOWN:
            return [0.0, 0.0, 0.0, 1.0]

    def gridToVector(self):
        tab = self.game.GetGrid()
        i = []
        for row in tab:
            i += row
        maxValue = max(i)
        return [x / maxValue for x in i]

    def initNewGame(self):
        self.game = Game()
        self.game.onGameOver(self.handleGameOver)
        self.gameRunning = True

    def handleGameOver(self):
        self.gameRunning = False
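The class above expects a DIRECTION enumeration (iterated with list(DIRECTION) and compared member by member in directionToVector), plus Game and Net implementations defined elsewhere. A minimal stand-in for the enum, assuming nothing more than four members:

from enum import Enum

class DIRECTION(Enum):
    # stand-in for the enum used by qNet2048; the member values are arbitrary
    LEFT = 0
    RIGHT = 1
    UP = 2
    DOWN = 3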
Example no. 5
        # if not found, create a new one
        if not spec:
            spec = Species(state)
            r = RewardInfo()
            self.species[spec] = r
            return spec, r

        # otherwise return the stored reward for this state
        return spec, self.species[spec]


g = Game()
sm = StateManager()


scores = []
random_moves = []
informed_moves = []
dif_states = []
x_data = []


running_length = 500
averages = []
num_games = int(4 * 1e3)

print('Playing %s games.' % num_games)
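The loop that actually fills scores, averages, and the move counters is not part of this fragment. Purely as an illustration of how the bookkeeping variables above could be used, here is a simplified driver reusing the is_stale/process_move/get_score interface from the earlier examples; the random move policy and the running-average window are placeholders, not the original StateManager logic:

import random  # would normally sit at the top of the file

for n in range(num_games):
    g = Game()
    while not g.is_stale():
        g.process_move(random.choice(['w', 'a', 's', 'd']))  # placeholder policy
    scores.append(g.get_score())
    x_data.append(n)

    # running average over the last `running_length` games
    if len(scores) >= running_length:
        averages.append(sum(scores[-running_length:]) / float(running_length))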