Example #1
    def __init__(self):
        """Initialize the game window and settings."""
        self.screen = pygame.display.set_mode((1000, 1000))

        pygame.display.set_caption("Asteroids")
        pygame.font.init()
        self.myfont = pygame.font.SysFont('Comic Sans MS', 30)
        self.debug_text_surface = self.myfont.render('Hello', False,
                                                     (255, 255, 255))

        self.render_pace: float = 1 / 60
        self.game_active = True
        asteroid_list = []
        bullet_list = []

        ship = Ship(self.screen, Point(400, 500), Point(0, 0), -math.pi / 2, 1)
        enemy_ship = Ship(self.screen, Point(600, 500), Point(0, 0),
                          -math.pi / 2, 1)
        self.game_state = GameState(ship, enemy_ship, bullet_list,
                                    asteroid_list)

        for _ in range(8):
            asteroid = Asteroid(self.screen,
                                Point(randint(0, 900), randint(0, 900)),
                                Point(randint(-20, 20), randint(-20, 20)),
                                randint(120, 170))
            asteroid_list.append(asteroid)

        self.movement_manager = MovementManager(self.render_pace, 1000, 1000)
        self.collision_manager = CollisionManager()
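Given the render_pace and manager fields above, the class presumably drives a fixed-timestep loop elsewhere. A minimal sketch of what such a loop might look like; the move_objects and check_collisions method names are assumptions, not taken from the original:

    def run(self):
        # Hypothetical loop sketch, not from the original project.
        clock = pygame.time.Clock()
        while self.game_active:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.game_active = False
            # Assumed manager APIs: advance movement, then resolve collisions.
            self.movement_manager.move_objects(self.game_state)
            self.collision_manager.check_collisions(self.game_state)
            pygame.display.flip()
            clock.tick(60)  # matches the render_pace of 1/60 s per frame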
Example #2
def main():
    # Initialize pygame
    pygame.init()
    # Create the screen object and set the resolution
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
    # Set the window caption
    pygame.display.set_caption("Tetris")
    # Make key presses feel more continuous
    pygame.key.set_repeat(10, 100)
    # Background color (RGB)
    bg_color = BACKGROUND_COLOR
    random.seed(int(time.time()))
    game_state = GameState(screen)
    game_resource = GameResource()
    # Main game loop
    while True:
        # Check whether the current piece has landed at the bottom
        if game_state.piece and game_state.piece.is_on_bottom:
            game_state.wall.add_to_wall(game_state.piece)
            game_state.add_score(game_state.wall.eliminate_lines())
            game_state.piece = Piece(random.choice(PIECE_TYPES), screen,
                                     game_state.wall)

        check_events(game_state)
        # Fill the screen with the background color
        screen.fill(bg_color)
        # Draw the game area, grid lines, and wall
        GameDisplay.draw_game_area(screen, game_state, game_resource)

        if game_state.piece:
            game_state.piece.paint()
        # Refresh the screen
        pygame.display.flip()
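The check_events helper is called above but not shown in this snippet. A minimal sketch of what such a handler might look like, with the piece-movement method names assumed rather than taken from the original project:

import sys

def check_events(game_state):
    # Sketch only: quit on window close; move the falling piece on arrow
    # keys. move_left/move_right/move_down are assumed method names.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.KEYDOWN and game_state.piece:
            if event.key == pygame.K_LEFT:
                game_state.piece.move_left()
            elif event.key == pygame.K_RIGHT:
                game_state.piece.move_right()
            elif event.key == pygame.K_DOWN:
                game_state.piece.move_down()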
Example #3
def runGame(p1, p2):
    p1.resetForNewGame()
    p2.resetForNewGame()
    d1 = Dealer(5)
    p1.cards = d1.dealCards()[0]
    p2.cards = d1.dealCards()[0]
    gameState = GameState(p1, p2)
    aqlearnScore = 0
    gameLoop = True
    while gameLoop:
        p1.pickCard()
        p2.pickCard()

        roundWinner = gameState.judgeRound(p1, p2)

        #? generating successor
        if roundWinner == p1:
            p1.accumulatedCards[p1.playedCard[1]] += 1
        if roundWinner == p2:
            p2.accumulatedCards[p2.playedCard[1]] += 1

        winner = gameState.judgeGameOver(p1, p2)
        if winner == p1:
            return p1.name
        elif winner == p2:
            return p2.name

        p1.cards.append(d1.generateRandomCard())
        p2.cards.append(d1.generateRandomCard())
Example #4
    def newGame(self,
                layout,
                pacmanType,
                ghostType,
                display,
                agentOpts,
                quiet=False,
                catchExceptions=False):
        agents = []
        initState = GameState()
        initState.initialize(layout)

        for index, agentState in enumerate(initState.data.agentStates):
            if agentState.isPacman:
                agents.append(pacmanType(agentOpts, index, agentState))
            else:
                agents.append(ghostType(agentOpts, index, agentState))

        game = Game(agents, display, self, catchExceptions=catchExceptions)
        game.state = initState
        initState.game = game

        self.initialState = initState.deepCopy()

        self.quiet = quiet
        return game
Example #5
    def execute_one_game(self, pac_individual, ghost_individual):
        """
        Execute one game / eval of a run given a Pac individual and
        Ghost individual selected from their respective populations.
        """
        # Pick a new map and set up a new game state.
        game_map = self.experiment.pre_loaded_maps[random.randint(0, 99)]
        self.experiment.world_data = []
        game_state = GameState(game_map, self.experiment.pill_density,
                               self.experiment.time_multiplier,
                               self.experiment.fruit_spawning_probability,
                               self.experiment.fruit_score,
                               self.experiment.num_pacs,
                               self.experiment.num_ghosts)
        game_state.write_world_config(self.experiment.world_data)
        game_state.write_world_time_score(self.experiment.world_data)

        # Create new Pac and Ghost controllers
        for curr_pac_id in range(self.experiment.num_pacs):
            self.pac_controllers[curr_pac_id] = PacController(
                curr_pac_id, pac_individual)
        for curr_ghost_id in range(self.experiment.num_ghosts):
            self.ghost_controllers[curr_ghost_id] = GhostController(
                curr_ghost_id, ghost_individual)

        # While the game isn't over, play game turns.
        game_over = False
        while not game_over:
            game_over = game_state.play_turn(self.experiment.world_data,
                                             self.pac_controllers,
                                             self.ghost_controllers)

        # Set Pac fitness and implement parsimony pressure
        pac_individual.fitness = game_state.score
        if (self.pac_pop.parsimony_technique == 'size'):
            pac_individual.fitness -= (self.pac_pop.pppc *
                                       pac_individual.root.size)
        else:
            pac_individual.fitness -= (self.pac_pop.pppc *
                                       pac_individual.root.height)

        # Set Ghost fitness and implement parsimony pressure
        ghost_individual.fitness = -(game_state.score)
        if (game_state.ghost_won):
            ghost_individual.fitness += int(
                (game_state.time * 100.0) / game_state.orig_time)
        if (self.ghost_pop.parsimony_technique == 'size'):
            ghost_individual.fitness -= (self.ghost_pop.pppc *
                                         ghost_individual.root.size)
        else:
            ghost_individual.fitness -= (self.ghost_pop.pppc *
                                         ghost_individual.root.height)

        # Set Pac and Ghost scores
        # Score is the raw game score, without parsimony pressure, for Pac
        pac_individual.score = game_state.score
        # Score and fitness are interchangeable for Ghost
        ghost_individual.score = ghost_individual.fitness
Example #6
def runGame(searchAlgo, initialGameState):

    # Create a current state object and initialize it for given input
    currentState = GameState()
    currentState.initializeGame(initialGameState)
    printPuzzle(currentState)  # To view what's going on

    # Send the initial state to the SearchAgent along with searchType
    searchType = SearchAgent(searchAlgo)
    searchType.registerInitialState(currentState)
    searchType.generateOutFile()
Example #7
    def execute_one_game(self, weights):
        """
        Execute one game / eval of a run.

        Initialize the Pac controller with the given weights.

        Return score.
        """
        # Pick a new map and set up a new game state.
        game_map = self.experiment.pre_loaded_maps[random.randint(0, 99)]
        self.experiment.world_data = []
        game_state = GameState(game_map, self.experiment.pill_density,
                               self.experiment.time_multiplier,
                               self.experiment.fruit_spawning_probability,
                               self.experiment.fruit_score,
                               self.experiment.num_pacs,
                               self.experiment.num_ghosts)
        game_state.write_world_config(self.experiment.world_data)
        game_state.write_world_time_score(self.experiment.world_data)

        # Create a new Pac controller with a hard-coded expression tree
        # to add a weighted sum of G, P, W, and F.
        g_node = Node(expr='*',
                      left=Node('constant', constant=weights[0]),
                      right=Node('G'))
        p_node = Node(expr='*',
                      left=Node('constant', constant=weights[1]),
                      right=Node('P'))
        w_node = Node(expr='*',
                      left=Node('constant', constant=weights[2]),
                      right=Node('W'))
        f_node = Node(expr='*',
                      left=Node('constant', constant=weights[3]),
                      right=Node('F'))
        add1_node = Node(expr='+', left=g_node, right=p_node)
        add2_node = Node(expr='+', left=w_node, right=f_node)
        root_node = Node(expr='+', left=add1_node, right=add2_node)
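        # root_node evaluates weights[0]*G + weights[1]*P + weights[2]*W
        # + weights[3]*F, i.e. the weighted sum described above.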
        self.pac_controllers[0] = PacController(0, ExprTree(root_node))

        # While the game isn't over, play game turns.
        game_over = False
        while not game_over:
            game_over = game_state.play_turn(self.experiment.world_data,
                                             self.pac_controllers,
                                             self.ghost_controllers)

        return game_state.score
Example #8
    def testExtraction(self):
        featureExt = FeatureExtractor()
        agent = Player("aql agent")
        enemy = Player("greedy agent")
        gameState = GameState(agent, enemy)
        enemy.accumulatedCards["Water"] += 1
        enemy.accumulatedCards["Fire"] += 1
        features = featureExt.getFeatures(gameState, "action", agent.name)
        self.assertEqual(features["enemy-distance-to-closest-win"], 1)
        self.assertEqual(features["agent-distance-to-closest-win"], 4)

        agent.cards.append((1, "Water"))
        enemy.accumulatedCards["Fire"] -= 1
        enemy.accumulatedCards["Water"] += 1

        features = featureExt.getFeatures(gameState, "action", agent.name)
        self.assertEqual(features["agent-distance-to-closest-win"], 3)
        self.assertEqual(features["enemy-distance-to-closest-win"], 1)
Example #9
    def __init__(self, verbose, agentType, nHands, startingMoney, nTraining):
        """
        Initialize the game! Create dealer and player objects and an initial gameState
        input: verbose
            whether or not to print each step as agents play
        input: agentType
            string representing the type of agent to instantiate
        input: nHands
            number of hands to play at max if agent never busts
        input: startingMoney
            the amount of money the agent gets to start with
        input: nTraining
            the number of training hands to do for a qlearning player
        returns: nothing
        """
        self.verbose = verbose
        self.agentType = agentType
        self.nHands = int(nHands) + int(nTraining)
        self.nStartingHands = int(nHands) + int(nTraining)
        print("{} test {} train {} total".format(nHands, nTraining,
                                                 self.nHands))
        self.startingMoney = startingMoney
        self.nTraining = int(nTraining)
        self.dealer = Dealer()
        self.player = self.createAgent(self.agentType, self.startingMoney,
                                       nTraining)

        self.agents = [self.player, self.dealer]

        # Clean slate
        dealerHand = Hand()
        playerHand = Hand()
        deck = Deck()

        # list because player can split
        playerHands = [playerHand]
        if self.player:
            initialBets = [self.player.getBetAmt()]
            # Create initial game state
            self.gameState = GameState(verbose, self.dealer, dealerHand,
                                       self.player, playerHands, deck,
                                       initialBets)

        self.q = self.agentType == 'qlearning'
Example #10
    def __init__(self):
        pygame.display.set_caption("My Game")
        self.FPS = 60
        self.WIDTH = 900
        self.HEIGHT = 500
        self.WIN = pygame.display.set_mode((self.WIDTH, self.HEIGHT))
        self.BLACK = (0, 0, 0)
        self.WHITE = (255, 255, 255)
        self.dest = (100, 100)
        self.handler = Handler(self)
        # Handler(self) passes the current instance; the handler must be
        # created before any other object that takes it in its constructor.
        self.assets = Assets(self.handler)

        self.gameState = GameState(self.handler)
        self.gameState.init()
        self.menuState = MenuState(self.handler)
        self.menuState.init()
        self.currentState = self.menuState
Example #11
    def execute_one_game(self, pac_expr_tree):
        """
        Execute one game / eval of a run given experiment data
        and the root of an expression tree for controlling Pac.

        Return score.
        """
        # Pick a new map and set up a new game state.
        game_map = self.experiment.pre_loaded_maps[random.randint(0, 99)]
        self.experiment.world_data = []
        game_state = GameState(game_map, self.experiment.pill_density,
                               self.experiment.time_multiplier,
                               self.experiment.fruit_spawning_probability,
                               self.experiment.fruit_score,
                               self.experiment.num_pacs,
                               self.experiment.num_ghosts)
        game_state.write_world_config(self.experiment.world_data)
        game_state.write_world_time_score(self.experiment.world_data)

        # Create a new Pac controller
        self.pac_controllers[0] = PacController(0, pac_expr_tree)

        # While the game isn't over, play game turns.
        game_over = False
        while not game_over:
            game_over = game_state.play_turn(self.experiment.world_data,
                                             self.pac_controllers,
                                             self.ghost_controllers)

        # Implement parsimony pressure
        fitness = 0
        if (self.parsimony_technique == 'size'):
            fitness = game_state.score - (self.pppc * pac_expr_tree.root.size)
        else:
            fitness = game_state.score - (self.pppc *
                                          pac_expr_tree.root.height)
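        # i.e., fitness = score - pppc * (tree size or height), so larger
        # controllers are penalized (parsimony pressure).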

        return fitness, game_state.score
Example #12
    def predict_next_move(self, game, current_player, search_iters):
        ''' Returns the (deterministic) best move choice predicted by the
        agent. This method is for use when playing against the agent (or
        when two agents play one another) in real time, e.g. via the
        play() method of the UpDown class.

        Parameters
        ----------
        game : UpDown
            a game of upset-downset.
        current_player : int or str
            the current player to move. Can be either the integers 0/1
            (see UP/DOWN in config.py) or the strings 'up'/'down'
            (for Up/Down resp.).
        search_iters : int (nonnegative)
            the number of search iterations to perform during MCTS.

        Returns
        -------
        int
            the best action in 'game' for 'current_player' as predicted by
            the agent via the deterministic MCTS policy (i.e., temp=0)
        '''
        # make sure the model is in evaluation mode.
        self.model.eval()
        # if passed a string for current player, convert
        if isinstance(current_player, str):
            player_dict = {'up': UP, 'down': DOWN}
            current_player = player_dict[current_player.casefold()]
        # get the MCTS policy
        game_state = GameState(game, current_player)
        root = PUCTNode(game_state)
        policy = self.MCTS(root, search_iters, temp=0)

        return np.argmax(policy)
Example #13
def runGame(p1, p2, mute):
    p1.resetForNewGame()
    p2.resetForNewGame()
    d1 = Dealer(5)
    p1.cards = d1.dealCards()[0]
    p2.cards = d1.dealCards()[0]
    gameState = GameState(p1, p2)
    aqlearnScore = 0
    gameLoop = True
    while gameLoop:
        #! aqlearn agent steps:
        #   1. update
        #   2. get action
        #   3. generate successor
        p1.update(gameState, aqlearnScore)
        action = p1.doAction(gameState)

        p1.pickCard(action)
        p2.pickCard()

        roundWinner = gameState.judgeRound(p1, p2)

        #? generating successor
        if roundWinner == p1:
            p1.accumulatedCards[p1.playedCard[1]] += 1
        if roundWinner == p2:
            p2.accumulatedCards[p2.playedCard[1]] += 1

        #? get transitional rewards
        p1TransitionalReward = gameState.getRewards(p1, p2)
        aqlearnScore += p1TransitionalReward

        winner = gameState.judgeGameOver(p1, p2)
        if winner == p1:
            p1.update(gameState, aqlearnScore)
            if not mute:
                print(bcolors.OKBLUE + "Game Over!", p1.name + " wins!" + bcolors.ENDC)
                p1.printEpisodeInfo()
            return (aqlearnScore, "aql")
        elif winner == p2:
            p1.update(gameState, aqlearnScore)
            if not mute:
                print(bcolors.FAIL + "Game Over!", p2.name + " wins!" + bcolors.ENDC)
                p1.printEpisodeInfo()
            return (aqlearnScore, "greedy")

        p1.cards.append(d1.generateRandomCard())
        p2.cards.append(d1.generateRandomCard())
Example #14
import pygame
from pygame import Color
from gameState import GameState
from bots.jack import Jack
from bots.sunny import Sunny

if __name__ == "__main__":
    pygame.init()
    clock = pygame.time.Clock()

    gameWidth = 20
    gameHeight = 15
    blockWidth = 40
    initPos = (gameWidth / blockWidth / 2, gameHeight / blockWidth / 2)

    pygame.display.set_caption("Armageddon Blitz")

    game = GameState(gameWidth, gameHeight, blockWidth)
    game.add_bot(Jack(69), Color('orange'))
    game.add_bot(Sunny(420), Color('red'))
    game.run_game_loop()
Example #15
import pygame
# Assumed module paths; the snippet's earlier imports are truncated.
from map import Map
from gameState import GameState
from gui import GUI
from player import Player
from boss import Boss
from battle import Battle
from politechnikomon import Politechnikomon


def set_done():
    # Flag the main loop (below) to stop.
    gameState.done = True


#Initialization
pygame.init()
screen = pygame.display.set_mode((800, 600))

#Data setup
game_map = Map('map.png', 'tiles.png')  # avoid shadowing the builtin map
gameState = GameState()
gui = GUI(gameState, screen)
player = Player('player.png')
boss = Boss('boss.png')
ppaix = Politechnikomon('pppix.png', 'Ppaix', 20)
paichu = Politechnikomon('paichu.png', 'Paichu', 20)
oirposan = Politechnikomon('oirposan.png', 'Oirposan', 20)
niespanier = Politechnikomon('niespanier.png', 'Niespanier', 50)
game_map.generate_map()
TILE_SIZE = 40
position = [0, 0]

#Main loop
while not gameState.done:
    if (gameState.mode == 'map'):
        screen.fill((0, 0, 0))
Example #16
File: ROTA.py  Project: Qu-CMU/public
            return response['data']['games_won'], response['data']['board']

    print("Error \'next\'\n" + str(response))
    sys.exit()

def get_hash(hashString):
    print("Saving hash to " + email + "_hash.txt")
    with open(email + "_hash.txt", "w") as f:
        f.write("Email: " + email + "\nHash: " + hashString)
    sys.exit()

##------------------------------------------------------------------------------------------
## Play the game
    
if __name__ == '__main__':
    player = GameState()
    state = new_game()
    numMoves = 0
    numGames = 0
    startTime = time.time()

    ## Play games until the hash is received (the challenge requires 50 games)
    while True:
        print('Game Number: {0}'.format(numGames))
        print('Time Elapsed: {0:3f}'.format(time.time() - startTime))

        ## Make moves until the game is over (the game requires 30 moves without losing).
        while numMoves < 30:
    ##        print(str(state[0]) + str(state[1]) + str(state[2]))
    ##        print(str(state[3]) + str(state[4]) + str(state[5]))
    ##        print(str(state[6]) + str(state[7]) + str(state[8]))
Example #17
def main(pigalgo, stonealgo, numStoneAgents, numPigAgents, maxDepth=None, quiet=False):

    # Select the pig agent algorithm
    if pigalgo == 'random':
        pigplayers = [pigAgent.rrandomPigAgent(i) for i in range(numPigAgents)]
    elif pigalgo == 'simple':
        pigplayers = [pigAgent.simplePigAgent(i) for i in range(numPigAgents)]
    elif pigalgo == 'complex':
        # There isn't really a complex pig agent, so fall back to simple.
        pigplayers = [pigAgent.simplePigAgent(i) for i in range(numPigAgents)]
    elif pigalgo == 'minimax':
        pigplayers = [pigAgent.minimaxPigAgent(i) for i in range(numPigAgents)]
    elif pigalgo == 'alphabetaminimax':
        pigplayers = [pigAgent.alphaBetaPigAgent(i) for i in range(numPigAgents)]
    else:
        raise Exception('Invalid algo name')

    # Select the stone agent algorithm
    if stonealgo == 'random':
        stoneplayers = [stoneAgent.rrandomStoneAgent() for _ in range(numStoneAgents)]
    elif stonealgo == 'simple':
        stoneplayers = [stoneAgent.simpleStoneAgent() for _ in range(numStoneAgents)]
    elif stonealgo == 'complex':
        stoneplayers = [stoneAgent.complexStoneAgent() for _ in range(numStoneAgents)]
    elif stonealgo == 'minimax':
        stoneplayers = [stoneAgent.minimaxStoneAgent() for _ in range(numStoneAgents)]
    elif stonealgo == 'alphabetaminimax':
        stoneplayers = [stoneAgent.alphaBetaStoneAgent() for _ in range(numStoneAgents)]
    else:
        raise Exception('Invalid algo name')

    players = pigplayers + stoneplayers

    GS = GameState(N_ROWS, N_COLS, players, numPigs=numPigAgents, quiet=quiet)

    # Tkinter window config
    if not quiet:
        window = tk.Toplevel()
        window.title('Block The Pig')
        window.minsize(width=500, height=500)
        window.protocol('WM_DELETE_WINDOW', cleanUp)

    # Draw window
    if not quiet:
        GS.draw(window)

    def update():
        if GS.allPigsEscaped() or GS.allPigsCaptured() or GS.allPigsEscapedOrCaptued():
            if not quiet:
                cleanUp()

            score = GS.nPigsEscaped()
            print('Game ended with:', score)
            return score

        GS.play()  # where the magic happens
        if not quiet:
            GS.draw(window)
            # Note: after() discards update()'s return value, so the score
            # cannot propagate back in GUI mode.
            root.after(TIME_DELAY, update)
        else:
            return update()

    if not quiet:
        root.after(TIME_DELAY, update)
    else:
        return update()

    if not quiet:
        root.mainloop()
Example #18
    def __init__(self):
        self.board = Board()
        self.game_state = GameState()
        self.selected_square = [0, 0]
        self.possible_moves = []
        self.white_turn = True
Example #19
from gameState import GameState

## Basic fuzzing test
## We have the GameState play against itself.
## Since it is an FSM designed to never reach a terminal state, the games
## should go on indefinitely.
## GameState() will randomly select a path given multiple possible moves,
## allowing for complete path coverage eventually.
testPlayer = GameState()

## Run 100 games
for i in range(100):
    state = list('---------')

    ##    print("%c%c%c" %(state[0],state[1],state[2]))
    ##    print("%c%c%c" %(state[3],state[4],state[5]))
    ##    print("%c%c%c\n" %(state[6],state[7],state[8]))

    ## Play 100 moves
    for _ in range(100):
        update = testPlayer.get_next_move(state, player=1)
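        # Move encoding as exercised by this test: a 1-tuple places a new
        # piece at square update[0]; a 2-tuple moves a piece from square
        # update[0] to update[1] (squares are 1-indexed).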

        if len(update) == 1:
            state[update[0] - 1] = 'p'
        else:
            state[update[0] - 1] = '-'
            state[update[1] - 1] = 'p'

##        print("Player 1")
##        print("%c%c%c" %(state[0],state[1],state[2]))
##        print("%c%c%c" %(state[3],state[4],state[5]))
##        print("%c%c%c\n" %(state[6],state[7],state[8]))
Example #20
def menuLoop():
    import ui
    import otherEffects
    done = False
    hud = ui.HudScreen()
    hud.set_button_text(0, "Play")
    hud.set_button_text(1, "Editor")
    hud.set_button_text(2, "Tutorial")
    hud.set_button_text(3, "About")
    hud.set_button_text(4, "Exit")
    flag = None
    pygame.mouse.set_visible(True)
    pygame.event.set_grab(False)

    fractal = otherEffects.ChaosObject(
        (renderer.SCREEN_WIDTH // 2, (renderer.SCREEN_HEIGHT // 2) + 15), 225,
        3)
    s_field = otherEffects.StarField(renderer.SCREEN_SIZE)

    hud.onChangedButton.append(s_field.change_speed)
    while not done:
        deltaTime = clock.get_time() / 1000

        events = pygame.event.get()
        flag = hud.update(deltaTime, events)
        for event in events:

            if event.type == pygame.QUIT:
                return GameState.Quit

            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_p:
                    return GameState.Quit

        if hud.selected_button == GameState.Quit.value:
            s_field.speed += deltaTime * 75
            if s_field.speed > 100:
                s_field.speed = 100

        for _ in range(10):
            fractal.update()

        s_field.update(deltaTime)

        # region Buttons

        s_field.draw()
        renderer.SCREEN.blit(s_field, (0, 0))
        fractal.draw(renderer.SCREEN)

        textDraw.message_display_MT(renderer.SCREEN, "The dawn of Otrozhny",
                                    renderer.SCREEN_WIDTH // 2, 100, 30)
        textDraw.message_display_MT(renderer.SCREEN, "Containment breach",
                                    renderer.SCREEN_WIDTH // 2, 150, 30)

        if flag is not None:
            return GameState(flag)

        hud.draw()
        pygame.display.update()
        clock.tick()
Example #21
    def approximate_outcome(self, game, search_iters):
        '''Approximates the outcome of game. For this to make sense the
        game must be a normal play short partisan combinatorial game (a
        combinatorial game which ends in a finite number of moves and in
        which the last player to move wins).

        The approximate outcome is found by having the agent self-play
        the game twice: once with each player starting. The results are
        combined via the outcome rules for normal play short partisan
        combinatorial games to determine an approximate outcome.

        (For more info on combinatorial games see:
        https://en.wikipedia.org/wiki/Combinatorial_game_theory)

        Parameters
        ----------
        game : UpDown
            a game of upset-downset.
        search_iters : int (positive)
            the number of search iterations to perform during each MCTS.

        Returns
        -------
        approx_out : str
            'Next', if the Next player (first player to move) wins.
            'Previous', if the Previous player (second player to move) wins.
            'Up', if Up can force a win (playing first or second).
            'Down', if Down can force a win (playing first or second).
        '''
        # make sure the model is in evaluation mode.
        self.model.eval()
        # game state from each player's perspective
        up_start = GameState(game, UP)
        down_start = GameState(game, DOWN)
        # action space and a store for the outcomes found.
        actions = np.arange(MAX_NODES)
        outcomes = []
        # self-play, once with each player moving first
        for game_state in [up_start, down_start]:
            # set root
            root = PUCTNode(game_state)
            move_count = 0
            # play until a terminal state is reached
            while not root.state.is_terminal_state():
                policy = self.MCTS(root, search_iters, temp=0)
                move = np.random.choice(actions, p=policy)
                root = root.edges[move]
                root.to_root()
                move_count += 1

            # record outcome: 'P' if the second player to move won,
            # 'N' if the first player to move won
            out = 'P' if (move_count % 2 == 0) else 'N'
            outcomes.append(out)

        # get outcome prediction: P/P -> 'Previous', N/N -> 'Next',
        # P/N -> 'Down', N/P -> 'Up'
        up_start_out, down_start_out = outcomes
        if up_start_out == 'P' and down_start_out == 'P':
            approx_out = 'Previous'
        elif up_start_out == 'N' and down_start_out == 'N':
            approx_out = 'Next'
        elif up_start_out == 'P' and down_start_out == 'N':
            approx_out = 'Down'
        elif up_start_out == 'N' and down_start_out == 'P':
            approx_out = 'Up'

        return approx_out