Code Example #1
from operator import methodcaller  # needed for the sort key below

# Game and card_set are assumed to be imported/defined at module level elsewhere in the file
def selection(population, rounds=10):
    for deck in population:
        deck.reset()

    for i, deck_1 in enumerate(population):
        for j, deck_2 in enumerate(population):
            if i > j:
                game = Game(card_set, (deck_1.deck, deck_2.deck))
                for k in range(rounds):
                    result = tuple(game.play())
                    if result[0] == 1:
                        deck_1.won += 1
                        deck_2.lost += 1
                    elif result[1] == 1:
                        deck_1.lost += 1
                        deck_2.won += 1
                    else:
                        deck_1.draw += 1
                        deck_2.draw += 1
            else:
                break

    population.sort(key=methodcaller('winrate'), reverse=True)
    # print(population[0].deck, population[0].winrate(), sep='\t')
    # print(calculate_card_ii(population, card_set))
    return population[:len(population) // 2]
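selection() assumes each population entry exposes deck, won, lost, draw, reset(), and winrate(); the project's wrapper class is not shown in this listing, so the minimal sketch below is only an assumption about its shape (the name DeckCandidate is made up).

class DeckCandidate:
    """Hypothetical population entry with the members selection() relies on."""

    def __init__(self, deck):
        self.deck = deck                      # sequence of card ids passed to Game()
        self.won = self.lost = self.draw = 0

    def reset(self):
        self.won = self.lost = self.draw = 0

    def winrate(self):
        games = self.won + self.lost + self.draw
        return self.won / games if games else 0.0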
Code Example #2
    def create_widgets(self):
        self.game = Game()
        self.game_tiles = GameTiles(master=self)
        self.draw_game_tiles()
        self.game_tiles.grid()

        self.score_strvar = tk.StringVar()
        self.game_score = tk.Label(self, textvariable=self.score_strvar)
        self.draw_score()
        self.game_score.grid()

        self.new_game_button = tk.Button(self,
                                         text="New Game",
                                         command=self.new_game)
        self.new_game_button.grid()

        self.load_game_button = tk.Button(self,
                                          text="Load Game",
                                          command=self.load_game)
        self.load_game_button.grid()

        self.quit = tk.Button(self,
                              text="Quit",
                              fg="red",
                              command=self.master.destroy)
        self.quit.grid()
Code Example #3
def fight():
    form = DeckForm()

    if form.validate_on_submit():
        angel_deck = tuple(
            map(int,
                form.angel_deck.data.replace(' ', '').split(',')))
        demon_deck = tuple(
            map(int,
                form.demon_deck.data.replace(' ', '').split(',')))

        card_set = import_card_set_from_file("test_set.scg")
        game = Game(card_set, (angel_deck, demon_deck))

        angel = 0
        demon = 0
        tests = 100
        for i in range(tests):
            result = tuple(game.play())
            angel += result[0]
            demon += result[1]

        response = "<p>Angel winrate: " + str(100 * angel / tests) + "%</p>"
        response += "<p>Demon winrate: " + str(100 * demon / tests) + "%</p>"
        response += "<p>Draw rate: " + str(100 *
                                           (1 -
                                            (angel + demon) / tests)) + "%</p>"
        return response

    return render_template("index.html", form=form)
Code Example #4
def index():
    newGame = Game()
    newGame.story = {}
    with open('chapter1.ch', 'rb') as chapter:
        story = pickle.load(chapter)
        newGame.story_flow(story)
    if request.method == 'POST':
        valid_inputs = request.form.get('valid_inputs')
        return valid_inputs
    return render_template('index.html')
Code Example #5
 def add_player(self, player_guid):
     player = self.new_player(player_guid)
     if len(self.waiting_games) > 0:
         game = self.waiting_games.pop(0)
     else:
         game = Game(str(uuid.uuid4()))
         self.waiting_games.append(game)
     state = game.add_player(player)
     if len(game.players) == 2:
         self.running_games[game.guid] = game
     return state
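The method above implements a "pop a waiting game or open a new one" matchmaking pattern. The self-contained sketch below reproduces that pattern with plain dicts; WaitingLobby and its simplified game records are illustrative stand-ins, not the project's classes.

import uuid

class WaitingLobby:
    def __init__(self):
        self.waiting_games = []   # games still missing a second player
        self.running_games = {}   # guid -> game with both seats filled

    def add_player(self, player_guid):
        if self.waiting_games:
            game = self.waiting_games.pop(0)
        else:
            game = {"guid": str(uuid.uuid4()), "players": []}
            self.waiting_games.append(game)
        game["players"].append(player_guid)
        if len(game["players"]) == 2:
            self.running_games[game["guid"]] = game
        return game["guid"]

lobby = WaitingLobby()
print(lobby.add_player("alice"))  # opens a new waiting game
print(lobby.add_player("bob"))    # fills it and moves it to running_games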
Code Example #6
File: drone_game.py Project: Psykopear/DroneGame
def main(size, x, y, drone_x, drone_y, knowledge=False):
    MATRIX_SIZE = int(size)
    END_X = int(x)
    END_Y = int(y)
    START_X = int(drone_x)
    START_Y = int(drone_y)
    KNOWLEDGE = knowledge
    world = world_generator(MATRIX_SIZE, END_X, END_Y, KNOWLEDGE)
    if KNOWLEDGE:
        drones = [DroneKnowledge(world, START_X, START_Y, MATRIX_SIZE)]
    else:
        drones = [Drone(MATRIX_SIZE, START_X, START_Y)]
    game = Game(world, drones, KNOWLEDGE)
    return game.start_game()
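Since every parameter is cast with int(), main() appears to be driven by string arguments from a command line; a direct call such as the hypothetical one below (with made-up coordinates) behaves the same way.

if __name__ == "__main__":
    # illustrative values only: 20x20 world, goal in the far corner, drone at the origin
    result = main(size=20, x=19, y=19, drone_x=0, drone_y=0, knowledge=True)
    print(result)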
Code Example #7
def multiplayer(game: Game):
    # show game state at the start of the game
    print(game.__dict__)
    while game.state == State.CONTINUE:
        # print board
        for row in game.board:
            print(row)

        # Display player's turn, pieces left... and input move
        if game.player_turn == Player.ODD:
            player = '[odd]'
            pieces_left = [ piece for piece in game.odd_pieces.values() if piece ]
        else:
            player = '[even]'
            pieces_left = [ piece for piece in game.even_pieces.values() if piece ]

        piece = int(input(player +  " select piece to play: (options: " + str(pieces_left) + ") "))
        row = int(input(player +  " select row to place piece: "))
        column = int(input(player +  " select column to place piece: "))
        print(game.play(piece, row, column))

    # show end of game message and final board configuration
    if game.state == State.EVEN_WIN:
        print('even player won:')
    elif game.state == State.ODD_WIN:
        print('odd player won:')
    else:
        print('draw:')

    for row in game.board:
        print(row)
Code Example #8
def run_game(gui_input=None):
    setup_basic_logging(gui_input)

    logger.info('Выбираем пользователя...')  # "Selecting a user..."

    site, settings = get_site(gui_input)

    Game(site, settings, UserPrompt(gui_input), gui_input=gui_input).start()
Code Example #9
def singleplayer(game: Game):
    if input("enter e to play as even, o to play as odd: ") == 'e':
        user = Player.EVEN
        agent = Agent(Player.ODD)
    else:
        user = Player.ODD
        agent = Agent(Player.EVEN)


    # show game state at the start of the game
    game_info = game.__dict__
    print(game_info)
    while game.state == State.CONTINUE:
        # print board
        print()
        print('board:')
        for row in game.board:
            print(row)
        print()
        

        if game.player_turn == user: # get user input
            # Display player's turn, pieces left... and input move
            if user == Player.ODD:
                player = '[odd]'
                pieces_left = [ piece for piece in game.odd_pieces.values() if piece ]
            else:
                player = '[even]'
                pieces_left = [ piece for piece in game.even_pieces.values() if piece ]

            piece = int(input(player +  " select piece to play: (options: " + str(pieces_left) + ") "))
            row = int(input(player +  " select row to place piece: "))
            column = int(input(player +  " select column to place piece: "))

        else: # get agent input
            move = agent.get_move(game)
            piece = move['piece']
            row = move['row']
            column = move['col']

        game_info = game.play(piece, row, column)
        print()
        print('game engine object information:')
        print(game_info)
        print()


    # show end of game message and final board configuration
    if game.state == State.EVEN_WIN:
        print('even player won:')
    elif game.state == State.ODD_WIN:
        print('odd player won:')
    else:
        print('draw:')

    for row in game.board:
        print(row)
Code Example #10
async def on_message(message):

    # Do not want the bot to reply to itself
    if message.author == client.user:
        return

    if message.content.startswith('!test'):
        await message.channel.send(message.channel.type)

    # Message comes from a text channel (not a direct message)
    if str(message.channel.type) == "text":

        # Help
        if message.content.startswith('!help'):
            msg = '''   !join - 
    !start - 
    !help - this page
    !whisper -
    In DM:
        !stats
    '''
            await message.channel.send(msg)

        # Test whisper
        if message.content.startswith('!whisper'):
            #user=await client.get_user_info(message.author.id)
            await message.author.send("I'm a very tall midget")

        # Join a game
        if message.content.startswith('!join'):
            current_channel = str(message.channel.guild) + str(
                message.channel.id)
            msg = 'User {0.author.mention} is included in the {1} game'.format(
                message, current_channel)

            # If a game is already active in this channel
            if current_channel in games:
                # Don't allow duplicate players
                if message.author in games[current_channel]:
                    msg = '{0.author.mention} is already in {1} game'.format(
                        message, current_channel)
                else:
                    games.addUser(current_channel, message.author)
            else:
                games.addChannel(current_channel)
                games.addUser(current_channel, message.author)

            await message.channel.send(msg)

        # Show the users in the current game in the current channel
        if message.content.startswith('!users'):
            current_channel = str(message.channel.guild) + str(
                message.channel.id)
            msg = '''Joined users in game:
            '''
            if current_channel in games:
                for user in games[current_channel]:
                    msg += '''user {}
                    '''.format(user)
            else:
                msg = ''' No users in THE GAME'''
            await message.channel.send(msg)

        # Start the game in this channel
        if message.content.startswith('!start'):
            current_channel = str(message.channel.guild) + str(
                message.channel.id)
            if current_channel in games:
                if len(games[current_channel]) < 1:  #TODO 3
                    msg = '''Not enough people'''
                elif len(games[current_channel]) > 8:
                    msg = '''Too much people'''
                else:
                    msg = '''The GAME starts'''

                    # games is a dictionary: key = channel, val = list of users
                    for user in games[current_channel]:
                        users[user.id] = current_channel
                        await client.get_user(user.id).send("Private DM")

                    # TODO: start the game asynchronously
                    game = Game(players=[
                        Game.Player(i, user.id, {'active': True}, {
                            'gems': 0,
                            'p_gems': 0
                        }) for i, user in enumerate(games[current_channel])
                    ], )
                    threading.Thread(target=game.main_loop).start()

            await message.channel.send(msg)
            await message.channel.send("Game ")

            # Check again in the future (?)
            timestamps[current_channel] = int(
                datetime.timestamp(datetime.now()))
            for user in games[current_channel]:
                usersresponse[current_channel] = {user: 0}
            # await check_turn(client,timestamps, usersresponse)
            # channelgame[current_channel] = Game()

            # resp = channelgame[current_channel].Start()

    else:  # Private DM
        if message.content.startswith('!stats'):

            if message.author.id in users:
                msg = str(users[message.author.id]) + str(
                    usersresponse[users[message.author.id]][message.author.id])
            else:
                msg = "No game has started "

            await message.author.send(msg)

        # Player wants to continue
        if message.content.startswith('!continue'):
            if message.author.id in users:
                # usersresponse is a dict: key = channel ID, val = users in that game
                usersresponse[users[message.author.id]][message.author.id] = 1
                await check_turn(client, timestamps, usersresponse)

        # Player does not want to continue
        if message.content.startswith('!leave'):
            if message.author.id in users:
                usersresponse[users[message.author.id]][message.author.id] = -1
                await check_turn(client, timestamps, usersresponse)
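For context, a handler like on_message above still has to be attached to a client before it runs. Below is a minimal wiring sketch, assuming the discord.py 1.x API that the handler uses (channel and author send() are coroutines there) and that the module-level games/users/usersresponse/timestamps objects are defined elsewhere in the file; the token string is a placeholder.

import discord

client = discord.Client()
client.event(on_message)      # registers the coroutine above under its own name
client.run("YOUR_BOT_TOKEN")  # placeholder token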
Code Example #11
File: GamesManager.py Project: KyVakl/MOTUX
 def createNewGame(self, dico):
     return Game.Game(self.m_options, dico)
Code Example #12
#!/usr/bin/python3
from game_engine import Game
import random as rd

g = Game(log=True)

dino = g.create_box(2, 3, 5, 20)

boxes = []
boxes.append(g.create_box(2, 2, 70, 21))

## need to save and display highscore
time_elapsed = 0
score = 0
delay = 50
score_box = g.create_box(20, 1)

while True:
    #k = g.next_frame(delay, score) # enter delay in ms

    # Update stats
    time_elapsed += 50
    score += int(time_elapsed / 10000) + 1

    # Update boxes
    for box in boxes:
        box.set_x(box.x - 1)

    #if time_elapsed % 10000 == 0:
    #delay -= 2
    if time_elapsed % 1000 == 0:
        # the original snippet is cut off here; presumably another obstacle box
        # is spawned at this point, e.g. boxes.append(g.create_box(2, 2, 70, 21))
        pass
Code Example #13
        # (fragment picking up inside the multiplayer() game loop shown in Code Example #7)
        # Display player's turn, pieces left... and input move
        if game.player_turn == Player.ODD:
            player = '[odd]'
            pieces_left = [ piece for piece in game.odd_pieces.values() if piece ]
        else:
            player = '[even]'
            pieces_left = [ piece for piece in game.even_pieces.values() if piece ]

        piece = int(input(player +  " select piece to play: (options: " + str(pieces_left) + ") "))
        row = int(input(player +  " select row to place piece: "))
        column = int(input(player +  " select column to place piece: "))
        print(game.play(piece, row, column))

    # show end of game message and final board configuration
    if game.state == State.EVEN_WIN:
        print('even player won:')
    elif game.state == State.ODD_WIN:
        print('odd player won:')
    else:
        print('draw:')

    for row in game.board:
        print(row)


game = Game()
if input("enter m to play multiplayer, s to play singleplayer: ") == 'm':
    multiplayer(game)
else:
    singleplayer(game)
Code Example #14
from game_engine import Game
from game_engine import Level

if __name__ == '__main__':
    game = Game()
    game.run()
Code Example #15
File: tiny_man.py Project: G-Eazy/DinoTerminalGame
from game_engine import Game
import random as rd
import subprocess
g = Game(log=True)

dino = g.create_box(2, 3, 5, 20)
dino.set_text("^^\n-0-\n/\\")
score_box = g.create_box(5, 10, 1, 1)
width, height = g.dimensions()
score = 0
boxes = []
ground = g.create_box(width - 1, 3, 0, int(height / 2) + 4)
ground.set_text("^" * 200, repeatable=True)


def fire():
    #os.system('exec aplay -q ~/Div/waw-files/boing_poing.wav')
    if (count_shots() < 3):
        subprocess.Popen(["aplay", "-q", "../sounds/boing_poing.wav"])
        boxes.append(
            g.create_box(1, 1, dino.x + dino.width + 1, dino.y + 1, shot=True))


def jump():
    dino.set_y(dino.y - 2)


def crouch():
    dino.set_y(dino.y + 2)

Code Example #16
def main(
    model_h5_file: str,
    target_h5_file: str,
    two_tile_prob: float,
    random_seed: int,
    # num_games: int,
    # val_split: float,
    train_game_dir: str,
    # val_game_dir: str,
):
    """
    Build training/validation dataset by playing N games.
    model_h5_file - path to a model saved in .h5 file to use when selecting
    actions (via epsilon-greedy method), etc.
    target_h5_file - path to a model saved in .h5 file to use when computing
    labels for experience tuples
    """

    # build training dataset by playing N complete games
    data_train = []
    labels_train = []
    data_val = []
    labels_val = []
    TWO_TILE_PROB = two_tile_prob
    RANDOM_SEED = random_seed
    # NUM_GAMES = num_games
    # VAL_SPLIT = val_split
    # NUM_TRAIN_GAMES = int(NUM_GAMES * (1 - VAL_SPLIT))
    TRAIN_GAME_FILES_DIR = train_game_dir
    # VAL_GAME_FILES_DIR = val_game_dir

    print(f"==== Loading model from {model_h5_file} ====")
    value_model = load_model(model_h5_file)

    print(f"==== Loading target model from {target_h5_file} ====")
    target_model = load_model(target_h5_file)

    # Weights & Biases
    wandb.init(project="2048-deep-rl")

    # for epsilon-greedy action selection
    # set initial epsilon to 1, and then linearly anneal to a lower value
    epsilon_start = 1.0
    epsilon_final = 0.05
    epsilon_anneal_start_t = 1
    epsilon_anneal_end_t = 50_000

    # discount factor
    gamma = 0.99

    # hold the target Q-model fixed for this many timesteps before updating with minibatch
    target_update_delay = 10_000

    # save value model
    value_model_save_period = 1_000

    # Each SGD update is calculated over this many experience tuples (sampled randomly from the replay memory)
    minibatch_size = 32

    # SGD updates are sampled from this number of the most recent experience tuples
    replay_memory_capacity = 10_000

    # how many timesteps of learning per episode
    timesteps_per_episode = 100_000

    # populate replay memory by using a uniform random policy for this many timesteps before learning starts
    burnin_period = 10_000

    # action is encoded as an int in 0..3
    # TODO refactor this to be a global defined in a different file, maybe experience_replay_utils.py?
    ACTIONS = ["Up", "Down", "Left", "Right"]

    value_model.compile(optimizer="sgd", loss="mean_squared_error")

    # Generate the experience tuples to fill replay memory for one episode of training
    #
    # replay memory format:
    # - state,
    # - action,
    # - reward,
    # - max Q-value of successor state based on target network,
    # - Q(state,action) based on current network

    # TODO abstract away the generation of experience tuples into a generator class?

    replay_memory_ndarray = np.zeros((replay_memory_capacity, 2 * 16 * 17 + 2))
    replay_memory_idx = 0
    game = Game()
    game.new_game(random_seed=RANDOM_SEED, game_dir=TRAIN_GAME_FILES_DIR)
    np.random.seed(RANDOM_SEED)
    print(f"New game (random seed = {RANDOM_SEED})")

    for t in range(burnin_period):
        if game.state.game_over:
            RANDOM_SEED = random.randrange(100_000)
            game = Game()
            game.new_game(random_seed=RANDOM_SEED,
                          game_dir=TRAIN_GAME_FILES_DIR)
            np.random.seed(RANDOM_SEED)
            print(f"New game (random seed = {RANDOM_SEED})")

        current_state = game.state.copy()
        # print("current state:", current_state.tiles)

        # choose an action uniformly at random during the burn-in period (to initially populate the replay memory)
        action = np.random.choice(np.arange(4))

        # update current state using the chosen action
        game.move(ACTIONS[action])
        new_state = game.state.copy()
        # print("new state:", new_state.tiles)
        reward = new_state.score - current_state.score

        # save the (s,a,s',r) experience tuple (flattened) to replay memory
        exp = ExperienceReplay(current_state.tiles, action, new_state.tiles,
                               reward)
        replay_memory_ndarray[replay_memory_idx] = exp.flatten()
        replay_memory_idx = replay_memory_idx + 1
        if replay_memory_idx == replay_memory_capacity:
            replay_memory_idx = 0
        # if reward > 0:
        #     print(f"experience tuple with reward: {exp}")

    # replay_memory_ndarray = np.asarray(replay_memory)
    print("replay_memory shape:", replay_memory_ndarray.shape)
    # assert len(replay_memory) == burnin_period
    # print("writing replay memory to file:")
    # with open("replay_memory_burnin.txt", "w") as burn_in_file:
    #     for i in range(len(replay_memory)):
    #         exp = ExperienceReplay.from_flattened(replay_memory[i])
    #         burn_in_file.write(repr(exp) + "\n\n")

    timesteps_since_last_update = 0
    last_time_check = time.perf_counter()
    for t in range(timesteps_per_episode):
        epsilon = linear_anneal_parameter(
            epsilon_start,
            epsilon_final,
            epsilon_anneal_start_t,
            epsilon_anneal_end_t,
            t,
        )
        fit_verbose = 0
        if (t + 1) % 500 == 0:
            print(f"timestep = {t}, epsilon = {epsilon}")
            new_time = time.perf_counter()
            print(f"avg time per step: {(new_time - last_time_check) / t}")
            fit_verbose = 1

        if game.state.game_over:
            RANDOM_SEED = random.randrange(100_000)
            game = Game()
            game.new_game(random_seed=RANDOM_SEED,
                          game_dir=TRAIN_GAME_FILES_DIR)
            np.random.seed(RANDOM_SEED)
            # print(f"New game (random seed = {RANDOM_SEED})")

        current_state = game.state.copy()
        # print("current state:", current_state.tiles)

        # choose an action (epsilon-greedy)
        epsilon_greedy_roll = np.random.random_sample()
        if epsilon_greedy_roll < epsilon:
            action = np.random.choice(np.arange(4))
            # print("chosen action (randomly):", ACTIONS[action])
        else:
            # choose the "best" action based on current model weights -> Q values
            network_input = np.expand_dims(convert_tiles_to_bitarray(
                current_state.tiles),
                                           axis=0)
            network_output = value_model.predict(network_input)[0]
            assert len(network_output) == 4
            # print(f"network output: {network_output}")
            action = np.argmax(network_output)
            # print("chosen action (best):", ACTIONS[action])

        # update current state using the chosen action
        game.move(ACTIONS[action])
        new_state = game.state.copy()
        # print("new state:", new_state.tiles)
        reward = new_state.score - current_state.score

        # save the (s,a,s',r) experience tuple (flattened) to replay memory
        exp = ExperienceReplay(current_state.tiles, action, new_state.tiles,
                               reward)
        replay_memory_ndarray[replay_memory_idx] = exp.flatten()
        replay_memory_idx = replay_memory_idx + 1
        # if reward > 0:
        #     print(f"experience tuple with reward: {exp}")

        # Constrain replay memory capacity
        if replay_memory_idx == replay_memory_capacity:
            replay_memory_idx = 0
        # if len(replay_memory) > replay_memory_capacity:
        #     shift = len(replay_memory) - replay_memory_capacity
        #     replay_memory = replay_memory[shift:]
        # assert len(replay_memory) <= replay_memory_capacity

        # Sample a minibatch of experience tuples from replay memory
        # replay_memory_ndarray = np.asarray(replay_memory)
        # TODO is the minibatch sampled without replacement?
        minibatch_indices = np.random.choice(replay_memory_ndarray.shape[0],
                                             minibatch_size,
                                             replace=False)
        minibatch = replay_memory_ndarray[minibatch_indices]
        # print(f"minibatch shape: ", minibatch.shape)
        assert minibatch.shape == (minibatch_size,
                                   replay_memory_ndarray.shape[1])

        # Compute the labels for the minibatch based on target Q model (vectorized)
        minibatch_succs = replay_memory_ndarray[minibatch_indices,
                                                (16 * 17 + 1):(2 * 16 * 17 +
                                                               1)]
        minibatch_rewards = replay_memory_ndarray[minibatch_indices,
                                                  (2 * 16 * 17 + 1)]
        target_output = target_model.predict(minibatch_succs)
        best_q_values = np.max(target_output, axis=1)
        labels = minibatch_rewards + gamma * best_q_values

        # # Compute the labels for the minibatch based on target Q model
        # labels = np.zeros((minibatch_size,))
        # # print(f"labels shape: ", labels.shape)
        # for j in range(minibatch_size):
        #     # Parse out (s, a, s', r) from the experience tuple
        #     # minibatch_exp = ExperienceReplay.from_flattened(minibatch[j])
        #     successor_bitarray = minibatch[j, (16 * 17 + 1) : (2 * 16 * 17 + 1)]
        #     reward = minibatch[j, (2 * 16 * 17 + 1)]
        #     target_input = np.expand_dims(successor_bitarray, axis=0)
        #     target_output = target_model.predict(target_input)[0]
        #     best_q_value = np.max(target_output)
        #     # TODO check if the successor state is a terminal state: if so, then the label is just the reward
        #     labels[j] = reward + gamma * best_q_value
        # # print(f"labels: ", labels)

        # Perform SGD update on current Q model weights based on minibatch & labels
        minibatch_x = minibatch[:, :(16 * 17)]
        _first_record = minibatch_x[0].reshape((4, 4, 17))
        # print(f"minibatch_x shape = {minibatch_x.shape}, first record = {_first_record}")
        value_model.fit(x=minibatch_x,
                        y=labels,
                        batch_size=minibatch_size,
                        verbose=fit_verbose)

        model_h5_filename = os.path.splitext(model_h5_file)[0]
        model_h5_out = f"{model_h5_filename}_{t}.h5"
        if t % value_model_save_period == 0 and t > 0:
            print(f"==== Saving value model to {model_h5_out} ====")
            value_model.save(model_h5_out)

        # Only update the target model to match the current Q model every C timesteps
        timesteps_since_last_update += 1
        if timesteps_since_last_update >= target_update_delay:
            timesteps_since_last_update = 0

            # update the target model
            model_h5_out = f"{model_h5_filename}_{t}.h5"
            value_model.save(model_h5_out)
            target_model = load_model(model_h5_out)
            target_h5_filename = os.path.splitext(target_h5_file)[0]
            target_h5_out = f"{target_h5_filename}_{t}.h5"
            print(f"==== Saving target model to {target_h5_out} ====")
            target_model.save(target_h5_out)
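The epsilon schedule above relies on a linear_anneal_parameter() helper that is not shown in this listing; a plausible implementation, offered only as an assumption about its behavior, would be:

def linear_anneal_parameter(start, final, anneal_start_t, anneal_end_t, t):
    """Linearly interpolate from start to final between the two timesteps (assumed behavior)."""
    if t <= anneal_start_t:
        return start
    if t >= anneal_end_t:
        return final
    frac = (t - anneal_start_t) / (anneal_end_t - anneal_start_t)
    return start + frac * (final - start)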
Code Example #17
File: main.py Project: PhearTheCeal/zed-word
from game_engine import Game
import pygame, sys
from pygame.locals import *

pygame.init()
game = Game()
c = pygame.time.Clock()
while __name__ == "__main__":
    c.tick(30)
    game.update()
    for event in pygame.event.get(): 
        if event.type == QUIT: 
            pygame.quit()
            sys.exit()
Code Example #18
from game_engine import Game
import time

g = Game(log=True)

# create box in frame
box = g.create_box(4, 4, 10, 10)
g.next_frame(250)

# create box top left
box2 = g.create_box(4, 4, -2, -2)
g.next_frame(250)

# create box top right
box3 = g.create_box(4, 4, g.width - 2, -2)
g.next_frame(250)

# create box bottom left
box4 = g.create_box(4, 4, -2, g.height - 2)
g.next_frame(250)

# create box bottom right
box5 = g.create_box(4, 4, g.width - 2, g.height - 2)
g.next_frame(250)

# create box outside
box6 = g.create_box(4, 4, -10, -10)
g.next_frame(600)

# remove box outside of frame
g.destroy_box(box6)
Code Example #19
from game_engine import Game
from set_reader import import_card_set_from_file

decks = ([3, 4, 4, 3, 3, 4, 4, 4, 4, 5, 4, 3, 3, 3, 4, 3, 3, 6, 3, 4],
         [6, 5, 6, 6, 6, 6, 4, 5, 5, 6, 6, 6, 6, 4, 6, 6, 6, 5, 5, 4],
         [2, 2, 2, 2, 2, 1, 1, 1, 5, 1, 1, 1, 1, 4, 2, 4, 5, 4, 1, 1],
         [2, 6, 6, 6, 2, 3, 4, 5, 6, 6, 5, 6, 2, 6, 3, 1, 6, 3, 3, 3],
         [2, 5, 7, 2, 2, 7, 7, 7, 7, 7, 2, 2, 2, 3, 2, 2, 7, 7, 2, 7],
         [2, 2, 2, 5, 2, 2, 7, 2, 7, 7, 7, 7, 7, 2, 2, 7, 2, 1, 7, 7],
         [6, 1, 6, 2, 2, 2, 1, 2, 1, 3, 2, 3, 5, 1, 4, 2, 1, 4, 1, 3],
         [5, 1, 5, 5, 1, 4, 1, 1, 5, 1, 13, 6, 7, 2, 2, 5, 13, 4, 13, 3],
         [2, 5, 9, 1, 5, 2, 1, 5, 1, 1, 14, 2, 13, 14, 1, 1, 1, 7, 14, 13],
         [1, 1, 1, 13, 3, 3, 1, 13, 1, 1, 1, 1, 1, 14, 2, 2, 1, 3, 4, 9])

if __name__ == "__main__":
    card_set = import_card_set_from_file("test_set.scg")
    game = Game(card_set, (decks[-2], decks[-1]))
    game.play()
Code Example #20
 def init_game():
     game = GameEngine()
     game.put_player(1, 0)
     game.put_box(4, 6)
     game.put_box(4, 7)
     game.add_goal(1, 3)
     game.add_goal(9, 9)
     game.build_wall(2, 2, 1, 3)
     game.build_wall(0, 4, 3, 1)
     game.build_wall(5, 3, 2, 1)
     game.build_wall(4, 2, 1, 2)
     game.build_wall(4, 0, 1, 1)
     return game
Code Example #21
File: game_test.py Project: dferndz/r2d2
from game_engine import Game

game = Game()
game.put_player(1, 0)
game.put_box(4, 6)
game.put_box(4, 7)
game.add_goal(1, 3)
game.add_goal(9, 9)
game.build_wall(2, 2, 1, 3)
game.build_wall(0, 4, 3, 1)
game.build_wall(4, 4, 3, 1)

while not game.win():
    print(game.board)
    move = input()

    if move == 'w':
        game.up()

    if move == 's':
        game.down()

    if move == 'a':
        game.left()

    if move == 'd':
        game.right()
print(game.board)
Code Example #22
class GameWindow(tk.Frame):
    def __init__(self, master=None):
        super().__init__(master)
        self.master = master
        self.grid()
        self.create_widgets()

    def create_widgets(self):
        self.game = Game()
        self.game_tiles = GameTiles(master=self)
        self.draw_game_tiles()
        self.game_tiles.grid()

        self.score_strvar = tk.StringVar()
        self.game_score = tk.Label(self, textvariable=self.score_strvar)
        self.draw_score()
        self.game_score.grid()

        self.new_game_button = tk.Button(self,
                                         text="New Game",
                                         command=self.new_game)
        self.new_game_button.grid()

        self.load_game_button = tk.Button(self,
                                          text="Load Game",
                                          command=self.load_game)
        self.load_game_button.grid()

        self.quit = tk.Button(self,
                              text="Quit",
                              fg="red",
                              command=self.master.destroy)
        self.quit.grid()

    def draw_game_tiles(self):
        self.game_tiles.draw_tiles(self.game)

    def draw_score(self):
        self.score_strvar.set(f"Score: {self.game.state.score}")

    def new_game(self):
        self.master.bind('<Up>', self.move)
        self.master.bind('<Down>', self.move)
        self.master.bind('<Left>', self.move)
        self.master.bind('<Right>', self.move)

        self.game.new_game()
        self.draw_game_tiles()
        self.draw_score()

    def load_game(self):
        fname = filedialog.askopenfilename(filetypes=(("CSV files",
                                                       "*.csv"), ))
        if fname:
            # TODO add instructions to the GUI?
            self.master.unbind('<Up>')
            self.master.unbind('<Down>')
            self.master.bind('<Left>', self.decrement_turn_number)
            self.master.bind('<Right>', self.increment_turn_number)
            print(f"Loading game from {fname}!")
            with open(fname, 'r') as game_file:
                self.game_states = game_file.readlines()
                self.turn_number = 0
            self.load_game_state()

    def load_game_state(self):
        print(f"turn number = {self.turn_number}")
        self.game.state = GameState.from_csv_line(
            self.game_states[self.turn_number])

        next_action = self.game_states[self.turn_number].split(',')[-1].strip()
        print(f"action from this state: {next_action}")

        self.draw_game_tiles()
        self.draw_score()

    def increment_turn_number(self, event):
        if self.turn_number < len(self.game_states) - 1:
            self.turn_number += 1
            self.load_game_state()

    def decrement_turn_number(self, event):
        if self.turn_number > 0:
            self.turn_number -= 1
            self.load_game_state()

    def move(self, event):
        print(f"moving in dir {event.keysym}")
        self.game.move(event.keysym)
        self.draw_game_tiles()
        self.draw_score()
        if self.game.state.game_over:
            # TODO display a message on GUI
            print("Game over! no moves available")