Example No. 1
def test_reverseDirection():
    paddle = Paddle(Arena())
    assert paddle.getDirection() == 0
    paddle.reverseDirection()
    assert paddle.getDirection() == 1
Example No. 2
from arena import Arena, MonsterGroup
from act_1_monsters import AcidSlime
field = Arena(ID="SpireArena")
groups = [MonsterGroup(), MonsterGroup()]
field.AddGroups(*groups)
# Add one AcidSlime to each group, named slime_1, slime_2, ...
for index, group in enumerate(groups, start=1):
    group.AddMonster(AcidSlime(f"slime_{index}", field, group))
for _ in range(10):
    field.Turn()
Example No. 3
"""
An arena app to test CodinGame competition code.
See the README for more details on the app.
"""

import sys
from arena import Arena

if __name__ == "__main__":
    Arena(sys.argv).run()
Example No. 4
import os
from arena import Arena

token = os.environ['ARENA']
arena = Arena(token)
Example No. 5
import pygame  # needed for the display/event calls below
from arena import Arena  # assumed import path, matching the other examples

# with open('test.txt', 'w') as f:
#     f.write(str(size))

size_fields = 100, 50
black = 0, 0, 0

# `size` (the window size in pixels) is defined earlier in the original
# script, outside this excerpt.
screen = pygame.display.set_mode((size[0], size[1]), pygame.NOFRAME)

# ball = pygame.image.load("intro_ball.gif")
# ballrect = ball.get_rect()

mainloop = True
tick_time = 50

arena = Arena(size_fields)
arena.init(4)

while mainloop:
    key_list = []
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            mainloop = False
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                mainloop = False
            elif event.key == pygame.K_F5:
                arena.init(2)
            elif event.key == pygame.K_KP_PLUS:
                tick_time = int(tick_time * 0.5)
            elif event.key == pygame.K_KP_MINUS:
                # The excerpt ends here; presumably the inverse of K_KP_PLUS.
                tick_time = int(tick_time * 2)
Example No. 6
def test_setPoint_getPoint():
    arena = Arena()
    assert arena.getPoint(5, 5) == 0
    arena.setPoint(5, 5, 5)
    assert arena.getPoint(5, 5) == 5
    assert arena.getPoint(5, 5) != 0
Example No. 7
def test_getWidth():
    arena = Arena()
    assert arena.getWidth() == 30
Example No. 8
    # (The excerpt begins partway through the argument-parser setup: the lines
    # above this point create `parser` and define the `--model` and `--arena`
    # options. The dangling tail of the `--model` help text is kept below,
    # commented out so the snippet stays parseable.)
    #     within the `models` directory. An example is given as `ai_rando`.",
    # )
    parser.add_argument(
        "-d",
        "--debug",
        action="store_true",
        help="Enable debug mode: The Tetris Engine will wait until input \
                is received from your Model before updating the frame")
    parser.add_argument(
        '-s',
        '--seed',
        dest='seed',
        default=69,
        help=
        'The seed for the random number generator. Affects block generation')
    args = parser.parse_args()
    args.seed = int(args.seed)

    if args.model:

        model = import_player(args.model)
        # Let the games begin
        App = TetrisApp(model, debug=args.debug, seed=args.seed)

    if args.arena:
        players = args.arena
        player_models = [import_player(player) for player in players]
        Arena(player_models, debug=args.debug).run_round_robin(seed=args.seed)

    print("Nothing else to do.")
Example No. 9
def array_to_arena(arena_2d):
    arena_obj = Arena()
    for x in range(20):
        for y in range(15):
            arena_obj.set(19-x, y, CellType(arena_2d[x][y]))
    return arena_obj
Example No. 10
from agent import HAgent, AAgent
from helper import *
from trainer import qtrainer
from environment import Env
import random
import numpy as np
import matplotlib.pyplot as plt

if __name__ == '__main__':
    # init size, num_humans, num_targets, amount of half cover
    # np.random.seed(1234)
    # init environment
    num = [20, 1, 1, 20]
    agents = {}
    targets = {}
    # Arena and the place_* helpers are presumably provided by the star
    # import from helper above.
    a = Arena(num[0], num[1], num[2])
    a, agents = place_soldiers(num[1], a, agents)
    a, targets, t_pos = place_targets(num[2], a, targets)
    a = place_half_cover(num[3], a)
    env = Env(a, agents, targets, num[1])

    Q, stat = qtrainer(env, 20, t_pos)
    plt.plot(range(20), stat['ep_rewards'])
    plt.xlabel('Episodes')
    plt.ylabel('Reward')
    plt.show()

    env = env.env_reset()
Example No. 11
from movement import Movement
from arena import Arena
from snake import Snake

arena = Arena()
snake = Snake(arena)
arena.setPoint(5, 5, 5)
arena.setPoint(21, 5, 2)
movement = Movement(arena, snake)


def test_isValidMovement():
    assert movement.isValidMovement(4, 4)
    assert not movement.isValidMovement(5, 5)


def test_isOppositeKey():
    assert movement.isOppositeKey('z', 's')
    assert not movement.isOppositeKey('s', 'd')


def test_moveSnake():
    movement.moveSnake('z')
    assert not movement.isGameOver()
    movement.moveSnake('d')
    assert movement.isGameOver()


def test_movePaddle():
    assert arena.getPoint(1, 4) == 4
    assert arena.getPoint(1, 3) == 0
Example No. 12
def example_2():
    """
	Example 2: Parameter study of various Players on a nonstationary Bandit.
	Produces the results in the form of a single plot.

	For a stationary Bandit, set all values of delta_mean_list and
	delta_stddev_list to 0.

	"""
    # Initialises the Arena and all required inputs.
    arena = Arena('base_problem')
    actions_list = [10]
    timesteps_list = [1000]
    runs_list = [2000]
    init_mean_list = [0]
    init_stddev_list = [1]
    action_stddev_list = [1]
    delta_mean_list = [0]
    delta_stddev_list = [0.01]
    first_considered_reward_step_list = [0]

    # Initialises the study ranges for all Players.
    epsilon_study_range = np.logspace(-7, -1, num=7, base=2.0,
                                      dtype=float).tolist()
    initial_Q_study_range = np.logspace(-2, 3, num=6, base=2.0,
                                        dtype=float).tolist()
    confidence_level_study_range = np.logspace(-4,
                                               3,
                                               num=8,
                                               base=2.0,
                                               dtype=float).tolist()
    step_size_parameter_study_range = np.logspace(-5,
                                                  2,
                                                  num=8,
                                                  base=2.0,
                                                  dtype=float).tolist()
    parameter_range = np.logspace(-8, 4, num=2, base=2.0, dtype=float).tolist()

    # Creates and adds Bandits to the Arena.
    arena.add_bandits([
        Bandit(*val)
        for val in zip(actions_list, timesteps_list, runs_list,
                       first_considered_reward_step_list, init_mean_list,
                       init_stddev_list, action_stddev_list, delta_mean_list,
                       delta_stddev_list)
    ])

    # Creates and adds Players to the Arena.
    arena.add_players([
        QPlayer(0,
                epsilon_study_range[0],
                study_variable='epsilon',
                study_range=epsilon_study_range
                ),  # epsilon greedy, initial_Q = 0 (study epsilon)
        QPlayer(
            0,
            epsilon_study_range[0],
            0.1,
            study_variable='epsilon',
            study_range=epsilon_study_range
        ),  # epsilon greedy with alpha 0.1, initial_Q = 0 (study epsilon)
        QPlayer(initial_Q_study_range[0],
                0,
                0.1,
                study_variable='initial_Q',
                study_range=initial_Q_study_range
                ),  # greedy with alpha 0.1 (study initial_Q)
        UCBQPlayer(0,
                   confidence_level_study_range[0],
                   study_variable='confidence_level',
                   study_range=confidence_level_study_range
                   ),  # UCB, initial_Q = 0 (study ucb_c)
        UCBQPlayer(0,
                   confidence_level_study_range[0],
                   0.1,
                   study_variable='confidence_level',
                   study_range=confidence_level_study_range
                   ),  # UCB, initial_Q = 0, alpha=0.1 (study ucb_c)
        GradientPlayer(step_size_parameter_study_range[0],
                       study_variable='step_size_parameter',
                       study_range=step_size_parameter_study_range)
    ])  # gradient bandit with baseline (study alpha)

    # Run the Arena in parameter study mode.
    arena.run('parameter_study', parameter_range)
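As the docstring in this example notes, the same study runs on a stationary Bandit once the drift parameters are zeroed. A minimal sketch of that variation, reusing the names from the example above (only delta_stddev_list actually changes):

# Stationary variant of the study above: no drift in the action values.
delta_mean_list = [0]      # already 0 in the example above
delta_stddev_list = [0]    # 0.01 (nonstationary) in the example above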
Example No. 13
def test_moveUp():
    paddle = Paddle(Arena())
    assert paddle.getTop() == 4
    paddle.moveUp()
    assert paddle.getTop() == 3
Example No. 14
def test_getTop():
    paddle = Paddle(Arena())
    assert paddle.getTop() == 4
Example No. 15
    def learn(self):
        """
        Performs numIters iterations with numEps episodes of self-play in each
        iteration. After every iteration, it retrains neural network with
        examples in trainExamples (which has a maximum length of maxlenofQueue).
        It then pits the new neural network against the old one and accepts it
        only if it wins >= updateThreshold fraction of games.
        """

        for i in range(1, self.args['numIters'] + 1):
            # bookkeeping
            log.info(f'Starting Iter #{i} ...')
            # examples of the iteration
            if not self.skipFirstSelfPlay or i > 1:
                iteration_train_examples = deque(
                    [], maxlen=self.args['maxlenOfQueue'])

                for _ in tqdm(range(self.args['numEps']), desc="Self Play"):
                    self.mcts = MCTS(self.game, self.nnet,
                                     self.args)  # reset search tree
                    iteration_train_examples += self.execute_episode()

                # save the iteration examples to the history
                self.train_examples_history.append(iteration_train_examples)

            if len(self.train_examples_history
                   ) > self.args['numItersForTrainExamplesHistory']:
                log.warning(
                    f"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.train_examples_history)}"
                )
                self.train_examples_history.pop(0)
            # backup history to a file
            # NB! the examples were collected using the model from the previous iteration, so (i-1)
            self.save_train_examples(i - 1)

            # shuffle examples before training
            trainExamples = []
            for e in self.train_examples_history:
                trainExamples.extend(e)
            shuffle(trainExamples)

            # training new network, keeping a copy of the old one
            self.nnet.save_checkpoint(folder=self.args['checkpoint'],
                                      filename='temp.pth.tar')
            self.pnet.load_checkpoint(folder=self.args['checkpoint'],
                                      filename='temp.pth.tar')
            pmcts = MCTS(self.game, self.pnet, self.args)

            self.nnet.train(trainExamples)
            nmcts = MCTS(self.game, self.nnet, self.args)

            log.info('PITTING AGAINST PREVIOUS VERSION')
            arena = Arena(
                lambda x, y: np.argmax(pmcts.get_action_prob(x, y, temp=0)),
                lambda x, y: np.argmax(nmcts.get_action_prob(x, y, temp=0)),
                self.game)
            pwins, nwins, draws = arena.play_games(self.args['arenaCompare'])

            log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' %
                     (nwins, pwins, draws))
            if pwins + nwins == 0 or float(nwins) / (
                    pwins + nwins) < self.args['updateThreshold']:
                log.info('REJECTING NEW MODEL')
                self.nnet.load_checkpoint(folder=self.args['checkpoint'],
                                          filename='temp.pth.tar')
            else:
                log.info('ACCEPTING NEW MODEL')
                self.nnet.save_checkpoint(folder=self.args['checkpoint'],
                                          filename=self.get_checkpoint_file(i))
                self.nnet.save_checkpoint(folder=self.args['checkpoint'],
                                          filename='best.pth.tar')
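A quick illustration of the acceptance rule used in learn() above. accepts_new_model is a hypothetical helper and the numbers are made up, but the logic mirrors the code (draws are excluded from the ratio):

def accepts_new_model(nwins, pwins, update_threshold):
    # Reject when there are no decisive games or the new network's win
    # fraction falls below the threshold; accept otherwise.
    return (pwins + nwins) > 0 and nwins / (pwins + nwins) >= update_threshold

assert accepts_new_model(nwins=23, pwins=17, update_threshold=0.55)      # 0.575 -> accept
assert not accepts_new_model(nwins=20, pwins=20, update_threshold=0.55)  # 0.500 -> reject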
Example No. 16
from arena.Arena import *

# ShowBase() and run() are Panda3D globals, presumably made available here by
# the star import above.
ShowBase()

Arena()

run()
Example No. 17
#        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
#        [1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
#        [1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1],
#        [1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1],
#        [1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
#        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
#        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
#        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
#        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
#    ], (8,12), (2,6))
# start = State(arena.start, None, "right", None, 0)

arena_str = input()
arena_obj = json.loads(arena_str)
arena = Arena(arena_obj["world"],
              (arena_obj["snake"]["x"], arena_obj["snake"]["y"]),
              (arena_obj["food"]["x"], arena_obj["food"]["y"]))
start = State(arena.start, None, arena_obj["direction"], None, 0)
end = State(arena.food, None, None, None, None)
if (arena_obj["method"] == "dijkstra"):
    solution = DSP.search(arena, start, end)
elif (arena_obj["method"] == "greedy"):
    solution = GBFS.search(arena, start, end)
else:
    solution = Astar.search(arena, start, end)
result = {}
if solution == None:
    result["status"] = "failure"
    result["message"] = "The snake cannot reach its food"
else:
    result["status"] = "success"
Example No. 18
from planet import Planet
from universe import Universe
from arena import Arena

if __name__ == '__main__':
    p1 = Planet(pos=[60, 70], name='p1', size=40)
    p1.print_location()
    p2 = Planet(pos=[150, 296], name='p2', size=20)
    p2.print_location()
    planet_map = {'p1': p1, 'p2': p2}
    u = Universe(planets=planet_map)
    u.run_sim(save_history=True, num_steps=1000)

    arena = Arena(u)
    arena.setup_universe()
    arena.animate_universe_history(frame_delay_ms=1)
Example No. 19
def test_clearPoint():
    arena = Arena()
    arena.setPoint(1, 1, 1)
    assert arena.getPoint(1, 1) == 1
    arena.clearPoint(1, 1)
    assert arena.getPoint(1, 1) == 0
Example No. 20
    def train(self):
        log.info("Starting training")
        for i in range(self.args["numIters"]):
            log.info("Starting iteration: %d", i)
            if not self.skip_first_self_play or i > 0:
                training_data = deque([], maxlen=self.args["maxlenOfQueue"])
                log.info("Starting to play episodes")
                for _ in tqdm(range(self.args["numEps"]), desc="Self Play"):
                    # Recreate the search tree at the current board
                    self.tree = TreeSearch(self.game, self.agent, self.args)
                    training_data += self.play_episode()
                self.past_train_examples.append(training_data)

            log.info("%d training examples available:",
                     len(self.past_train_examples))
            if len(self.past_train_examples
                   ) > self.args["numItersForTrainExamplesHistory"]:
                # We have too much data. Pop one
                log.info("Too many past training examples, removing one.")
                self.past_train_examples.pop(0)

            log.info("Finished playing episodes. Saving history")
            self.save_train_history(i)
            train_data = []
            for episode in self.past_train_examples:
                train_data.extend(episode)
            np.random.shuffle(train_data)

            # Load the old network into the opponent for self play test
            self.agent.save_checkpoint(folder=self.args["checkpoint"],
                                       filename="temp.pth.tar")
            self.opponent.load_checkpoint(folder=self.args["checkpoint"],
                                          filename="temp.pth.tar")
            opponent_tree_search = TreeSearch(self.game, self.opponent,
                                              self.args)

            self.agent.train(train_data)
            agent_tree_search = TreeSearch(self.game, self.agent, self.args)

            log.info("Starting self play for evaluation")
            arena = Arena(opponent_tree_search, agent_tree_search, self.game)
            opponent_wins, agent_wins, draws = arena.play_round(
                self.args["arenaCompare"])
            log.info("Opponent Wins: %d, Agent Wins: %d, Draws: %d",
                     opponent_wins, agent_wins, draws)

            if opponent_wins + agent_wins == 0 or agent_wins / (
                    opponent_wins + agent_wins) < self.args["updateThreshold"]:
                # Then we reject the model as there were all draws or our new model lost too many
                log.info(
                    "New model failed to beat old model. Loading old checkpoint."
                )
                self.agent.load_checkpoint(folder=self.args["checkpoint"],
                                           filename="temp.pth.tar")
            else:
                # Then our new agent is better than the last one so we should save it
                # We use i+1 as the index as this is the next model
                log.info("New model beat old model.")
                self.agent.save_checkpoint(
                    folder=self.args["checkpoint"],
                    filename=self.get_checkpoint_filename(i + 1))
                self.agent.save_checkpoint(folder=self.args["checkpoint"],
                                           filename='best.pth.tar')
Example No. 21
def test_getHeight():
    arena = Arena()
    assert arena.getHeight() == 15
Example No. 22
import os
import datetime
from time import sleep
import requests
from twython import Twython
from io import BytesIO
from arena import Arena
from credentials import ARENA_ACCESS_KEY, CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET
# Never commit API keys directly to your codebase
arena = Arena(ARENA_ACCESS_KEY)

# The Are.na channel you want to grab pictures from
chan = arena.channels.channel('your-arena-url')

twitter = Twython(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET)
twitter.verify_credentials()
items, page = chan.contents()
for x in items:
    try:
        image = x.image['original']
        url = image['url']
        response = requests.get(url)
        photo = BytesIO(response.content)
        response = twitter.upload_media(media=photo)
        twitter.update_status(media_ids=[response['media_id']])
        print("tweeted! " + x.title)
        # Remove the block from the channel after it has been tweeted, to
        # ensure it doesn't get tweeted again
        chan.remove_block(x.id)
        # Script runs every 10 minutes
        sleep(600)
    except Exception as exc:
        # The excerpt ends mid-try; the original presumably skips blocks that
        # fail to tweet. This minimal handler keeps the snippet runnable.
        print(exc)
Example No. 23
from arena import Arena
from abstract_character import AbstractCharacter
from knight_character import KnightCharacter
from mage_character import MageCharacter
from flask import Flask, request
import json

app = Flask(__name__)

CHARACTERS_DB = 'characters.sqlite'

arena = Arena(CHARACTERS_DB)


@app.route('/arena/characters', methods=['POST'])
def add_character():

    content = request.get_json()

    try:

        if content['type'] == 'knight':
            character = KnightCharacter(
                content['username'], content['health'], content['attack'],
                content['defence'], content['attack_speed'], content['type'],
                content['sword_crit_chance'], content['sword_crit_modifier'],
                content['shield_defence_modifier'])
        elif content['type'] == 'mage':
            character = MageCharacter(content['username'], content['health'],
                                      content['attack'], content['defence'],
                                      content['attack_speed'], content['type'],
Example No. 24
# -*-coding:utf-8-*-
from flask import Flask
from arena import Arena

app = Flask(__name__)

arena = Arena.Arena(10)

__all__ = ['routes', 'process']
from app import routes
Example No. 25
    def test_empty_arena(self):
        snapshot = Snapshot(WorldSnapshot(Arena(0, 0)), Pc(), [])
        self.assertRaises(Exception, lambda: snapshot_to_string(snapshot))
Example No. 26
        # (The excerpt begins inside a class __init__ from earlier in the
        # file; the class wraps a TCP client socket.)
        self.tcp_port = tcp_port
        self.buffer_size = buffer_size
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)


############################## below this is aiqing code ################

exploredArea = 0
cnt = 0  # no. of instructions executed
timeThreshold = 180
percentageLimit = 0.9
timeLimit = 360
reachGoal = 0
startTime = time.time()
robot = Robot()
realTimeMap = Arena()

frontCells = {
    0: [[[2, -1], [3, -1], [4, -1]], [[2, 0], [3, 0], [4, 0]],
        [[2, 1], [3, 1], [4, 1]]],
    1: [[[1, 2], [1, 3], [1, 4]], [[0, 2], [0, 3], [0, 4]],
        [[-1, 2], [-1, 3], [-1, 4]]],
    2: [[[-2, 1], [-3, 1], [-4, 1]], [[-2, 0], [-3, 0], [-4, 0]],
        [[-2, -1], [-3, -1], [-4, -1]]],
    3: [[[-1, -2], [-1, -3], [-1, -4]], [[0, -2], [0, -3], [0, -4]],
        [[1, -2], [1, -3], [1, -4]]]
}

# only keep top right and bottom right lines
rightCells = {
    0: [[[1, 2], [1, 3], [1, 4]], [[-1, 2], [-1, 3], [-1, 4]]],
    # The excerpt is cut off here; entries for the remaining directions (1-3)
    # follow in the original, mirroring frontCells above.
}
Example No. 27
def main():
    pygame.init()

    # set the display mode
    pygame.display.set_caption('Arkanoid')
    screen = pygame.display.set_mode(SCREENRECT.size)

    # load images, assign to sprite classes
    spritesheet = Spritesheet('arinoid_master.bmp')

    # Status.score_image = spritesheet.imgat((0, 440, 103, 17), -1)

    Arena.tiles = spritesheet.imgsat([
        (129, 321, 31, 31),  # purple - 0
        (161, 321, 31, 31),  # dark blue - 1
        (129, 353, 31, 31),  # red - 2
        (161, 353, 31, 31),  # green - 3
        (129, 385, 31, 31),  # blue - 4
    ])

    # left border - 0, right border - 1,
    # special left border - 2, special right border - 3
    Arena.borders = spritesheet.imgsat([(129, 257, 31, 31), (193, 257, 31, 31),
                                        (129, 225, 31, 31), (193, 225, 31, 31)
                                        ]) + arena_h_borders(spritesheet)

    # yellow - 1, green - 2, red - 3, dark orange - 4,
    # purple - 5, orange - 6, light blue - 7, dark purple - 8
    # silver - 9, dark gray - 10
    Brick.images = spritesheet.imgsat([(225, 193, 31, 16), (225, 225, 31, 16),
                                       (225, 257, 31, 16), (225, 289, 31, 16),
                                       (257, 193, 31, 16), (257, 225, 31, 16),
                                       (257, 257, 31, 16), (257, 289, 31, 16),
                                       (129, 1, 31, 16), (97, 1, 31, 16)])

    levels = [
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # level 1
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0],
            [0, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0],
            [0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0],
            [0, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0],
            [0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0],
            [0, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0],
            [0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ],
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # level 2
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
            [8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8],
            [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
            [8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8],
            [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
            [8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8],
            [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
            [8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ],
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # level 3
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 8, 8, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 8, 1, 1, 8, 0, 0, 0, 0],
            [0, 0, 0, 8, 1, 1, 1, 1, 8, 0, 0, 0],
            [0, 0, 8, 1, 1, 8, 8, 1, 1, 8, 0, 0],
            [0, 0, 8, 1, 1, 8, 8, 1, 1, 8, 0, 0],
            [0, 0, 0, 8, 1, 1, 1, 1, 8, 0, 0, 0],
            [0, 0, 0, 0, 8, 1, 1, 8, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 8, 8, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ],
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # level 4
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 3, 3, 9, 3, 3, 9, 3, 3, 9, 3, 0],
            [0, 7, 9, 7, 7, 9, 7, 7, 9, 7, 7, 0],
            [0, 3, 3, 9, 3, 3, 9, 3, 3, 9, 3, 0],
            [0, 7, 9, 7, 7, 9, 7, 7, 9, 7, 7, 0],
            [0, 3, 3, 9, 3, 3, 9, 3, 3, 9, 3, 0],
            [0, 7, 9, 7, 7, 9, 7, 7, 9, 7, 7, 0],
            [0, 3, 3, 9, 3, 3, 9, 3, 3, 9, 3, 0],
            [0, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ],
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # level 5
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [5, 5, 5, 5, 0, 10, 10, 10, 0, 0, 8, 8],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [2, 2, 2, 2, 2, 2, 10, 2, 2, 2, 2, 2],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [10, 3, 3, 0, 0, 0, 10, 0, 0, 3, 3, 10],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [3, 3, 3, 3, 3, 3, 10, 3, 3, 3, 3, 3],
            [0, 0, 0, 0, 0, 10, 10, 10, 0, 0, 0, 0],
            [10, 0, 0, 7, 7, 7, 10, 7, 7, 0, 0, 10],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [7, 7, 7, 7, 7, 7, 10, 7, 7, 7, 7, 7],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [9, 9, 0, 0, 0, 10, 10, 10, 0, 0, 9, 9],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ],
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # level 6 test
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 10, 10, 10, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 10, 10, 10, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0],
            [2, 0, 0, 0, 0, 10, 10, 10, 0, 0, 0, 2],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ]
    ]

    Paddle.image = paddle_image(spritesheet)
    Ball.image = spritesheet.imgat((428, 300, 11, 11), -1)

    # create the background
    arena = Arena(SCREENRECT, levels)
    # screen.blit(arena.background, (0, 0))  # Attach arena bg to screen
    # pygame.display.update()  # update screen

    score_image(spritesheet, screen, arena)

    # initialize game groups
    balls = pygame.sprite.Group()
    bricks = pygame.sprite.Group()
    all = pygame.sprite.RenderUpdates()

    # keep track of time
    clock = pygame.time.Clock()

    # assign default groups to each sprite class
    Paddle.containers = all
    Ball.containers = all, balls
    Brick.containers = all, bricks
    Score.containers = all

    # initialize our starting sprites
    paddle = Paddle(arena)
    score = Score(arena)
    arena.makelevel(1)
    screen.blit(arena.background, (0, 0))  # Attach arena bg to screen
    pygame.display.update()  # update screen

    # game loop
    while 1:

        # get input
        for event in pygame.event.get():
            if event.type == QUIT \
                    or (event.type == KEYDOWN and event.key == K_ESCAPE):
                return

        # update score
        score.update_score()

        # check for win
        if arena.level_cleared:
            print('won')

        # clear sprites
        all.clear(screen, arena.background)

        # update sprites
        all.update()
        if not balls:
            Ball(arena, paddle, bricks)

        # redraw sprites
        dirty = all.draw(screen)
        pygame.display.update(dirty)

        # maintain frame rate
        clock.tick(30)
Example No. 28
def initialize():
    global startTime, robot, realTimeMap
    startTime = time.time()
    robot = Robot(1, 1, 1)
    robot.robotMode = "exploring"
    realTimeMap = Arena()
Example No. 29
def main():
    a = Arena()
    a.fight()
Example No. 30
def test_getDirection():
    paddle = Paddle(Arena())
    assert paddle.getDirection() == 0