Example #1
def play_episode(net, steps_in_episode):
    env = helicopter.Helicopter(n_row=N_ROW,
                                n_col=N_COL,
                                p_fire=P_FIRE,
                                p_tree=P_TREE,
                                tree=TREE,
                                fire=FIRE,
                                empty=EMPTY)
    obs = env.reset()
    total_reward = 0.0
    step_data = {}
    episode_steps = []
    episode_data = {}

    for step_num in range(steps_in_episode):
        grid, position = observations_to_tensors([obs])
        action = get_action(grid, position, net)

        next_obs, reward, is_done, info = env.step(action)

        total_reward += reward
        step_data['observation'] = obs
        step_data['action'] = action
        step_data['reward'] = reward
        step_data['step_number'] = step_num
        episode_steps.append(step_data)
        obs = next_obs
        step_data = {}

    episode_data['total_reward'] = total_reward
    episode_data['info'] = info
    episode_data['all_steps'] = episode_steps
    return episode_data
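
play_episode returns the accumulated reward together with every per-step record, so a caller can score and filter whole episodes. A minimal usage sketch, assuming a trained net and the BATCH_SIZE, PERCENTILE and STEPS_PER_EPISODE constants from Example #4; the percentile filtering itself is an illustration, not part of the original source:

import numpy as np

episodes = [play_episode(net, STEPS_PER_EPISODE) for _ in range(BATCH_SIZE)]
rewards = [ep['total_reward'] for ep in episodes]
reward_bound = np.percentile(rewards, PERCENTILE)
# Keep only the elite episodes whose total reward reaches the percentile cut
elite_episodes = [ep for ep in episodes if ep['total_reward'] >= reward_bound]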
Example #2
    def load_stage(self, stage, demo=False):  #, checkpoint=None):
        """Load a stage procedurally, or from a file. This method will import the named module, and load the class
        within. The class must be named Stage<Name> where <Name> is the capitalized version of the module name."""

        self.demo = demo

        classname = "Stage%s" % stage.title()
        module = __import__(stage)
        class_obj = getattr(module, classname)

        if not demo:

            self.helicopter = helicopter.Helicopter(400,
                                                    400,
                                                    batch=self.sprite_batch,
                                                    context=self)
            self.gunner = gunner.Gunner(400,
                                        400,
                                        batch=self.sprite_batch,
                                        context=self)
            self.rope = rope.Rope(length=ROPE_SEGMENTS,
                                  anchor_start=self.helicopter,
                                  anchor_end=self.gunner)
            self.message("GET READY SOLDIER")

        self.stage = class_obj(context=self)

        # Zero out all of the bullets
        self.bullets = []
        self.bullet_pool = []

        # Fill the bullet pool with dead bullet objects
        for i in range(BULLET_POOL):
            b = bullet.Bullet(x=0, y=0, batch=self.sprite_bullet_batch)
            self.bullet_pool.append(b)
            self.bullets.append(b)

        if self.stage.music is None:
            self.music_player = None
        else:
            self.music_player = pyglet.media.Player()
            self.music_player.queue(self.stage.music)
            self.music_player.eos_action = 'loop'
            self.music_player.play()
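
The naming convention described in the docstring means a hypothetical call such as self.load_stage("jungle") imports the module jungle and instantiates the class StageJungle from it. A minimal sketch of such a stage module (the module name, class body and music attribute are illustrative assumptions):

# jungle.py -- loaded via load_stage("jungle")
class StageJungle:
    music = None  # no background music, so load_stage skips the media player

    def __init__(self, context):
        self.context = context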
Example #3
# Colors
white = (255, 255, 255)
black = (0, 0, 0)
gray = (50, 50, 50)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
yellow = (255, 255, 0)

for sprite in sprites.all_sprites:
    sprite.convert_alpha()

clock = pygame.time.Clock()
FPS = 30

player = helicopter.Helicopter(100, display_height / 2 - 40)
moving = True
godmode = False

# Score
score = 0
highscore_file = open('highscore.dat', "r")
highscore_int = int(highscore_file.read())

# Clouds drifting across the background
cloud_x = 800
cloud_y = random.randint(0, 400)

# Enemy variables
enemy_heli = enemy_heli.EnemyHeli(-100, display_height / 2 - 40)
enemy_heli_alive = False
Example #4
# Environment parameters
N_ROW = 8
N_COL = 8
P_FIRE = 0.01
P_TREE = 0.30
# Symbols for cells
TREE = 3
FIRE = 7
EMPTY = 1

# Test environment to get some important attributes
env = helicopter.Helicopter(n_row=N_ROW,
                            n_col=N_COL,
                            p_fire=P_FIRE,
                            p_tree=P_TREE,
                            tree=TREE,
                            fire=FIRE,
                            empty=EMPTY)
N_ACTIONS = env.actions_cardinality
FREEZE_FRAMES = env.freeze

FOREST_ITERATIONS = 60
STEPS_PER_EPISODE = FREEZE_FRAMES * FOREST_ITERATIONS
EPOCHS = 1200
ITERATIONS = 2
BATCH_SIZE = 120
PERCENTILE = 90
CPUS = 1

# Take advantage of hardware if available
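
The snippet is cut off after this comment; in PyTorch code (Example #5 imports the torch modules) the hardware check that typically follows looks like the sketch below, which is an assumption rather than the original line:

import torch
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")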
Example #5
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from torch.utils.tensorboard import SummaryWriter

import helicopter

N_ROW = 1
N_COL = 9
P_FIRE = 0.05
P_TREE = 0.30
ENV = helicopter.Helicopter(n_row=N_ROW,
                            n_col=N_COL,
                            p_fire=P_FIRE,
                            p_tree=P_TREE,
                            tree=0.30,
                            empty=0.10,
                            fire=0.70)

# Total number of DQN training iterations
EPOCHS = 800000

# Target network
# Synchronize the target network every SYNC_TARGET steps
SYNC_TARGET = 2000

# Replay Memory
REPLAY_SIZE = 800000
# Start learning once the replay memory reaches this size
REPLAY_START_SIZE = 1000
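
These constants typically drive the outer DQN loop: learning starts only once the replay memory holds REPLAY_START_SIZE transitions, and the target network is hard-synchronized every SYNC_TARGET steps. A hypothetical sketch of that loop (net, tgt_net, buffer and train_step are assumed names, not taken from the original file):

for step in range(EPOCHS):
    # ... interact with ENV and append the transition to buffer ...
    if len(buffer) < REPLAY_START_SIZE:
        continue  # wait until the replay memory has warmed up
    if step % SYNC_TARGET == 0:
        tgt_net.load_state_dict(net.state_dict())  # periodic hard sync
    train_step(net, tgt_net, buffer)  # sample a batch and take one gradient step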
Example #6
FPSCLOCK = pygame.time.Clock()
FPS = 30

# background variables
background_width = 1920

# newLife variables
red_heart_x = 800
red_heart_y = random.randint(0, 400)

# main menu helicopter (for decoration only)
heli_main_x = 800
heli_main_y = random.randint(0, 400)

# player variables
player = helicopter.Helicopter(100, HALF_WINHEIGHT-40)
moving = True

# score variables
score = 0
hard_mode = 1800
highscore_file = open('highscore.dat', "r")
highscore_int = highscore_file.read() #high score in integer

# enemy helicopter variables
enemy_heli = enemy_heli.EnemyHeli(-100, HALF_WINHEIGHT-40)
enemy_heli_alive = False

# missile variables
missile_x = 800
missile_y = player.y
Example #7
    def load(self):
        levels = self.get_levels()
        if self.level_id > len(levels) - 1:
            self.app.set_state(main.WIN)
            self.app.score_manager.save()
        else:
            self.app.gui_manager.set_state(gui_manager.FADE_IN)

            self.app.game_manager.clear_level()

            self.level_name = levels[self.level_id]
            self.app.gui_manager.update_times(
                self.app.score_manager.run_scores.get(
                    util.get_filename(self.mode, self.level_name), 0),
                self.app.score_manager.get_record(self.mode, self.level_name))

            # map_data = levels.levels[self.level_id]()
            if self.mode == 0:
                directory = 'levels'
            elif self.mode == 1:
                directory = 'survival'

            with open('{}/{}.dat'.format(directory, self.level_name),
                      'rb') as f:
                map_data = pickle.load(f)

            scene = self.app.renderer.scene

            model_name, texture = map_data['terrain']
            self.app.game_manager.terrain = terrain.Terrain(
                self.app, model_name, texture)
            scene.add(self.app.game_manager.terrain.canvas)

            self.app.game_manager.player.spawn(map_data['spawn_pos'])

            if self.mode == 0:
                self.app.game_manager.goal.spawn(map_data['goal_pos'])
            elif self.mode == 1:
                self.app.game_manager.goal.despawn()

            for data in map_data['buildings']:
                b = building.Building(self.app,
                                      building.Building.data[data[0]],
                                      data[1:4], data[4])
                # b = building.Building(self.app, building.Building.data[data[0]], data[1:4], 0)
                self.app.game_manager.game_objects.add(b)
                scene.add(b.canvas)

            for data in map_data['platforms']:
                if data[0] == 0:
                    platform.Hedge(self.app, data[1:4])
                elif data[0] == 1:
                    platform.InvisiblePlatform(self.app, data[1:4])
                elif data[0] == 2:
                    platform.LavaPlatform(self.app, data[1:4])
                elif data[0] == 3:
                    platform.Trampoline(self.app, data[1:4])

            for data in map_data['elevators']:
                e = elevator.Elevator(self.app, data[1:4], data[4])
                self.app.game_manager.game_objects.add(e)  # spawn later
                scene.add(e.canvas)

            for data in map_data['powerups']:
                if data[0] == 0:
                    e = powerup.Fuel(self.app, data[1:4])
                    self.app.game_manager.game_objects.add(e)  # spawn later
                    scene.add(e.canvas)
                elif data[0] == 1:
                    e = powerup.Health(self.app, data[1:4])
                    self.app.game_manager.game_objects.add(e)  # spawn later
                    scene.add(e.canvas)
                elif data[0] == 2:
                    e = powerup.SlowTime(self.app, data[1:4])
                    self.app.game_manager.game_objects.add(e)  # spawn later
                    scene.add(e.canvas)

            for data in map_data['vehicles']:
                if data[0] == 0:
                    v = car.Car(self.app)
                elif data[0] == 1:
                    v = helicopter.Helicopter(self.app)
                v.spawn(data[1:4])

            for data in map_data['enemies']:
                if data[0] == 0:
                    enemy.Turret(self.app, data[1:4])
                elif data[0] == 1:
                    enemy.Bee(self.app, data[1:4])
                elif data[0] == 2:
                    enemy.BowlSpawner(self.app, data[1:4])
                elif data[0] == 3:
                    enemy.InvisibleEnemy(self.app, data[1:4])

            self.app.game_manager.set_state(self.mode)
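
The loader implicitly defines the level-file format: a pickled dictionary whose lists of tuples start with a type id followed by a position (plus one extra field for buildings and elevators). A hypothetical minimal level file, written against only what load() reads, with every concrete value an illustrative assumption:

import pickle

map_data = {
    'terrain': ('flat', 'grass'),     # (model_name, texture)
    'spawn_pos': (0, 0, 0),
    'goal_pos': (10, 0, 10),          # only read in mode 0
    'buildings': [(0, 4, 0, 4, 0)],   # (type_id, x, y, z, extra)
    'platforms': [(0, 2, 0, 2)],      # 0 -> platform.Hedge
    'elevators': [],                  # (type_id, x, y, z, extra)
    'powerups': [(0, 6, 0, 6)],       # 0 -> powerup.Fuel
    'vehicles': [(1, 8, 0, 8)],       # 1 -> helicopter.Helicopter
    'enemies': [],                    # (type_id, x, y, z)
}

with open('levels/example.dat', 'wb') as f:
    pickle.dump(map_data, f)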
Example #8
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 18:17:54 2020

@author: ebecerra
"""

import helicopter
import numpy as np

env = helicopter.Helicopter(n_row=8, n_col=8,
                            tree=3, fire=7, empty=1,
                            p_fire=0.01, p_tree=0.3)

# First observation
observation = env.reset()
env.render()

total_reward = 0
for i in range(env.freeze * 100):
  print('.', end='')
  action = np.random.choice(list(env.actions_set))
  observation, reward, done, info = env.step(action)
  total_reward += reward
  env.render()

print('\nTotal Reward: {}'.format(total_reward))

# np.random.choice expects a sequence, so convert the action set to a list
np.random.choice(list(env.actions_set))