Example #1
def create_env_fn():
    # env_path, worker_id and arena_config_in are expected to be defined
    # in the enclosing scope before this factory is called.
    env = AnimalAIEnv(environment_filename=env_path,
                      worker_id=worker_id,
                      n_arenas=1,
                      arenas_configurations=arena_config_in,
                      retro=True)
    return env
Example #2
def create_animal(num_actors=1, inference=True, config=None, seed=None):
    # BASE_DIR, SKIP_FRAMES, VISUAL_FRAMES_COUNT, VEL_FRAMES_COUNT and
    # USE_GREYSCALE_OBSES are module-level constants in the original code.
    from animalai.envs.gym.environment import AnimalAIEnv
    from animalai.envs.arena_config import ArenaConfig
    import random
    from animalai_wrapper import AnimalWrapper, AnimalStack, AnimalSkip
    env_path = 'AnimalAI'
    worker_id = random.randint(1, 60000)
    arena_config_in = ArenaConfig(
        BASE_DIR + '/configs/learning/stage4/3-Food Moving.yaml')

    if config is None:
        config = arena_config_in
    else:
        config = ArenaConfig(config)
    if seed is None:
        seed = 0  # random.randint(0, 100500)

    env = AnimalAIEnv(environment_filename=env_path,
                      worker_id=worker_id,
                      n_arenas=num_actors,
                      seed=seed,
                      arenas_configurations=config,
                      greyscale=False,
                      docker_training=False,
                      inference=inference,
                      retro=False,
                      resolution=84)
    env = AnimalSkip(env, skip=SKIP_FRAMES)
    env = AnimalWrapper(env)
    env = AnimalStack(env,
                      VISUAL_FRAMES_COUNT,
                      VEL_FRAMES_COUNT,
                      greyscale=USE_GREYSCALE_OBSES)
    return env
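For illustration, the factory can be exercised with a short random rollout. This sketch assumes the wrapper stack preserves the standard Gym API; the loop below is illustrative, not part of the original example:

env = create_animal(num_actors=1, inference=False)
obs = env.reset()
for _ in range(100):
    # sample a random action from the wrapped action space
    obs, reward, done, info = env.step(env.action_space.sample())
    if done:
        obs = env.reset()
env.close()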
Example #3
def create_env_fn():
    # ENV_PATH, WORKER_ID and args are defined at module level in the original.
    env = AnimalAIEnv(environment_filename=ENV_PATH,
                      worker_id=WORKER_ID,
                      n_arenas=1,
                      arenas_configurations=ArenaConfig(args.arena_config),
                      docker_training=False,
                      retro=False)
    return env
Example #4
def create_env_fn():
    env = AnimalAIEnv(environment_filename=env_path,
                      worker_id=worker_id,
                      n_arenas=1,
                      arenas_configurations=arena_config_in,
                      docker_training=False,
                      retro=True,
                      inference=watch)  # watch is a bool defined in the enclosing scope
    return env
Example #5
def init_env(env_path, env_seed):
    env = AnimalAIEnv(environment_filename=env_path,
                      seed=env_seed,
                      retro=False,
                      n_arenas=1,
                      worker_id=1,
                      docker_training=False,
                      resolution=84)
    return env
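A quick smoke test for init_env might look like the following; in non-retro mode a step takes a pair of discrete values, as Example #10 below does with env.step([0, 0]). The path and loop are illustrative only:

env = init_env(env_path='../env/AnimalAI', env_seed=0)
obs = env.reset()
for _ in range(50):
    # [0, 0] is the no-op pair in the two-branch action format
    obs, reward, done, info = env.step([0, 0])
    if done:
        break
env.close()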
Example #6
def create_env_fn():
    env = AnimalAIEnv(environment_filename=env_path,
                      worker_id=worker_id,
                      n_arenas=1,
                      arenas_configurations=arena_config_in,
                      docker_training=False,
                      retro=True)
    print("all good create_env_fn")
    return env
Example #7
def create_animalai_environment(environment_path=None):
    """Wraps the Animal AI environment with some basic preprocessing.

    Returns:
      An Animal AI environment with some standard preprocessing.
    """
    assert environment_path is not None
    env = AnimalAIEnv(environment_path, 0, n_arenas=1, retro=True)  # 0 is the worker_id
    env = OTCPreprocessing(env)
    return env
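OTCPreprocessing itself is not shown in this example. A minimal stand-in that keeps the Gym interface intact could look like the sketch below; the real class presumably applies Obstacle-Tower-style preprocessing, so treat this as a placeholder only:

import gym

class OTCPreprocessing(gym.Wrapper):
    """Illustrative pass-through stand-in for the real preprocessing wrapper."""

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)

    def step(self, action):
        # the real wrapper would transform observations/rewards here
        return self.env.step(action)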
Example #8
def create_env_fn(self):
    # A method of a runner class in the original; self.env_path, self.worker_id,
    # self.arena_config_in, self.args and self.rank are set elsewhere on the class.
    env = AnimalAIEnv(environment_filename=self.env_path,
                      worker_id=self.worker_id,
                      n_arenas=1,
                      arenas_configurations=self.arena_config_in,
                      docker_training=False,
                      retro=False,
                      inference=self.args['inference'],
                      seed=self.rank,
                      resolution=84)
    return env
Example #9
def create_env_fn():
    env = AnimalAIEnv(environment_filename=env_path,
                      worker_id=worker_id,
                      n_arenas=1,
                      arenas_configurations=arena_config_in,
                      docker_training=False,
                      retro=False,
                      inference=True,
                      resolution=84,
                      seed=random.randint(0, 100))
    return env
Example #10
def main():
    # Load the agent from the submission
    print('Loading your agent')
    try:
        spec = importlib.util.spec_from_file_location('agent_module',
                                                      '/aaio/agent.py')
        agent_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(agent_module)
        submitted_agent = agent_module.Agent()
    except Exception as e:
        print(
            'Your agent could not be loaded; make sure all paths are absolute. Error thrown:'
        )
        raise e
    print('Agent successfully loaded')

    arena_config_in = ArenaConfig('/aaio/test/1-Food.yaml')

    print('Resetting your agent')
    try:
        submitted_agent.reset(t=arena_config_in.arenas[0].t)
    except Exception as e:
        print('Your agent could not be reset:')
        raise e

    env = AnimalAIEnv(
        environment_filename='/aaio/test/env/AnimalAI',
        # seed=0,
        retro=False,
        n_arenas=1,
        worker_id=1,
        docker_training=True,
    )

    env.reset(arenas_configurations=arena_config_in)
    obs, reward, done, info = env.step([0, 0])

    print('Running 5 episodes')

    for k in range(5):
        cumulated_reward = 0
        print('Episode {} starting'.format(k))
        try:
            for i in range(arena_config_in.arenas[0].t):

                action = submitted_agent.step(obs, reward, done, info)
                obs, reward, done, info = env.step(action)
                cumulated_reward += reward
                if done:
                    break
        except Exception as e:
            print('Episode {} failed'.format(k))
            raise e
        print('Episode {0} completed, reward {1}'.format(k, cumulated_reward))

    print('SUCCESS')
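The harness above loads /aaio/agent.py and expects it to define an Agent class with reset(t=...) and step(obs, reward, done, info) methods. A minimal stub satisfying that interface (illustrative only; the returned action is an arbitrary constant):

class Agent:
    def reset(self, t=250):
        # t is the episode length taken from the arena configuration
        pass

    def step(self, obs, reward, done, info):
        # return a [forward/backward, left/right] pair; [1, 0] is intended as "move forward"
        return [1, 0]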
Example #11
def env():
    # Same wrapper stack as Example #2; SKIP_FRAMES, VISUAL_FRAMES_COUNT,
    # VEL_FRAMES_COUNT and USE_GREYSCALE_OBSES are module-level constants.
    worker_id = random.randint(1, 60000)
    env = AnimalAIEnv(environment_filename=env_path,
                      worker_id=worker_id,
                      n_arenas=num_actors,
                      seed=seed,
                      arenas_configurations=config,
                      greyscale=False,
                      docker_training=False,
                      inference=inference,
                      retro=False,
                      resolution=84)
    env = AnimalSkip(env, skip=SKIP_FRAMES)
    env = AnimalWrapper(env)
    env = AnimalStack(env, VISUAL_FRAMES_COUNT, VEL_FRAMES_COUNT,
                      greyscale=USE_GREYSCALE_OBSES)
    return env
Example #12
from animalai.envs.gym.environment import AnimalAIEnv
from animalai.envs.arena_config import ArenaConfig
import numpy as np
import random
import cv2

env_path = '../env/AnimalAI'
worker_id = random.randint(1, 100)
arena_config_in = ArenaConfig('ordered_configs/avoid_red/1-25-1.yml')

env = AnimalAIEnv(environment_filename=env_path,
                  worker_id=worker_id,
                  n_arenas=1,
                  arenas_configurations=arena_config_in,
                  docker_training=False,
                  retro=False)


done = False
number_of_episodes = 10
i = 0
SKIPPED_FRAMES = 10
state = env.reset()
print(state[0].shape)
print(state[0].ndim)
print(env.action_space.sample())
# the visual observation is float RGB in [0, 1]; scale to uint8 and swap
# channels so cv2.imwrite (which expects BGR) saves the colours correctly
numpy_image = (state[0] * 255).astype('uint8')
cv_image = cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR)
resized = cv2.resize(cv_image, (300, 300), interpolation=cv2.INTER_AREA)
cv2.imwrite('kk.png', resized)
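done, number_of_episodes, i and SKIPPED_FRAMES are initialised above but unused in the excerpt; a continuation consistent with them (a sketch, not the author's code) could run random episodes with frame skipping:

while i < number_of_episodes:
    state = env.reset()
    done = False
    while not done:
        action = env.action_space.sample()
        # repeat the same action for SKIPPED_FRAMES steps
        for _ in range(SKIPPED_FRAMES):
            state, reward, done, info = env.step(action)
            if done:
                break
    i += 1
env.close()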
Example #13
    # Tail of a processor class in the original file.
    def process_info(self, info):
        return {}


parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=['train', 'test'], default='train')
parser.add_argument('--weights', type=str, default=None)
parser.add_argument('--config-file', type=str, default=None)
args = parser.parse_args()
arena_config_in = ArenaConfig(args.config_file)

# Get the environment and extract the number of actions.
ENV_NAME = "AnimalAIEnv"
env = AnimalAIEnv(environment_filename='../env90deg/AnimalAILinux',
                  worker_id=worker_id,  # defined elsewhere in the original script
                  n_arenas=1,
                  arenas_configurations=arena_config_in,
                  docker_training=False,
                  retro=False)

np.random.seed(123)
env.seed(123)
nb_actions = 5

# Next, we build our model. We use the same model that was described by Mnih et al. (2015).
input_shape = (WINDOW_LENGTH,) + INPUT_SHAPE  # constants defined elsewhere, e.g. (4,) + (84, 84)
print(input_shape)
model = Sequential()
# reorder (frames, height, width) to (height, width, frames) for
# TensorFlow's channels-last convention
model.add(Permute((2, 3, 1), input_shape=input_shape))
model.add(Convolution2D(32, (8, 8), strides=(4, 4)))
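The snippet is cut off after the first convolution. A standard completion of the Mnih et al. (2015) network it references would continue as follows (not the original author's code; assumes Activation, Flatten and Dense are imported from keras.layers alongside Permute and Convolution2D):

model.add(Activation('relu'))
model.add(Convolution2D(64, (4, 4), strides=(2, 2)))
model.add(Activation('relu'))
model.add(Convolution2D(64, (3, 3), strides=(1, 1)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))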