def create_atari_env(env_id):
    """Build a 42x42 Atari env with diagnostics and attach a 20-step runner thread."""
    wrapped = gym.make(env_id)
    for stage in (Vectorize, AtariRescale42x42, DiagnosticsInfo, Unvectorize):
        wrapped = stage(wrapped)
    wrapped.runner = RunnerThread(wrapped, 20)
    return wrapped
def create_atari_env(env_id, **kwargs):
    """Build an 84x84 Atari env with diagnostics, flagged as Atari via `.atari`."""
    env = Unvectorize(DiagnosticsInfo(AtariRescale84x84(Vectorize(gym.make(env_id)))))
    env.atari = True
    return env
def make_environment(name, **args):
    """Make an environment of a given name, possibly using extra arguments.

    Registered names are instantiated directly with ``**args``; unregistered
    names fall back to the generic ``Environment`` class, which takes the
    name as its first argument. Atari envs additionally get the 42x42
    rescale wrapper pipeline, preserving their ``to_dict`` hook.
    """
    # EAFP: a single dict lookup instead of .get() followed by a redundant
    # membership test (the original looked the name up twice).
    try:
        env_cls = environment_registry[name]
    except KeyError:
        env = Environment(name, **args)
    else:
        env = env_cls(**args)
    if "atari.atari_env" in env.unwrapped.__module__:
        # The wrappers hide to_dict; save it and restore it on the wrapped env.
        to_dict = env.to_dict
        env = Vectorize(env)
        env = AtariRescale42x42(env)
        env = Unvectorize(env)
        env.to_dict = to_dict
    return env
def create_vizdoom_env(env_id):
    """Build a VizDoom env rescaled to 84x84x3 with diagnostics."""
    env = gym.make(env_id)
    for stage in (Vectorize, VizdoomRescale84x84x3, DiagnosticsInfo, Unvectorize):
        env = stage(env)
    return env
def create_atari_env(env_id):
    """Build a 42x42 Atari env (no diagnostics wrapper)."""
    wrapped = AtariRescale42x42(Vectorize(gym.make(env_id)))
    return Unvectorize(wrapped)
def create_atari_env(env_id):
    """Build an Atari env wrapped with rescaling and per-episode diagnostics."""
    return Unvectorize(DiagnosticsInfo(AtariRescale(Vectorize(gym.make(env_id)))))
def create_gym_environment(env_id, **kwargs):
    """Create a gym env; pixel-based envs get the Mnih et al. (2015) preprocessing."""
    env = gym.make(env_id)
    # Low-dimensional benchmarks are used as-is; everything else is assumed
    # to produce pixel observations and gets the Atari preprocessing stack.
    low_dim_ids = (
        'CartPole-v0', 'CartPole-v1', 'Acrobot-v1', 'MountainCar-v0',
        'Pendulum-v0', 'BipedalWalker-v2', 'LunarLander-v2', 'Humanoid-v2',
    )
    if env_id not in low_dim_ids:
        env = Vectorize(env)
        env = AtariPreProcessorMnih2015(env)
        env = Unvectorize(env)
    return env
def create_pixels_env(env_id, **kwargs):
    """Build a pixel env wrapped with the adversarial pipeline and diagnostics."""
    base = Vectorize(gym.make(env_id))
    adversarial = create_adversarial_env(base, **kwargs)
    return Unvectorize(DiagnosticsInfo(adversarial))
def create_atari_env_deepmind(env_id, mode, **kwargs):
    """Build an Atari env using the DeepMind wrapper stack plus mode-aware diagnostics."""
    deepmind_env = wrap_deepmind(gym.make(env_id), mode, **kwargs)
    diagnosed = DiagnosticsInfo(Vectorize(deepmind_env), mode)
    return Unvectorize(diagnosed)
def create_atari_env(env_id, monitor_logdir=None, wrappers='deepmind', policy='rnn', num_buffer_frames=4, max_repeats=0, **_):
    """Build an Atari env with a configurable wrapper stack.

    monitor_logdir enables statistics recording (videos disabled); `wrappers`
    selects the 'deepmind' or 'universe' preprocessing stack; CNN policies
    get an observation buffer; max_repeats > 0 adds frame-skipping.
    """
    env = gym.make(env_id)
    if monitor_logdir:
        # Record statistics only (video_callable=False), resuming prior logs.
        env = Monitor(env, monitor_logdir, video_callable=False, resume=True)
    if wrappers == 'deepmind':
        from common.atari_wrappers import wrap_deepmind
        env = wrap_deepmind(env)
    elif wrappers == 'universe':
        from universe.wrappers import Vectorize, Unvectorize
        from common.universe_wrappers import AtariRescale42x42, DiagnosticsInfo
        for stage in (Vectorize, AtariRescale42x42, DiagnosticsInfo, Unvectorize):
            env = stage(env)
    if policy == 'cnn' and num_buffer_frames > 0:
        env = ObservationBuffer(env, num_buffer_frames)
    if max_repeats > 0:
        env = FrameskipWrapper(env, max_repeats)
    return env
def wrap_generic_env(env, atari=False):
    """Wrap an existing env with diagnostics; Atari envs also get 42x42 rescaling."""
    env = Vectorize(env)
    if atari:
        env = AtariRescale42x42(env)
    return Unvectorize(DiagnosticsInfo(env))
def create_atari_env(env_id):
    """Build an Atari env with rescaling and observation normalization."""
    env = gym.make(env_id)
    for stage in (Vectorize, AtariRescale, NormalizedEnv, Unvectorize):
        env = stage(env)
    return env
def create_mupen_env(env_id):
    """Build a Mupen64 env with rescaling and diagnostics."""
    return Unvectorize(DiagnosticsInfo(MupenRescale(Vectorize(gym.make(env_id)))))
def create_doom(env_id, client_id, envWrap=True, record=False, outdir=None, noLifeReward=False, acRepeat=0, **_):
    """Create a ppaquette VizDoom env, resolving a fuzzy env_id into a concrete id.

    The env_id string is matched case-insensitively against keywords
    ('labyrinth', 'very', 'sparse', 'fix', ...) to pick a registered
    ppaquette env; unrecognized ids fall back to DoomMyWayHome-v0.

    Args:
        env_id: fuzzy name; rewritten to a 'ppaquette/...' gym id below.
        client_id: worker index; used to stagger VizDoom start-up.
        envWrap: if True, apply the 42x42 buffered-observation/frame-skip stack.
        record: if True (and outdir given), record episodes via gym's Monitor.
        outdir: directory for Monitor recordings.
        noLifeReward: if True, clamp negative rewards to zero.
        acRepeat: action-repeat count; 0 means the default of 4.
    """
    from ppaquette_gym_doom import wrappers
    # Keyword matching order matters: 'very'/'sparse' are checked before
    # 'fix', so e.g. 'verysparsefix' still maps to Fixed15.
    if 'labyrinth' in env_id.lower():
        if 'single' in env_id.lower():
            env_id = 'ppaquette/LabyrinthSingle-v0'
        elif 'fix' in env_id.lower():
            env_id = 'ppaquette/LabyrinthManyFixed-v0'
        else:
            env_id = 'ppaquette/LabyrinthMany-v0'
    elif 'very' in env_id.lower():
        env_id = 'ppaquette/DoomMyWayHomeFixed15-v0'
    elif 'sparse' in env_id.lower():
        env_id = 'ppaquette/DoomMyWayHomeFixed-v0'
    elif 'fix' in env_id.lower():
        if '1' in env_id or '2' in env_id:
            # Last two characters of env_id select the Fixed variant number.
            env_id = 'ppaquette/DoomMyWayHomeFixed' + str(env_id[-2:]) + '-v0'
        elif 'new' in env_id.lower():
            env_id = 'ppaquette/DoomMyWayHomeFixedNew-v0'
        else:
            env_id = 'ppaquette/DoomMyWayHomeFixed-v0'
    else:
        env_id = 'ppaquette/DoomMyWayHome-v0'
    # VizDoom workaround: Simultaneously launching multiple vizdoom processes
    # makes program stuck, so use the global lock in multi-threading/processing
    # (each worker sleeps client_id * 10 seconds before launching).
    client_id = int(client_id)
    time.sleep(client_id * 10)
    env = gym.make(env_id)
    modewrapper = wrappers.SetPlayingMode('algo')
    obwrapper = wrappers.SetResolution('160x120')
    acwrapper = wrappers.ToDiscrete('minimal')
    env = modewrapper(obwrapper(acwrapper(env)))
    # env = env_wrapper.MakeEnvDynamic(env)  # to add stochasticity
    if record and outdir is not None:
        env = gym.wrappers.Monitor(env, outdir, force=True)
    if envWrap:
        fshape = (42, 42)
        frame_skip = acRepeat if acRepeat > 0 else 4
        env.seed(None)
        if noLifeReward:
            env = env_wrapper.NoNegativeRewardEnv(env)
        env = env_wrapper.BufferedObsEnv(env, skip=frame_skip, shape=fshape)
        env = env_wrapper.SkipEnv(env, skip=frame_skip)
    elif noLifeReward:
        env = env_wrapper.NoNegativeRewardEnv(env)
    env = Vectorize(env)
    env = DiagnosticsInfo(env)
    env = Unvectorize(env)
    return env
def create_atari_env(env_name, evaluate=False, run_name=None):
    """Build an Atari env with bitwise observation encoding; monitored when evaluating."""
    env = Unvectorize(Bitwise(Vectorize(gym.make(env_name))))
    if evaluate:
        # NOTE(review): assumes run_name is set whenever evaluate=True — confirm callers.
        env = wrappers.Monitor(env, 'monitor/' + run_name)
    return env
def create_atari_env_simple(env_id, mode, **kwargs):
    """Legacy starter-agent pipeline: 42x42 rescale plus mode-aware diagnostics."""
    env = AtariRescale42x42(Vectorize(gym.make(env_id)))
    env = DiagnosticsInfo(env, mode)
    return Unvectorize(env)
def create_generic_env(env_id, atari=False):
    """Build a diagnostics-wrapped env; Atari envs also get 42x42 rescaling."""
    env = Vectorize(gym.make(env_id))
    if atari:
        env = AtariRescale42x42(env)
    return Unvectorize(DiagnosticsInfo(env))
def create_atari_env(env_id):
    """Build an Atari env with normalized (rather than rescaled) input plus diagnostics."""
    env = gym.make(env_id)
    for stage in (Vectorize, AtariNormalizeInput, DiagnosticsInfo, Unvectorize):
        env = stage(env)
    return env
def atari_env(env_id, env_conf):
    """Build an env; image-observation envs get rescale + normalization wrappers."""
    env = gym.make(env_id)
    if len(env.observation_space.shape) <= 1:
        # Low-dimensional observations: use the env as-is.
        return env
    env = AtariRescale(Vectorize(env), env_conf)
    return Unvectorize(NormalizedEnv(env))
def create_car_racing_env():
    """Build CarRacing-v0 with 32x32 rescaling, normalization, and discrete actions."""
    stages = (Vectorize, CarRacingRescale32x32, NormalizedEnv,
              CarRacingDiscreteActions, Unvectorize)
    env = gym.make('CarRacing-v0')
    for stage in stages:
        env = stage(env)
    return env
def create_atari_env(env_id):
    """Build an env; image-observation envs get 42x42 rescale + normalization."""
    env = gym.make(env_id)
    if len(env.observation_space.shape) > 1:
        env = AtariRescale42x42(Vectorize(env))
        env = Unvectorize(NormalizedEnv(env))
    return env
def create_atari_env(env_id, record=False, outdir=None, **_):
    """Build a 42x42 Atari env with diagnostics; optionally recorded via gym's Monitor."""
    env = gym.make(env_id)
    if record and outdir is not None:
        env = gym.wrappers.Monitor(env, outdir, force=True)
    for stage in (Vectorize, AtariRescale42x42, DiagnosticsInfo, Unvectorize):
        env = stage(env)
    return env
def create_atari_env(env_id):
    """Build an Atari env rescaled to 84x84 with diagnostics."""
    # 84x84 chosen over the 42x42 variant used elsewhere in this file.
    return Unvectorize(DiagnosticsInfo(AtariRescale84x84(Vectorize(gym.make(env_id)))))
def create_atari_env(env_id, seed=None):
    """Build a 42x42 Atari env with diagnostics; seeded when a seed is supplied."""
    env = gym.make(env_id)
    if seed is not None:
        env.seed(seed)
    return Unvectorize(DiagnosticsInfo(AtariRescale42x42(Vectorize(env))))
def create_atari_env(env_id):
    """Build a 42x42 Atari env with diagnostics; action space optionally forced to 4."""
    env = gym.make(env_id)
    for stage in (Vectorize, AtariRescale42x42, DiagnosticsInfo, Unvectorize):
        env = stage(env)
    # NOTE(review): 'overwirite_with_grid' looks misspelled, but it must match
    # the attribute actually defined on the external config object — confirm
    # there before renaming.
    if config.overwirite_with_grid:
        env.action_space.n = 4
    return env
def create_unrealcv_env(env_full_name, outdir='/tmp/random-agent-results'):
    """Create an UnrealCV env (84x84x3 rescale, diagnostics) recorded by gym's Monitor.

    Args:
        env_full_name: full gym id of the UnrealCV environment.
        outdir: directory for Monitor recordings. Previously hard-coded;
            now a parameter with the original value as its default, so
            existing callers are unaffected.

    Returns:
        The wrapped, monitored environment.
    """
    env = gym.make(env_full_name)
    env = Vectorize(env)
    env = UnrealCVRescale84x84x3(env)
    env = DiagnosticsInfo(env)
    env = Unvectorize(env)
    # The action space is expected to be discrete (assertion disabled upstream).
    env = wrappers.Monitor(env, directory=outdir, force=True)
    return env
def create_atari_env(env_id, frame_size=84):
    """Build an Atari env with diagnostics, rescaled to 42x42 or 84x84."""
    env = Vectorize(gym.make(env_id))
    if frame_size == 42:
        env = AtariRescale42x42(env)
    else:
        # Preprocess frames followed from DQN 2013.
        env = AtariRescale84x84(env)
    return Unvectorize(DiagnosticsInfo(env))
def create_atari_env(env_id):
    """Build an env; image-observation envs get 42x42 rescale + normalization."""
    env = gym.make(env_id)
    if len(env.observation_space.shape) <= 1:
        print('No preprocessing because env is too small')
        return env
    print('Preprocessing env')
    env = AtariRescale42x42(Vectorize(env))
    return Unvectorize(NormalizedEnv(env))
def create_atari_env(env_id, seed):
    """Build a seeded Atari env wrapped with rescaling and diagnostics."""
    env = gym.make(env_id)
    env.seed(seed)
    # Processing pipeline: vectorize -> rescale -> diagnostics -> unvectorize.
    for stage in (Vectorize, Rescale, DiagnosticsInfo, Unvectorize):
        env = stage(env)
    return env
def create_ple_env(env_id, **kwargs):
    """Create a PLE env with rescaling; optionally split into multiple views.

    Args:
        env_id: gym id of the PLE environment.
        **kwargs: optional settings. If 'multi_obs_settings' is present, its
            value is unpacked as extra positional arguments to SplitScreen
            after the env itself.

    Returns:
        The wrapped environment (note: intentionally not Unvectorize'd).
    """
    env = PleRescale(Vectorize(gym.make(env_id)))
    # Membership test on the mapping itself instead of `.keys()` (idiomatic,
    # avoids an intermediate keys view). The catch-all was renamed from `_`
    # to `kwargs` since `_` conventionally marks an unused value.
    if "multi_obs_settings" in kwargs:
        env = SplitScreen(env, *kwargs["multi_obs_settings"])
    return env