Example #1
def make_player(is_train=True, dump_dir=None):
    # NOTE: the snippets on this page assume TArtist-style imports, e.g.
    # `from tartist.app import rl`, `from tartist.core import get_env`, and
    # `from tartist import image` (an assumption; the originals omit them).
    # Base Gym environment; `dump_dir`, if given, records episodes there.
    p = rl.GymRLEnviron(get_env('a3c.env_name'), dump_dir=dump_dir)
    # Stack the most recent `nr_history_frames` observations into one state.
    p = rl.HistoryFrameProxyRLEnviron(p, get_env('a3c.nr_history_frames'))
    p = rl.LimitLengthProxyRLEnviron(p, get_env('a3c.max_nr_steps'))
    if is_train:
        # During training, restart automatically whenever an episode ends.
        p = rl.AutoRestartProxyRLEnviron(p)
    return p
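The `get_env('a3c.*')` calls read keys from a global environ/config. Only the key names below come from the code on this page; the dict layout and every value are an illustrative sketch, not TArtist's actual configuration:

# Illustrative values only; just the key names are taken from the calls above.
a3c_env_config = {
    'a3c': {
        'env_name': 'Breakout-v0',       # hypothetical choice of Atari game
        'nr_history_frames': 4,          # hypothetical
        'max_nr_steps': 40000,           # hypothetical
        'input_shape': (84, 84),         # hypothetical; used by Example #3
        'inference': {
            'max_antistuck_repeat': 30,  # hypothetical; used by Examples #3-#4
        },
    },
}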
Example #2
def make_player(dump_dir=None):
    def resize_state(s):
        # Downsample each raw frame to 84x84 with nearest-neighbor interpolation.
        return image.resize(s, (84, 84), interpolation='NEAREST')

    p = rl.GymRLEnviron('Enduro-v0', dump_dir=dump_dir)
    p = rl.MapStateProxyRLEnviron(p, resize_state)
    p = rl.HistoryFrameProxyRLEnviron(p, 4)
    p = rl.LimitLengthProxyRLEnviron(p, 4000)
    return p
Example #3
def make_player(is_train=True, dump_dir=None):
    def resize_state(s):
        return image.resize(s, get_env('a3c.input_shape'), interpolation='NEAREST')

    p = rl.GymRLEnviron(get_env('a3c.env_name'), dump_dir=dump_dir)
    p = rl.MapStateProxyRLEnviron(p, resize_state)
    p = rl.HistoryFrameProxyRLEnviron(p, get_env('a3c.nr_history_frames'))

    p = rl.LimitLengthProxyRLEnviron(p, get_env('a3c.max_nr_steps'))
    if is_train:
        p = rl.AutoRestartProxyRLEnviron(p)
    else:
        # At inference time, break "stuck" loops: after too many repeats of the
        # same action, force action 1 (typically FIRE in Atari action sets).
        p = rl.GymPreventStuckProxyRLEnviron(p, get_env('a3c.inference.max_antistuck_repeat'), 1)
    return p
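`GymPreventStuckProxyRLEnviron` only wraps the player at inference time. A minimal sketch of the idea behind such a proxy, inferred from its name and arguments; the class below is illustrative, not TArtist's implementation:

from collections import deque

class PreventStuckSketch:
    """Illustrative: if the same action arrives `max_repeat` times in a row,
    substitute `fallback_action` (1 is FIRE in most Atari action sets)."""

    def __init__(self, player, max_repeat, fallback_action):
        self.player = player
        self.recent = deque(maxlen=max_repeat)
        self.fallback_action = fallback_action

    def action(self, a):
        self.recent.append(a)
        if len(self.recent) == self.recent.maxlen and len(set(self.recent)) == 1:
            a = self.fallback_action  # force a different action to un-stick
            self.recent.clear()
        return self.player.action(a)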
Example #4
def make_player(is_train=True, dump_dir=None):
    def resize_state(s):
        # Downsample to `dqn.input_shape`, then reduce to a single gray channel.
        return image.grayscale(
            image.resize(s,
                         get_env('dqn.input_shape'),
                         interpolation='NEAREST'))

    # `live_lost_as_eoe=True` treats losing a life as end-of-episode, a common
    # DQN training trick; at evaluation time the full game is one episode.
    p = rl.GymAtariRLEnviron(get_env('dqn.env_name'),
                             live_lost_as_eoe=is_train,
                             dump_dir=dump_dir)
    # Frame skip: repeat each chosen action `dqn.frame_skip` times.
    p = rl.RepeatActionProxyRLEnviron(p, get_env('dqn.frame_skip'))
    p = rl.MapStateProxyRLEnviron(p, resize_state)
    p = rl.HistoryFrameProxyRLEnviron(p, get_env('dqn.nr_history_frames'))

    p = rl.LimitLengthProxyRLEnviron(p, get_env('dqn.max_nr_steps'))
    if not is_train:
        p = rl.GymPreventStuckProxyRLEnviron(
            p, get_env('dqn.inference.max_antistuck_repeat'), 1)
    return p
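The DQN preprocessing resizes and grayscales each frame through `tartist.image` helpers. A rough OpenCV equivalent, assuming those helpers amount to nearest-neighbor resizing plus RGB-to-gray conversion (an assumption, not TArtist's code):

import cv2

def resize_state_cv2(s, shape=(84, 84)):
    # cv2.resize expects (width, height); raw Atari frames are (210, 160, 3) RGB.
    s = cv2.resize(s, shape, interpolation=cv2.INTER_NEAREST)
    return cv2.cvtColor(s, cv2.COLOR_RGB2GRAY)  # -> (84, 84) uint8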
Example #5
def make_player(dump_dir=None):
    p = rl.GymRLEnviron(get_env('ppo.env_name'), dump_dir=dump_dir)
    p = rl.LimitLengthProxyRLEnviron(p, get_env('ppo.max_nr_steps'))
    return p
Example #6
def make_player(dump_dir=None):
    p = rl.GymRLEnviron('CartPole-v0', dump_dir=dump_dir)
    # CartPole-v0 caps episodes at 200 steps; mirror that limit here.
    p = rl.LimitLengthProxyRLEnviron(p, 200)
    return p
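Any player built by these factories can be driven in a loop like the one below. The `restart`/`current_state`/`action` interface, with `action` returning `(reward, is_over)`, follows how TArtist's example scripts use these environs; treat the exact signatures as an assumption:

import random

player = make_player()              # the CartPole factory above
player.restart()
total_reward = 0
while True:
    state = player.current_state    # the observation an agent would act on
    a = random.randrange(2)         # CartPole-v0 has two discrete actions
    reward, is_over = player.action(a)
    total_reward += reward
    if is_over:
        break
print('episode reward:', total_reward)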