Example #1
def discrete_atari_env():
    env = AtariEnvironment(name="MsPacman-v0",
                           clone_seeds=True,
                           autoreset=True)
    env.reset()
    env = DiscreteEnv(env)
    return env
Example #2
def __init__(self, swarm, *args, **kwargs):
    super(DistributedRam, self).__init__(swarm=swarm, *args, **kwargs)
    self.local_swarm = swarm()
    env = self.local_swarm.env
    env_name = env.name if isinstance(
        env, ParallelEnvironment) else env._env.name
    self.local_env = AtariEnvironment(name=env_name, clone_seeds=True)
    self.local_env.reset()
Example #3
def atari_environment():
    game_name = "MsPacman-v0"
    plangym_env = AtariEnvironment(
        name=game_name,
        clone_seeds=True,
        autoreset=True,
    )
    return DiscreteEnv(env=plangym_env)
Example #4
def atari_env():
    # Wrap the plangym Atari environment so it exposes a discrete action space.
    env = AtariEnvironment(name="MsPacman-v0",
                           clone_seeds=True,
                           autoreset=True)
    env.reset()
    env = DiscreteEnv(env)
    # Batched state container: one action and one critic value per walker.
    params = {
        "actions": {
            "dtype": np.int64
        },
        "critic": {
            "dtype": np.float32
        }
    }
    states = States(state_dict=params, batch_size=N_WALKERS)
    states.update(actions=np.ones(N_WALKERS), critic=np.ones(N_WALKERS))
    return env, states
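A minimal usage sketch for the atari_env factory above, not taken from the original source: it assumes that States exposes its updated arrays as attributes (the way fragile code reads them elsewhere) and uses a hypothetical N_WALKERS value for illustration.

N_WALKERS = 8  # hypothetical batch size for illustration

env, states = atari_env()
# The States container holds one entry per walker for every registered key.
print(states.actions.shape, states.critic.shape)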
Example #5
def create_atari_swarm():
    env = AtariEnvironment(name="MsPacman-ram-v0", )
    dt = GaussianDt(min_dt=10, max_dt=100, loc_dt=5, scale_dt=2)
    swarm = Swarm(
        model=lambda x: DiscreteUniform(env=x, critic=dt),
        env=lambda: DiscreteEnv(env),
        n_walkers=6,
        max_epochs=10,
        reward_scale=2,
        reward_limit=1,
    )
    return swarm
Example #6
def create_atari_swarm():
    # RAM observations of MsPacman; clone_seeds keeps the emulator RNG when
    # cloning states, so restores are deterministic.
    env = AtariEnvironment(name="MsPacman-ram-v0",
                           clone_seeds=True,
                           autoreset=True)
    # Critic that samples how many time steps each sampled action is applied.
    dt = GaussianDt(min_dt=3, max_dt=100, loc_dt=5, scale_dt=2)
    swarm = Swarm(
        model=lambda x: DiscreteUniform(env=x, critic=dt),
        walkers=Walkers,
        env=lambda: DiscreteEnv(env),
        n_walkers=67,
        max_epochs=500,
        reward_scale=2,
        reward_limit=751,
    )
    return swarm
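A usage sketch for create_atari_swarm, under the assumption that the installed fragile version exposes a run() method and a best_reward property on Swarm (verify against your version before relying on either).

# Usage sketch (assumptions noted above): run the search loop and report the
# best cumulative reward found by the walkers.
swarm = create_atari_swarm()
swarm.run()
print(swarm.best_reward)  # assumed Swarm property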
Example #7
def create_step_to_best_after_impr():
    from plangym import AtariEnvironment
    from fragile.core import GaussianDt

    env = AtariEnvironment(name="MsPacman-ram-v0", clone_seeds=True, autoreset=True)
    dt = GaussianDt(min_dt=3, max_dt=100, loc_dt=5, scale_dt=2)
    # StepToBest runs an inner swarm search for step_epochs epochs and then
    # steps towards the best walker found (step_after_improvement=True).
    swarm = StepToBest(
        model=lambda x: DiscreteUniform(env=x, critic=dt),
        env=lambda: DiscreteEnv(env),
        reward_limit=-100,
        n_walkers=67,
        max_epochs=60,
        step_epochs=5,
        step_after_improvement=True,
    )
    return swarm
Example #8
class DistributedRam(DistributedSwarm):
    def __init__(self, swarm, *args, **kwargs):
        super(DistributedRam, self).__init__(swarm=swarm, *args, **kwargs)
        # Keep a local swarm and a local Atari env for rendering frames from
        # RAM states.
        self.local_swarm = swarm()
        env = self.local_swarm.env
        env_name = env.name if isinstance(
            env, ParallelEnvironment) else env._env.name
        self.local_env = AtariEnvironment(name=env_name, clone_seeds=True)
        self.local_env.reset()

    def image_from_state(self, state):
        # Restore the emulator to the given RAM state and grab an RGB frame.
        self.local_env.set_state(state.astype(np.uint8).copy())
        self.local_env.step(0)
        return np.asarray(self.local_env._env.ale.getScreenRGB(),
                          dtype=np.uint8)

    def stream_progress(self, state, observation, reward):
        # Emit the current reward to the streaming plot and send the rendered
        # frame through the visualization pipe.
        example = pd.DataFrame({"reward": [reward]},
                               index=[self.n_iters // self.n_swarms])
        self.stream.emit(example)
        obs = self.image_from_state(state)
        self.frame_pipe.send(obs)
Example #9
def qbert_rgb():
    env = AtariEnvironment(name="Qbert-v0", clone_seeds=True, autoreset=True)
    env.reset()
    env = AtariEnv(env)
    return env
Example #10
def pacman_ram():
    env = AtariEnvironment(name="MsPacman-ram-v0", clone_seeds=True, autoreset=True)
    env.reset()
    env = AtariEnv(env)
    return env