    def test_readme_example(self):
        """
        Tests deterministic functionality of RandomEnv.
        """
        from rlgraph.agents import DQNAgent
        from rlgraph.environments import OpenAIGymEnv

        environment = OpenAIGymEnv('CartPole-v0')
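        # config_from_path is a helper from the surrounding test utilities that
        # loads a JSON agent config relative to the tests directory (assumed to
        # be imported elsewhere in this test module).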
        config = config_from_path("../../examples/configs/dqn_cartpole.json")

        # Create from .json file or dict, see agent API for all
        # possible configuration parameters.
        agent = DQNAgent.from_spec(config,
                                   state_space=environment.state_space,
                                   action_space=environment.action_space)

        # Get an action, take a step, observe reward.
        state = environment.reset()
        preprocessed_state, action = agent.get_action(
            states=state, extra_returns="preprocessed_states")

        # Execute step in environment.
        next_state, reward, terminal, info = environment.step(action)

        # Observe result.
        agent.observe(preprocessed_states=preprocessed_state,
                      actions=action,
                      internals=[],
                      next_states=next_state,
                      rewards=reward,
                      terminals=terminal)

        # Call update when desired:
        loss = agent.update()
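        # A minimal sketch (an illustrative addition, not part of the original
        # README snippet) showing how the single act/observe/update step above
        # extends to a short interaction loop, using only the APIs already shown.
        # The step count is arbitrary, and calling update() every step assumes
        # the agent has observed enough samples for its configured batch size.
        state = environment.reset()
        for _ in range(10):
            preprocessed_state, action = agent.get_action(
                states=state, extra_returns="preprocessed_states")
            next_state, reward, terminal, info = environment.step(action)
            agent.observe(preprocessed_states=preprocessed_state,
                          actions=action,
                          internals=[],
                          next_states=next_state,
                          rewards=reward,
                          terminals=terminal)
            loss = agent.update()
            # Start a new episode when the current one terminates.
            state = environment.reset() if terminal else next_state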
Example #2
    def test_openai_atari_env(self):
        import numpy as np
        from rlgraph.environments import OpenAIGymEnv

        env = OpenAIGymEnv("Pong-v0")

        # Simple test runs with fixed actions.
        s = env.reset()
        # Assert we have pixels.
        self.assertGreaterEqual(np.mean(s), 0)
        self.assertLessEqual(np.mean(s), 255)
        accum_reward = 0.0
        for _ in range(100):
            s, r, t, _ = env.step(env.action_space.sample())
            assert isinstance(r, np.ndarray)
            assert r.dtype == np.float32
            assert isinstance(t, bool)
            self.assertGreaterEqual(np.mean(s), 0)
            self.assertLessEqual(np.mean(s), 255)
            accum_reward += r

        print("Accumulated Reward: ".format(accum_reward))