Example #1
    def test_custom_input_procedure(self):
        class CustomJsonReader(JsonReader):
            def __init__(self, ioctx: IOContext):
                super().__init__(ioctx.input_config["input_files"], ioctx)

        def input_creator(ioctx: IOContext) -> InputReader:
            return ShuffledInput(CustomJsonReader(ioctx))

        register_input("custom_input", input_creator)
        test_input_procedure = [
            "custom_input",
            input_creator,
            "ray.rllib.examples.custom_input_api.CustomJsonReader",
        ]
        for input_procedure in test_input_procedure:
            for fw in framework_iterator(frameworks=("torch", "tf")):
                self.write_outputs(self.test_dir, fw)
                agent = PG(
                    env="CartPole-v0",
                    config={
                        "input": input_procedure,
                        "input_config": {
                            "input_files": self.test_dir + fw
                        },
                        "off_policy_estimation_methods": {},
                        "framework": fw,
                    },
                )
                result = agent.train()
                self.assertEqual(result["timesteps_total"], 250)
                self.assertTrue(np.isnan(result["episode_reward_mean"]))
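These snippets are test methods excerpted from RLlib's test suites, so they assume a surrounding unittest.TestCase with Ray initialized, a temporary output directory, and the write_outputs helper shown in Example #3. The scaffold below is a minimal sketch of that assumed context; it is not part of the original tests, and the module paths follow the Ray 2.0-era layout (they may differ in other Ray versions).

# Assumed scaffolding (not part of the excerpted tests): shared imports and
# fixtures that the I/O-related test methods in this listing rely on.
import shutil
import tempfile
import unittest

import ray
from ray.rllib.algorithms.pg import PG
from ray.rllib.offline import InputReader, IOContext, JsonReader, ShuffledInput
from ray.rllib.utils.test_utils import framework_iterator
from ray.tune.registry import register_env, register_input  # register_input location is an assumption


class AgentIOTest(unittest.TestCase):
    """Hypothetical container class for the I/O examples (Examples 1, 3, 6-8, 13, 17, 21)."""

    def setUp(self):
        ray.init(num_cpus=4, ignore_reinit_error=True)
        # Directory that write_outputs() (Example #3) fills with JSON rollouts;
        # the tests append the framework name, e.g. self.test_dir + "tf".
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.test_dir, ignore_errors=True)
        ray.shutdown()

The individual examples additionally import what they use directly (numpy as np, glob, json, os, random, time, Counter), and the multi-agent examples pull their environments (e.g. MultiAgentCartPole, FlexAgentsMultiAgent, RandomEnv) from ray.rllib.examples.env in the same Ray 2.0-era layout.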
Example #2
    def test_local(self):
        cf = DEFAULT_CONFIG.copy()
        cf["model"]["fcnet_hiddens"] = [10]
        cf["num_workers"] = 2

        for _ in framework_iterator(cf):
            agent = PG(cf, "CartPole-v0")
            print(agent.train())
            agent.stop()
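For reference, the same local smoke test can be written against the config-builder API that later Ray releases use instead of plain config dicts. This is a minimal sketch assuming Ray 2.x, where PGConfig and AlgorithmConfig.build() are available; builder method names may differ slightly between releases.

# Equivalent of Example #2 with the AlgorithmConfig builder API (assumes Ray 2.x).
from ray.rllib.algorithms.pg import PGConfig

config = (
    PGConfig()
    .environment("CartPole-v0")
    .rollouts(num_rollout_workers=2)
    .framework("torch")
    .training(model={"fcnet_hiddens": [10]})
)
algo = config.build()
print(algo.train())
algo.stop()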
Example #3
 def write_outputs(self, output, fw, output_config=None):
     agent = PG(
         env="CartPole-v0",
         config={
             "output": output + (fw if output != "logdir" else ""),
             "rollout_fragment_length": 250,
             "framework": fw,
             "output_config": output_config or {},
         },
     )
     agent.train()
     return agent
Example #4
 def test_multi_agent_with_flex_agents(self):
     register_env("flex_agents_multi_agent_cartpole",
                  lambda _: FlexAgentsMultiAgent())
     pg = PG(
         env="flex_agents_multi_agent_cartpole",
         config={
             "num_workers": 0,
             "framework": "tf",
         },
     )
     for i in range(10):
         result = pg.train()
         print("Iteration {}, reward {}, timesteps {}".format(
             i, result["episode_reward_mean"], result["timesteps_total"]))
Example #5
    def test_multi_agent_dict_invalid_sub_values(self):
        config = {"multiagent": {"count_steps_by": "invalid_value"}}
        self.assertRaisesRegex(
            ValueError,
            "config.multiagent.count_steps_by must be",
            lambda: PG(config, env="CartPole-v0"),
        )

        config = {"multiagent": {"replay_mode": "invalid_value"}}
        self.assertRaisesRegex(
            ValueError,
            "`config.multiagent.replay_mode` must be",
            lambda: PG(config, env="CartPole-v0"),
        )
Example #6
 def test_agent_input_list(self):
     for fw in framework_iterator(frameworks=("torch", "tf")):
         self.write_outputs(self.test_dir, fw)
         agent = PG(
             env="CartPole-v0",
             config={
                 "input": glob.glob(self.test_dir + fw + "/*.json"),
                 "off_policy_estimation_methods": {},
                 "rollout_fragment_length": 99,
                 "framework": fw,
             },
         )
         result = agent.train()
         self.assertEqual(result["timesteps_total"], 250)  # read from input
         self.assertTrue(np.isnan(result["episode_reward_mean"]))
Example #7
 def test_agent_input_dir(self):
     for fw in framework_iterator(frameworks=("torch", "tf")):
         self.write_outputs(self.test_dir, fw)
         print("WROTE TO: ", self.test_dir)
         agent = PG(
             env="CartPole-v0",
             config={
                 "input": self.test_dir + fw,
                 "off_policy_estimation_methods": {},
                 "framework": fw,
             },
         )
         result = agent.train()
         self.assertEqual(result["timesteps_total"], 250)  # read from input
         self.assertTrue(np.isnan(result["episode_reward_mean"]))
Example #8
 def test_agent_input_dict(self):
     for fw in framework_iterator():
         self.write_outputs(self.test_dir, fw)
         agent = PG(
             env="CartPole-v0",
             config={
                 "input": {
                     self.test_dir + fw: 0.1,
                     "sampler": 0.9,
                 },
                 "train_batch_size": 2000,
                 "framework": fw,
             },
         )
         result = agent.train()
         self.assertTrue(not np.isnan(result["episode_reward_mean"]))
Example #9
 def test_train_cartpole(self):
     register_env("test", lambda _: SimpleServing(gym.make("CartPole-v0")))
     config = {"num_workers": 0}
     for _ in framework_iterator(config, frameworks=("tf", "torch")):
         pg = PG(env="test", config=config)
         reached = False
         for i in range(80):
             result = pg.train()
             print("Iteration {}, reward {}, timesteps {}".format(
                 i, result["episode_reward_mean"],
                 result["timesteps_total"]))
             if result["episode_reward_mean"] >= 80:
                 reached = True
                 break
         if not reached:
             raise Exception("failed to improve reward")
Example #10
    def test_multi_agent(self):
        register_env(
            "multi_agent_cartpole", lambda _: MultiAgentCartPole({"num_agents": 10})
        )

        for fw in framework_iterator():
            pg = PG(
                env="multi_agent_cartpole",
                config={
                    "num_workers": 0,
                    "output": self.test_dir,
                    "multiagent": {
                        "policies": {"policy_1", "policy_2"},
                        "policy_mapping_fn": (
                            lambda aid, **kwargs: random.choice(
                                ["policy_1", "policy_2"]
                            )
                        ),
                    },
                    "framework": fw,
                },
            )
            pg.train()
            self.assertEqual(len(os.listdir(self.test_dir)), 1)

            pg.stop()
            pg = PG(
                env="multi_agent_cartpole",
                config={
                    "num_workers": 0,
                    "input": self.test_dir,
                    "off_policy_estimation_methods": {
                        "simulation": {"type": "simulation"}
                    },
                    "train_batch_size": 2000,
                    "multiagent": {
                        "policies": {"policy_1", "policy_2"},
                        "policy_mapping_fn": (
                            lambda aid, **kwargs: random.choice(
                                ["policy_1", "policy_2"]
                            )
                        ),
                    },
                    "framework": fw,
                },
            )
            for _ in range(50):
                result = pg.train()
                if not np.isnan(result["episode_reward_mean"]):
                    return  # simulation ok
                time.sleep(0.1)
            assert False, "did not see any simulation results"
Example #11
 def test_multiple_output_workers(self):
     ray.shutdown()
     ray.init(num_cpus=4, ignore_reinit_error=True)
     for fw in framework_iterator(frameworks=["tf", "torch"]):
         agent = PG(
             env="CartPole-v0",
             config={
                 "num_workers": 2,
                 "output": self.test_dir + fw,
                 "rollout_fragment_length": 250,
                 "framework": fw,
             },
         )
         agent.train()
         self.assertEqual(len(os.listdir(self.test_dir + fw)), 2)
         reader = JsonReader(self.test_dir + fw + "/*.json")
         reader.next()
Example #12
 def test_train_multi_agent_cartpole_single_policy(self):
     n = 10
     register_env("multi_agent_cartpole",
                  lambda _: MultiAgentCartPole({"num_agents": n}))
     pg = PG(
         env="multi_agent_cartpole",
         config={
             "num_workers": 0,
             "framework": "tf",
         },
     )
     for i in range(50):
         result = pg.train()
         print("Iteration {}, reward {}, timesteps {}".format(
             i, result["episode_reward_mean"], result["timesteps_total"]))
         if result["episode_reward_mean"] >= 50 * n:
             return
     raise Exception("failed to improve reward")
Example #13
 def test_agent_input_eval_sim(self):
     for fw in framework_iterator():
         self.write_outputs(self.test_dir, fw)
         agent = PG(
             env="CartPole-v0",
             config={
                 "input": self.test_dir + fw,
                 "off_policy_estimation_methods": {
                     "simulation": {"type": "simulation"}
                 },
                 "framework": fw,
             },
         )
         for _ in range(50):
             result = agent.train()
             if not np.isnan(result["episode_reward_mean"]):
                 return  # simulation ok
             time.sleep(0.1)
         assert False, "did not see any simulation results"
Example #14
    def test_multi_agent(self):
        register_env("multi_agent_cartpole",
                     lambda _: MultiAgentCartPole({"num_agents": 10}))

        for fw in framework_iterator():
            pg = PG(
                env="multi_agent_cartpole",
                config={
                    "num_workers": 0,
                    "output": self.test_dir + fw,
                    "multiagent": {
                        "policies": {"policy_1", "policy_2"},
                        "policy_mapping_fn":
                        (lambda aid, **kwargs: random.choice(
                            ["policy_1", "policy_2"])),
                    },
                    "framework": fw,
                },
            )
            pg.train()
            self.assertEqual(len(os.listdir(self.test_dir + fw)), 1)
            pg.stop()
            pg = PG(
                env="multi_agent_cartpole",
                config={
                    "num_workers": 0,
                    "input": self.test_dir + fw,
                    "train_batch_size": 2000,
                    "multiagent": {
                        "policies": {"policy_1", "policy_2"},
                        "policy_mapping_fn":
                        (lambda aid, **kwargs: random.choice(
                            ["policy_1", "policy_2"])),
                    },
                    "framework": fw,
                    "evaluation_interval": 1,
                    "evaluation_config": {
                        "input": "sampler"
                    },
                },
            )
            result = pg.train()
            assert np.isnan(
                result["episode_reward_mean"]
            ), "episode reward should not be computed for offline data"
            assert not np.isnan(
                result["evaluation"]["episode_reward_mean"]
            ), "Did not see simulation results during evaluation"
Example #15
    def test_nested_action_spaces(self):
        config = DEFAULT_CONFIG.copy()
        config["env"] = RandomEnv
        # Write output to check whether actions are written correctly.
        tmp_dir = os.popen("mktemp -d").read()[:-1]
        if not os.path.exists(tmp_dir):
            # Last resort: resolve via the underlying tempdir (cutting tmp_dir's leading "/tmp").
            tmp_dir = ray._private.utils.tempfile.gettempdir() + tmp_dir[4:]
            assert os.path.exists(tmp_dir), f"'{tmp_dir}' not found!"
        config["output"] = tmp_dir
        # Switch off OPE as we don't write action-probs.
        # TODO: We should probably always write those if `output` is given.
        config["off_policy_estimation_methods"] = {}

        # Pretend actions in offline files are already normalized.
        config["actions_in_input_normalized"] = True

        for _ in framework_iterator(config):
            for name, action_space in SPACES.items():
                config["env_config"] = {
                    "action_space": action_space,
                }
                for flatten in [True, False]:
                    print(f"A={action_space} flatten={flatten}")
                    shutil.rmtree(config["output"])
                    config["_disable_action_flattening"] = not flatten
                    trainer = PG(config)
                    trainer.train()
                    trainer.stop()

                    # Check actions in output file (whether properly flattened
                    # or not).
                    reader = JsonReader(
                        inputs=config["output"],
                        ioctx=trainer.workers.local_worker().io_context,
                    )
                    sample_batch = reader.next()
                    if flatten:
                        assert isinstance(sample_batch["actions"], np.ndarray)
                        assert len(sample_batch["actions"].shape) == 2
                        assert sample_batch["actions"].shape[0] == len(
                            sample_batch)
                    else:
                        tree.assert_same_structure(
                            trainer.get_policy().action_space_struct,
                            sample_batch["actions"],
                        )

                    # Test whether the offline data can be properly read by a
                    # BC trainer configured accordingly.
                    config["input"] = config["output"]
                    del config["output"]
                    bc_trainer = BC(config=config)
                    bc_trainer.train()
                    bc_trainer.stop()
                    config["output"] = tmp_dir
                    config["input"] = "sampler"
Example #16
 def test_multi_agent_dict_bad_policy_ids(self):
     config = {
         "multiagent": {
             "policies": {1, "good_id"},
             "policy_mapping_fn": lambda aid, **kw: "good_id",
         }
     }
     self.assertRaisesRegex(
         KeyError,
         "Policy IDs must always be of type",
         lambda: PG(config, env="CartPole-v0"),
     )
Example #17
 def test_agent_input_eval_sampler(self):
     for fw in framework_iterator(frameworks=["tf", "torch"]):
         self.write_outputs(self.test_dir, fw)
         agent = PG(
             env="CartPole-v0",
             config={
                 "input": self.test_dir + fw,
                 "framework": fw,
                 "evaluation_interval": 1,
                 "evaluation_config": {
                     "input": "sampler"
                 },
             },
         )
         result = agent.train()
         assert np.isnan(
             result["episode_reward_mean"]
         ), "episode reward should not be computed for offline data"
         assert not np.isnan(
             result["evaluation"]["episode_reward_mean"]
         ), "Did not see simulation results during evaluation"
Example #18
 def test_callbacks(self):
     for fw in framework_iterator(frameworks=("torch", "tf")):
         counts = Counter()
         pg = PG(
             env="CartPole-v0",
             config={
                 "num_workers": 0,
                 "rollout_fragment_length": 50,
                 "train_batch_size": 50,
                 "callbacks": {
                     "on_episode_start":
                     lambda x: counts.update({"start": 1}),
                     "on_episode_step":
                     lambda x: counts.update({"step": 1}),
                     "on_episode_end": lambda x: counts.update({"end": 1}),
                     "on_sample_end":
                     lambda x: counts.update({"sample": 1}),
                 },
                 "framework": fw,
             },
         )
         pg.train()
         pg.train()
         self.assertGreater(counts["sample"], 0)
         self.assertGreater(counts["start"], 0)
         self.assertGreater(counts["end"], 0)
         self.assertGreater(counts["step"], 0)
         pg.stop()
Example #19
 def test_multi_agent_dict_invalid_subkeys(self):
     config = {
         "multiagent": {
             "wrong_key": 1,
             "policies": {"p0"},
             "policies_to_train": ["p0"],
         }
     }
     self.assertRaisesRegex(
         KeyError,
         "You have invalid keys in your",
         lambda: PG(config, env="CartPole-v0"),
     )
Example #20
 def test_query_evaluators(self):
     register_env("test", lambda _: gym.make("CartPole-v0"))
     for fw in framework_iterator(frameworks=("torch", "tf")):
         pg = PG(
             env="test",
             config={
                 "num_workers": 2,
                 "rollout_fragment_length": 5,
                 "num_envs_per_worker": 2,
                 "framework": fw,
                 "create_env_on_driver": True,
             },
         )
         results = pg.workers.foreach_worker(
             lambda ev: ev.rollout_fragment_length)
         results2 = pg.workers.foreach_worker_with_index(
             lambda ev, i: (i, ev.rollout_fragment_length))
         results3 = pg.workers.foreach_worker(
             lambda ev: ev.foreach_env(lambda env: 1))
         self.assertEqual(results, [10, 10, 10])
         self.assertEqual(results2, [(0, 10), (1, 10), (2, 10)])
         self.assertEqual(results3, [[1, 1], [1, 1], [1, 1]])
         pg.stop()
Example #21
    def test_agent_input_postprocessing_enabled(self):
        for fw in framework_iterator(frameworks=("tf", "torch")):
            self.write_outputs(self.test_dir, fw)

            # Rewrite the files to drop "advantages" and "value_targets"
            # for testing.
            for path in glob.glob(self.test_dir + fw + "/*.json"):
                out = []
                with open(path) as f:
                    for line in f.readlines():
                        data = json.loads(line)
                        # The written data won't contain "rewards", as these
                        # are not needed in the SampleBatch produced by the
                        # write_outputs run. Repurpose the "advantages" column
                        # as "rewards" just for testing.
                        data["rewards"] = data["advantages"]
                        del data["advantages"]
                        if "value_targets" in data:
                            del data["value_targets"]
                        out.append(data)
                with open(path, "w") as f:
                    for data in out:
                        f.write(json.dumps(data))

            agent = PG(
                env="CartPole-v0",
                config={
                    "input": self.test_dir + fw,
                    "off_policy_estimation_methods": {},
                    "postprocess_inputs": True,  # adds back 'advantages'
                    "framework": fw,
                },
            )

            result = agent.train()
            self.assertEqual(result["timesteps_total"], 250)  # read from input
            self.assertTrue(np.isnan(result["episode_reward_mean"]))
Example #22
 def test_no_step_on_init(self):
     register_env("fail", lambda _: FailOnStepEnv())
     for fw in framework_iterator():
         # We expect this to fail already on Trainer init due
         # to the env sanity check right after env creation (inside
         # RolloutWorker).
         self.assertRaises(
             Exception,
             lambda: PG(
                 env="fail",
                 config={
                     "num_workers": 2,
                     "framework": fw,
                 },
             ),
         )
Example #23
    def test_train_multi_agent_cartpole_multi_policy(self):
        n = 10
        register_env("multi_agent_cartpole",
                     lambda _: MultiAgentCartPole({"num_agents": n}))

        def gen_policy():
            config = {
                "gamma": random.choice([0.5, 0.8, 0.9, 0.95, 0.99]),
                "n_step": random.choice([1, 2, 3, 4, 5]),
            }
            return PolicySpec(config=config)

        pg = PG(
            env="multi_agent_cartpole",
            config={
                "num_workers": 0,
                "multiagent": {
                    "policies": {
                        "policy_1": gen_policy(),
                        "policy_2": gen_policy(),
                    },
                    "policy_mapping_fn": lambda aid, **kwargs: "policy_1",
                },
                "framework": "tf",
            },
        )

        # Just check that it runs without crashing
        for i in range(10):
            result = pg.train()
            print("Iteration {}, reward {}, timesteps {}".format(
                i, result["episode_reward_mean"], result["timesteps_total"]))
        self.assertTrue(
            pg.compute_single_action([0, 0, 0, 0], policy_id="policy_1") in
            [0, 1])
        self.assertTrue(
            pg.compute_single_action([0, 0, 0, 0], policy_id="policy_2") in
            [0, 1])
        self.assertRaisesRegex(
            KeyError,
            "not found in PolicyMap",
            lambda: pg.compute_single_action([0, 0, 0, 0],
                                             policy_id="policy_3"),
        )