Example #1
    def test_cql_compilation(self):
        """Test whether a CQLTrainer can be built with all frameworks."""

        # Learns from a historic-data file.
        # To generate this data, first run:
        # $ ./train.py --run=SAC --env=Pendulum-v0 \
        #   --stop='{"timesteps_total": 50000}' \
        #   --config='{"output": "/tmp/out"}'
        rllib_dir = Path(__file__).parent.parent.parent.parent
        print("rllib dir={}".format(rllib_dir))
        data_file = os.path.join(rllib_dir, "tests/data/pendulum/small.json")
        print("data_file={} exists={}".format(data_file,
                                              os.path.isfile(data_file)))

        config = cql.CQL_DEFAULT_CONFIG.copy()
        config["env"] = "Pendulum-v0"
        config["input"] = [data_file]

        # In the files we use here for testing, actions have already
        # been normalized.
        # This is usually the case when the file was generated by another
        # RLlib algorithm (e.g. PPO or SAC).
        config["actions_in_input_normalized"] = False
        config["clip_actions"] = True
        config["train_batch_size"] = 2000

        config["num_workers"] = 0  # Run locally.
        config["twin_q"] = True
        config["learning_starts"] = 0
        config["bc_iters"] = 2  # 2 BC iters, 2 CQL iters.
        config["rollout_fragment_length"] = 1

        # Switch on off-policy evaluation.
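        # ("is" = the importance-sampling off-policy estimator.)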
        config["input_evaluation"] = ["is"]

        config["evaluation_interval"] = 2
        config["evaluation_num_episodes"] = 10
        config["evaluation_config"]["input"] = "sampler"
        config["evaluation_parallel_to_training"] = False
        config["evaluation_num_workers"] = 2

        num_iterations = 4

        # Test for tf/torch frameworks.
        for fw in framework_iterator(config):
            trainer = cql.CQLTrainer(config=config)
            for i in range(num_iterations):
                results = trainer.train()
                check_train_results(results)
                print(results)
                eval_results = results.get("evaluation")
                if eval_results:
                    print(f"iter={trainer.iteration} "
                          f"R={eval_results['episode_reward_mean']}")

            check_compute_single_action(trainer)

            # Get policy and model.
            pol = trainer.get_policy()
            cql_model = pol.model
            if fw == "tf":
                pol.get_session().__enter__()

            # Example of how to run an evaluation on the trained Trainer
            # using the data from CQL's global replay buffer.
            # Get a sample (MultiAgentBatch -> SampleBatch).
            from ray.rllib.agents.cql.cql import replay_buffer
            batch = replay_buffer.replay().policy_batches["default_policy"]

            if fw == "torch":
                obs = torch.from_numpy(batch["obs"])
            else:
                obs = batch["obs"]
                batch["actions"] = batch["actions"].astype(np.float32)

            # Pass the observations through our model to get the features,
            # which we then pass through the Q-head.
            model_out, _ = cql_model({"obs": obs})
            # The estimated Q-values from the (historic) actions in the batch.
            if fw == "torch":
                q_values_old = cql_model.get_q_values(
                    model_out, torch.from_numpy(batch["actions"]))
            else:
                q_values_old = cql_model.get_q_values(
                    tf.convert_to_tensor(model_out), batch["actions"])

            # The estimated Q-values for the new actions computed
            # by our trainer policy.
            actions_new = pol.compute_actions_from_input_dict({"obs": obs})[0]
            if fw == "torch":
                q_values_new = cql_model.get_q_values(
                    model_out, torch.from_numpy(actions_new))
            else:
                q_values_new = cql_model.get_q_values(model_out, actions_new)

            if fw == "tf":
                q_values_old, q_values_new = pol.get_session().run(
                    [q_values_old, q_values_new])

            print(f"Q-val batch={q_values_old}")
            print(f"Q-val policy={q_values_new}")

            if fw == "tf":
                pol.get_session().__exit__(None, None, None)

            trainer.stop()
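
A side note (a minimal sketch, not part of the test above): the offline JSON
file referenced in the comments can also be loaded directly with RLlib's
JsonReader for quick inspection:

    from ray.rllib.offline.json_reader import JsonReader

    reader = JsonReader([data_file])  # same data_file path as in the test
    sample_batch = reader.next()  # reads one SampleBatch from the file
    print(sample_batch.count, sample_batch["obs"].shape)
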
Example #2
    def test_cql_compilation(self):
        """Test whether a CQLTrainer can be built with all frameworks."""

        # Learns from a historic-data file.
        # To generate this data, first run:
        # $ ./train.py --run=SAC --env=Pendulum-v0 \
        #   --stop='{"timesteps_total": 50000}' \
        #   --config='{"output": "/tmp/out"}'
        rllib_dir = Path(__file__).parent.parent.parent.parent
        print("rllib dir={}".format(rllib_dir))
        data_file = os.path.join(rllib_dir, "tests/data/pendulum/small.json")
        print("data_file={} exists={}".format(data_file,
                                              os.path.isfile(data_file)))

        config = cql.CQL_DEFAULT_CONFIG.copy()
        config["env"] = "Pendulum-v0"
        config["input"] = [data_file]

        config["num_workers"] = 0  # Run locally.
        config["twin_q"] = True
        config["clip_actions"] = False
        config["normalize_actions"] = True
        config["learning_starts"] = 0
        config["rollout_fragment_length"] = 1
        config["train_batch_size"] = 10

        # Switch on off-policy evaluation.
        config["input_evaluation"] = ["is"]

        num_iterations = 2

        # Test for the torch framework only.
        for _ in framework_iterator(config, frameworks="torch"):
            trainer = cql.CQLTrainer(config=config)
            for i in range(num_iterations):
                print(trainer.train())

            check_compute_single_action(trainer)

            # Get policy, model, and replay-buffer.
            pol = trainer.get_policy()
            cql_model = pol.model
            from ray.rllib.agents.cql.cql import replay_buffer

            # Example of how to run an evaluation on the trained Trainer
            # using the data from our buffer.
            # Get a sample (MultiAgentBatch -> SampleBatch).
            batch = replay_buffer.replay().policy_batches["default_policy"]
            obs = torch.from_numpy(batch["obs"])
            # Pass the observations through our model to get the features,
            # which we then pass through the Q-head.
            model_out, _ = cql_model({"obs": obs})
            # The estimated Q-values from the (historic) actions in the batch.
            q_values_old = cql_model.get_q_values(
                model_out, torch.from_numpy(batch["actions"]))
            # The estimated Q-values for the new actions computed
            # by our trainer policy.
            actions_new = pol.compute_actions_from_input_dict({"obs": obs})[0]
            q_values_new = cql_model.get_q_values(
                model_out, torch.from_numpy(actions_new))
            print(f"Q-val batch={q_values_old}")
            print(f"Q-val policy={q_values_new}")

            trainer.stop()
Example #3
    # If you would like to query CQL's learnt Q-function for arbitrary
    # (continuous) actions, do the following:
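    # (Pendulum-v0: 3-dim observation space, 1-dim continuous action space,
    # hence the (5, 3) and (5, 1) shapes below.)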
    # (.float(): np.random.random() yields float64; the model expects float32.)
    obs_batch = torch.from_numpy(np.random.random(size=(5, 3))).float()
    action_batch = torch.from_numpy(np.random.random(size=(5, 1))).float()
    q_values = cql_model.get_q_values(obs_batch, action_batch)
    # If you are using the "twin_q", there'll be 2 Q-networks and
    # we usually consider the min of the 2 outputs, like so:
    twin_q_values = cql_model.get_twin_q_values(obs_batch, action_batch)
    final_q_values = torch.min(q_values, twin_q_values)
    print(final_q_values)

    # Example of how to run an evaluation on the trained Trainer
    # using the data from our buffer.
    # Get a sample (MultiAgentBatch -> SampleBatch).
    batch = replay_buffer.replay().policy_batches["default_policy"]
    obs = torch.from_numpy(batch["obs"])
    # Pass the observations through our model to get the features,
    # which we then pass through the Q-head.
    model_out, _ = cql_model({"obs": obs})
    # The estimated Q-values from the (historic) actions in the batch.
    q_values_old = cql_model.get_q_values(model_out,
                                          torch.from_numpy(batch["actions"]))
    # The estimated Q-values for the new actions computed
    # by our trainer policy.
    actions_new = pol.compute_actions_from_input_dict({"obs": obs})[0]
    q_values_new = cql_model.get_q_values(model_out,
                                          torch.from_numpy(actions_new))
    print(f"Q-val batch={q_values_old}")
    print(f"Q-val policy={q_values_new}")