Example #1
def collect_samples(agents,
                    num_timesteps,
                    gamma,
                    lam,
                    horizon,
                    observation_filter=NoFilter(),
                    reward_filter=NoFilter()):
    num_timesteps_so_far = 0
    trajectories = []
    total_rewards = []
    traj_len_means = []
    while num_timesteps_so_far < num_timesteps:
        trajectory_batch = ray.get([
            agent.compute_trajectory.remote(gamma, lam, horizon)
            for agent in agents
        ])
        trajectory = concatenate(trajectory_batch)
        trajectory = flatten(trajectory)
        not_done = np.logical_not(trajectory["dones"])
        total_rewards.append(
            trajectory["raw_rewards"][not_done].sum(axis=0).mean() /
            len(agents))
        traj_len_means.append(not_done.sum(axis=0).mean() / len(agents))
        trajectory = {key: val[not_done] for key, val in trajectory.items()}
        num_timesteps_so_far += len(trajectory["dones"])
        trajectories.append(trajectory)
    return (concatenate(trajectories), np.mean(total_rewards),
            np.mean(traj_len_means))
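This example and the ones below drop finished steps with a dict comprehension like `{key: val[not_done] for key, val in trajectory.items()}`. That is plain NumPy boolean-mask indexing applied to every array in the trajectory dict; a toy illustration (the field values here are made up):

import numpy as np

# A tiny fake trajectory: three steps, the last one marked done.
trajectory = {
    "dones": np.array([False, False, True]),
    "raw_rewards": np.array([1.0, 2.0, 3.0]),
}
not_done = np.logical_not(trajectory["dones"])
# Keep only the entries where the episode has not terminated yet.
filtered = {key: val[not_done] for key, val in trajectory.items()}
print(filtered["raw_rewards"])  # -> [1. 2.]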
Example #2
    def testConcatenate(self):
        d1 = {"s": np.array([0, 1]), "a": np.array([2, 3])}
        d2 = {"s": np.array([4, 5]), "a": np.array([6, 7])}
        d = concatenate([d1, d2])
        assert_allclose(d["s"], np.array([0, 1, 4, 5]))
        assert_allclose(d["a"], np.array([2, 3, 6, 7]))

        D = concatenate([d])
        assert_allclose(D["s"], np.array([0, 1, 4, 5]))
        assert_allclose(D["a"], np.array([2, 3, 6, 7]))
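This test pins down the contract of `concatenate`: given a list of dicts with identical keys, it joins the arrays key by key along their first axis. A minimal sketch that satisfies the test above (not necessarily the project's actual implementation):

import numpy as np

def concatenate(trajectory_list):
    # All dicts are assumed to share the same keys; arrays are joined
    # along their first axis, key by key.
    keys = trajectory_list[0].keys()
    return {
        key: np.concatenate([traj[key] for traj in trajectory_list])
        for key in keys
    }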
Example #3
def collect_samples(agents,
                    config,
                    observation_filter=NoFilter(),
                    reward_filter=NoFilter()):
    num_timesteps_so_far = 0
    trajectories = []
    total_rewards = []
    trajectory_lengths = []
    # This variable maps the object IDs of trajectories that are currently
    # computed to the agent that they are computed on; we start some initial
    # tasks here.
    agent_dict = {
        agent.compute_steps.remote(config["gamma"], config["lambda"],
                                   config["horizon"],
                                   config["min_steps_per_task"]): agent
        for agent in agents
    }
    while num_timesteps_so_far < config["timesteps_per_batch"]:
        # TODO(pcm): Make wait support arbitrary iterators and remove the
        # conversion to list here.
        [next_trajectory], waiting_trajectories = ray.wait(
            list(agent_dict.keys()))
        agent = agent_dict.pop(next_trajectory)
        # Start task with next trajectory and record it in the dictionary.
        agent_dict[agent.compute_steps.remote(
            config["gamma"], config["lambda"], config["horizon"],
            config["min_steps_per_task"])] = (agent)
        trajectory, rewards, lengths = ray.get(next_trajectory)
        total_rewards.extend(rewards)
        trajectory_lengths.extend(lengths)
        num_timesteps_so_far += len(trajectory["dones"])
        trajectories.append(trajectory)
    return (concatenate(trajectories), np.mean(total_rewards),
            np.mean(trajectory_lengths))
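The while loop above is the standard Ray pattern for keeping every worker busy: start one task per agent, `ray.wait` for whichever finishes first, immediately resubmit a task to that agent, and consume the finished result. Stripped of the PPO specifics, the same pattern looks roughly like this (the `simulate` function and its arguments are made up for illustration):

import ray

ray.init()

@ray.remote
def simulate(worker_id):
    # Stand-in for compute_steps: pretend to do some rollout work.
    return worker_id * 10

# Map in-flight object refs to the worker that produced them.
in_flight = {simulate.remote(i): i for i in range(4)}
results = []
while len(results) < 20:
    # Block until at least one task has finished.
    [ready], _ = ray.wait(list(in_flight.keys()))
    worker_id = in_flight.pop(ready)
    # Immediately keep that worker busy with a new task.
    in_flight[simulate.remote(worker_id)] = worker_id
    results.append(ray.get(ready))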
Example #4
File: agent.py  Project: andyhyh/ray
    def compute_steps(self, gamma, lam, horizon, min_steps_per_task=-1):
        """Compute multiple rollouts and concatenate the results.

        Args:
            gamma: MDP discount factor
            lam: GAE(lambda) parameter
            horizon: Number of steps after which a rollout gets cut
            min_steps_per_task: Lower bound on the number of states to be
                collected.

        Returns:
            trajectory: Dictionary of concatenated trajectory data.
            total_rewards: Total rewards of the trajectories.
            trajectory_lengths: Lengths of the trajectories.
        """
        num_steps_so_far = 0
        trajectories = []
        total_rewards = []
        trajectory_lengths = []
        while True:
            trajectory = self.compute_trajectory(gamma, lam, horizon)
            total_rewards.append(trajectory["raw_rewards"].sum(axis=0).mean())
            trajectory_lengths.append(
                np.logical_not(trajectory["dones"]).sum(axis=0).mean())
            trajectory = flatten(trajectory)
            not_done = np.logical_not(trajectory["dones"])
            # Filtering out states that are done. We do this because
            # trajectories are batched and cut only if all the trajectories
            # in the batch terminated, so we can potentially get rid of
            # some of the states here.
            trajectory = {
                key: val[not_done]
                for key, val in trajectory.items()
            }
            num_steps_so_far += trajectory["raw_rewards"].shape[0]
            trajectories.append(trajectory)
            if num_steps_so_far >= min_steps_per_task:
                break
        return concatenate(trajectories), total_rewards, trajectory_lengths
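Both compute_steps and the collect_samples variants call a `flatten` helper that is not shown in these excerpts. Judging from the comment above (trajectories are batched and only cut once every trajectory in the batch has terminated), a plausible minimal version merges the leading time and batch axes of every array in the trajectory dict. This is only a guess at the contract, not the project's actual code:

import numpy as np

def flatten(trajectory, start=0, stop=2):
    # Collapse the axes in [start, stop) of every array, e.g. merge the
    # leading (time, batch) axes into a single sample axis.
    return {
        key: val.reshape(val.shape[:start] + (-1,) + val.shape[stop:])
        for key, val in trajectory.items()
    }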
Example #5
def collect_samples(agents,
                    num_timesteps,
                    gamma,
                    lam,
                    horizon,
                    observation_filter=NoFilter(),
                    reward_filter=NoFilter()):
    num_timesteps_so_far = 0
    trajectories = []
    total_rewards = []
    traj_len_means = []
    # This variable maps the object IDs of trajectories that are currently
    # computed to the agent that they are computed on; we start some initial
    # tasks here.
    agent_dict = {
        agent.compute_trajectory.remote(gamma, lam, horizon): agent
        for agent in agents
    }
    while num_timesteps_so_far < num_timesteps:
        # TODO(pcm): Make wait support arbitrary iterators and remove the
        # conversion to list here.
        [next_trajectory], waiting_trajectories = ray.wait(
            list(agent_dict.keys()))
        agent = agent_dict.pop(next_trajectory)
        # Start task with next trajectory and record it in the dictionary.
        agent_dict[agent.compute_trajectory.remote(
            gamma, lam, horizon)] = agent
        trajectory = flatten(ray.get(next_trajectory))
        not_done = np.logical_not(trajectory["dones"])
        total_rewards.append(
            trajectory["raw_rewards"][not_done].sum(axis=0).mean())
        traj_len_means.append(not_done.sum(axis=0).mean())
        trajectory = {key: val[not_done] for key, val in trajectory.items()}
        num_timesteps_so_far += len(trajectory["dones"])
        trajectories.append(trajectory)
    return (concatenate(trajectories), np.mean(total_rewards),
            np.mean(traj_len_means))
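Every collect_samples variant above defaults `observation_filter` and `reward_filter` to `NoFilter()`, but the filters are never applied in the excerpts shown; presumably they are used inside the agents' rollout code. A minimal identity filter that would satisfy these call sites (an assumption, not the project's actual class) could be as simple as:

import numpy as np

class NoFilter(object):
    """Identity filter: returns observations/rewards unchanged."""

    def __call__(self, x, update=True):
        return np.asarray(x)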