Example #1
    def finish_path(self, last_val: float) -> float:
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.

        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
        """

        path_slice = slice(self.path_start_idx, self.ptr)
        rews = np.append(self.rew_buf[path_slice], last_val)
        vals = np.append(self.val_buf[path_slice], last_val)

        # the next two lines implement GAE-Lambda advantage calculation
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        self.adv_buf[path_slice] = util.discount_cumsum(
            deltas, self.gamma * self.lam)

        # the next line computes rewards-to-go, to be targets for the value function
        self.ret_buf[path_slice] = util.discount_cumsum(rews, self.gamma)[:-1]
        episodic_return = self.ret_buf[self.path_start_idx]

        self.path_start_idx = self.ptr

        return episodic_return
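The helper util.discount_cumsum is not shown on this page. A minimal sketch of what it might look like, assuming the common scipy.signal.lfilter-based implementation (as in OpenAI Spinning Up):

import numpy as np
import scipy.signal


def discount_cumsum(x: np.ndarray, discount: float) -> np.ndarray:
    """
    Discounted cumulative sums over a vector.

    input:  [x0, x1, x2]
    output: [x0 + discount * x1 + discount**2 * x2,
             x1 + discount * x2,
             x2]
    """
    # Running an IIR filter over the reversed input accumulates future
    # values weighted by powers of the discount factor.
    return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]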
Example #2
    def test_cumsum(self):
        discount = 0.5
        x = np.ones(3, dtype=np.float32)
        y = discount_cumsum(x, discount=discount)

        self.assertAlmostEqual(y[0],
                               x[0] + discount * x[1] + discount**2 * x[2])
        self.assertAlmostEqual(y[1], x[1] + discount * x[2])
        self.assertAlmostEqual(y[2], x[2])
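With discount = 0.5 and x = [1, 1, 1], the test above expects y ≈ [1.75, 1.5, 1.0]. The same identity can be checked with a plain reverse-accumulation loop (the helper name below is made up for illustration):

import numpy as np


def discount_cumsum_reference(x, discount):
    # Accumulate from the back: each entry is its own value plus the
    # discounted sum of all later entries.
    out = np.zeros_like(x)
    running = 0.0
    for i in reversed(range(len(x))):
        running = x[i] + discount * running
        out[i] = running
    return out


y = discount_cumsum_reference(np.ones(3, dtype=np.float32), 0.5)
np.testing.assert_allclose(y, [1.75, 1.5, 1.0])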
Example #3
    def finish_path(self, last_val: float) -> Tuple[Optional[float], int]:
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.

        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
        """

        if self.is_finished():
            return None, 0

        path_slice = slice(self.start_index, self.current_index)
        rews = np.array(self.rew_buf[path_slice] + [last_val])
        vals = np.array(self.val_buf[path_slice] + [last_val])

        # the next two lines implement GAE-Lambda advantage calculation
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        self.adv_buf += util.discount_cumsum(deltas,
                                             self.gamma * self.lam).tolist()

        # the next line computes rewards-to-go, to be targets for the value function
        self.ret_buf += util.discount_cumsum(rews, self.gamma).tolist()[:-1]

        episodic_return = self.ret_buf[self.start_index]
        episode_length = self.current_index - self.start_index

        self.start_index = self.current_index

        # Ensure that all buffer fields have the same length
        assert all(
            len(getattr(self, field)) == self.current_index
            for field in DynamicPPOBuffer.BUFFER_FIELDS)

        return episodic_return, episode_length
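To see how the two discount_cumsum calls in finish_path fit together, here is a small self-contained run-through of the GAE-Lambda step. The gamma, lam, rewards, and value estimates are made-up numbers, and discount_cumsum is the same sketch shown after Example #1:

import numpy as np
import scipy.signal


def discount_cumsum(x, discount):
    # Same sketch as after Example #1.
    return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]


gamma, lam = 0.99, 0.95

# A three-step trajectory cut off by the epoch ending, so we bootstrap
# with a (made-up) value estimate V(s_T) rather than 0.
last_val = 0.4
rews = np.append(np.array([1.0, 0.0, 1.0]), last_val)
vals = np.append(np.array([0.5, 0.6, 0.4]), last_val)

# The same three computations as in finish_path: one-step TD residuals,
# GAE-Lambda advantages, and rewards-to-go targets for the value function.
deltas = rews[:-1] + gamma * vals[1:] - vals[:-1]
advantages = discount_cumsum(deltas, gamma * lam)
rewards_to_go = discount_cumsum(rews, gamma)[:-1]

episodic_return = rewards_to_go[0]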