Example #1
    def finish_path(self, last_val=0):
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.

        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
        """

        path_slice = slice(self.path_start_idx, self.ptr)
        rews = np.append(self.rew_buf[path_slice], last_val)
        vals = np.append(self.val_buf[path_slice], last_val)

        # the next two lines implement GAE-Lambda advantage calculation
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        self.adv_buf[path_slice] = core.discount_cumsum(
            deltas, self.gamma * self.lam)

        # the next line computes rewards-to-go, to be targets for the value function
        self.ret_buf[path_slice] = core.discount_cumsum(rews, self.gamma)[:-1]

        self.path_start_idx = self.ptr
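
Both the advantage and return computations above rely on core.discount_cumsum, which is not shown in these examples. A minimal sketch of such a helper, assuming the scipy.signal.lfilter-based formulation used in Spinning Up-style buffers, could look like this:

    import numpy as np
    import scipy.signal

    def discount_cumsum(x, discount):
        """
        Compute discounted cumulative sums along a vector, back to front:
        input  [x0, x1, x2]
        output [x0 + discount*x1 + discount^2*x2, x1 + discount*x2, x2]
        """
        return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]

Applied to deltas with discount gamma*lam this yields the GAE-Lambda advantages; applied to rews with discount gamma it yields the rewards-to-go.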
Example #2
    def finish_path(self, last_val=0):
        """
        Finish the current trajectory: append the bootstrap value "last_val"
        (0 for a terminal state, V(s_T) otherwise), compute GAE-Lambda
        advantages and rewards-to-go, and return the trajectory as a list of
        per-step dicts before resetting the buffer.
        """
        self.rew_buf.append(last_val)
        self.val_buf.append(last_val)

        rews = np.array(self.rew_buf)
        vals = np.array(self.val_buf)

        # the next two lines implement GAE-Lambda advantage calculation
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        adv_buf = core.discount_cumsum(deltas, self.gamma * self.lam)

        # the next line computes rewards-to-go, to be targets for the value function
        ret_buf = core.discount_cumsum(rews, self.gamma)[:-1]

        trajectory = [dict(obs=obs, act=act, ret=ret, adv=adv, logp=logp)
                      for obs, act, ret, adv, logp
                      in zip(self.obs_buf, self.act_buf, ret_buf, adv_buf, self.logp_buf)]

        self.reset()
        """ TODO: Advantage normalization """

        return trajectory
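
The TODO above refers to advantage normalization. A common choice (not part of this example, and the helper name here is illustrative) is to standardize the advantages to zero mean and unit standard deviation before the policy update:

    import numpy as np

    def normalize_advantages(adv, eps=1e-8):
        # Standardize advantages; eps guards against division by zero
        # when all advantages are (nearly) identical.
        adv = np.asarray(adv, dtype=np.float32)
        return (adv - adv.mean()) / (adv.std() + eps)

This keeps the scale of the policy-gradient step roughly constant across trajectories with different lengths and reward magnitudes.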
Example #3
    def finish_path(self, agent_index, last_val=0):
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.

        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
        """

        i = agent_index

        path_slice = slice(self.path_start_idx[i], self.ptr[i])
        rews = np.append(self.rew_buf[i][path_slice], last_val)
        vals = np.append(self.val_buf[i][path_slice], last_val)

        # the next two lines implement GAE-Lambda advantage calculation
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        self.adv_buf[i][path_slice] = core.discount_cumsum(
            deltas, self.gamma * self.lam)

        if self.shift_advs_pct:
            advs = self.adv_buf[i][path_slice]
            # TODO: Tie this to Entropy
            # 90 seems to speed up convergence, whereas 99 and 99.9 slow it down;
            # perhaps something to do with the size of one standard deviation.
            self.adv_buf[i][path_slice] = advs - np.percentile(advs, self.shift_advs_pct)

        # the next line computes rewards-to-go, to be targets for the value function
        self.ret_buf[i][path_slice] = core.discount_cumsum(rews, self.gamma)[:-1]

        self.path_start_idx[i] = self.ptr[i]
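
As with the single-agent buffer in Example #1, the caller decides the bootstrap value: 0 when agent i reached a true terminal state, or the critic's estimate V(s_T) when its trajectory was cut off by the epoch ending. A hypothetical call site (buf, critic, obs, terminal, and num_agents are illustrative names, not part of the example) might look like:

    # Hypothetical call site; all names below are illustrative.
    for i in range(num_agents):
        if terminal[i]:
            # Agent reached a terminal state: no bootstrapping.
            buf.finish_path(agent_index=i, last_val=0)
        else:
            # Trajectory cut off by the epoch boundary: bootstrap with V(s_T).
            buf.finish_path(agent_index=i, last_val=critic(obs[i]))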