Example No. 1
import collections
import pickle

# Imports omitted by the snippet; the paths below follow ChainerRL's layout
# and are an assumption here.
from chainerrl.misc.collections import RandomAccessQueue
from chainerrl.replay_buffer import AbstractReplayBuffer


class ReplayBuffer(AbstractReplayBuffer):

    def __init__(self, capacity=None):
        self.memory = RandomAccessQueue(maxlen=capacity)

    def append(self, state, action, reward, next_state=None, next_action=None,
               is_state_terminal=False):
        experience = dict(state=state, action=action, reward=reward,
                          next_state=next_state, next_action=next_action,
                          is_state_terminal=is_state_terminal)
        self.memory.append(experience)

    def sample(self, n):
        assert len(self.memory) >= n
        return self.memory.sample(n)

    def __len__(self):
        return len(self.memory)

    def save(self, filename):
        with open(filename, 'wb') as f:
            pickle.dump(self.memory, f)

    def load(self, filename):
        with open(filename, 'rb') as f:
            self.memory = pickle.load(f)
        if isinstance(self.memory, collections.deque):
            # Load v0.2
            self.memory = RandomAccessQueue(
                self.memory, maxlen=self.memory.maxlen)

    def stop_current_episode(self):
        pass
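
A minimal usage sketch for this buffer; the state arrays and hyperparameters are illustrative, and RandomAccessQueue is assumed to be importable as above:

import numpy as np

buf = ReplayBuffer(capacity=10 ** 5)
for t in range(100):
    buf.append(state=np.zeros(4), action=0, reward=1.0,
               next_state=np.zeros(4), is_state_terminal=(t == 99))

batch = buf.sample(32)       # a list of 32 transition dicts
print(len(buf), batch[0]['reward'])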
Example No. 2
class ReplayBuffer(object):
    def __init__(self, capacity=None):
        self.memory = RandomAccessQueue(maxlen=capacity)

    def append(self,
               state,
               action,
               reward,
               next_state=None,
               next_action=None,
               is_state_terminal=False):
        """Append a transition to this replay buffer

        Args:
            state: s_t
            action: a_t
            reward: r_t
            next_state: s_{t+1} (can be None if terminal)
            next_action: a_{t+1} (can be None for off-policy algorithms)
            is_state_terminal (bool)
        """
        experience = dict(state=state,
                          action=action,
                          reward=reward,
                          next_state=next_state,
                          next_action=next_action,
                          is_state_terminal=is_state_terminal)
        self.memory.append(experience)

    def sample(self, n):
        """Sample n unique samples from this replay buffer"""
        assert len(self.memory) >= n
        return self.memory.sample(n)

    def __len__(self):
        return len(self.memory)

    def save(self, filename):
        with open(filename, 'wb') as f:
            pickle.dump(self.memory, f)

    def load(self, filename):
        with open(filename, 'rb') as f:
            self.memory = pickle.load(f)

    def stop_current_episode(self):
        pass
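
Unlike Example No. 1, this variant's load has no deque-compatibility branch, so persistence is a plain pickle round trip. A hedged sketch (the file name is illustrative, and RandomAccessQueue is assumed to pickle cleanly):

buf = ReplayBuffer(capacity=1000)
buf.append(state=0, action=1, reward=0.5, next_state=1)
buf.save('replay.pkl')            # pickles the underlying queue

restored = ReplayBuffer(capacity=1000)
restored.load('replay.pkl')       # replaces the queue wholesale
assert len(restored) == len(buf)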
Example No. 3
class EpisodicReplayBuffer(AbstractEpisodicReplayBuffer):

    def __init__(self, capacity=None):
        self.current_episode = []
        self.episodic_memory = RandomAccessQueue()
        self.memory = RandomAccessQueue()
        self.capacity = capacity

    def append(self, state, action, reward, next_state=None, next_action=None,
               is_state_terminal=False, **kwargs):
        experience = dict(state=state, action=action, reward=reward,
                          next_state=next_state, next_action=next_action,
                          is_state_terminal=is_state_terminal,
                          **kwargs)
        self.current_episode.append(experience)
        if is_state_terminal:
            self.stop_current_episode()

    def sample(self, n):
        assert len(self.memory) >= n
        return self.memory.sample(n)

    def sample_episodes(self, n_episodes, max_len=None):
        assert len(self.episodic_memory) >= n_episodes
        episodes = self.episodic_memory.sample(n_episodes)
        if max_len is not None:
            return [random_subseq(ep, max_len) for ep in episodes]
        else:
            return episodes

    def __len__(self):
        return len(self.memory)

    @property
    def n_episodes(self):
        return len(self.episodic_memory)

    def save(self, filename):
        with open(filename, 'wb') as f:
            pickle.dump((self.memory, self.episodic_memory), f)

    def load(self, filename):
        with open(filename, 'rb') as f:
            memory = pickle.load(f)
        if isinstance(memory, tuple):
            self.memory, self.episodic_memory = memory
        else:
            # Load v0.2
            # FIXME: The code works with EpisodicReplayBuffer
            # but not with PrioritizedEpisodicReplayBuffer
            self.memory = RandomAccessQueue(memory)
            self.episodic_memory = RandomAccessQueue()

            # Recover episodic_memory with best effort.
            episode = []
            for item in self.memory:
                episode.append(item)
                if item['is_state_terminal']:
                    self.episodic_memory.append(episode)
                    episode = []

    def stop_current_episode(self):
        if self.current_episode:
            self.episodic_memory.append(self.current_episode)
            self.memory.extend(self.current_episode)
            self.current_episode = []
            # Evict whole episodes, oldest first, until the number of
            # stored transitions fits within capacity.
            while self.capacity is not None and \
                    len(self.memory) > self.capacity:
                discarded_episode = self.episodic_memory.popleft()
                for _ in range(len(discarded_episode)):
                    self.memory.popleft()
        assert not self.current_episode
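
A sketch of the two-level bookkeeping; random_subseq is assumed to return a random contiguous subsequence of length at most max_len (its definition is not shown in the snippet):

buf = EpisodicReplayBuffer(capacity=10 ** 4)
for ep in range(3):
    for t in range(5):
        buf.append(state=t, action=0, reward=1.0,
                   is_state_terminal=(t == 4))  # triggers stop_current_episode

print(len(buf))         # 15 transitions in the flat memory
print(buf.n_episodes)   # 3 complete episodes
episodes = buf.sample_episodes(2, max_len=3)  # 2 episodes, clipped to <= 3 steps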
Example No. 4
class ReplayBuffer(replay_buffer.AbstractReplayBuffer):
    """Experience Replay Buffer

    As described in
    https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf.

    Args:
        capacity (int): capacity in terms of number of transitions
        num_steps (int): Number of timesteps per stored transition
            (for N-step updates)
    """
    def __init__(self, capacity=None, num_steps=1):
        self.capacity = capacity
        assert num_steps > 0
        self.num_steps = num_steps
        self.memory = RandomAccessQueue(maxlen=capacity)
        # One sliding n-step window per environment, keyed by env_id.
        self.last_n_transitions = collections.defaultdict(
            lambda: collections.deque([], maxlen=num_steps))

    def append(self,
               state,
               action,
               reward,
               next_state=None,
               next_action=None,
               is_state_terminal=False,
               env_id=0,
               **kwargs):
        last_n_transitions = self.last_n_transitions[env_id]
        experience = dict(state=state,
                          action=action,
                          reward=reward,
                          next_state=next_state,
                          next_action=next_action,
                          is_state_terminal=is_state_terminal,
                          **kwargs)
        last_n_transitions.append(experience)
        if is_state_terminal:
            # The episode ended: drain the window, storing every remaining
            # suffix so the final transitions stay sampleable.
            while last_n_transitions:
                self.memory.append(list(last_n_transitions))
                del last_n_transitions[0]
            assert len(last_n_transitions) == 0
        else:
            if len(last_n_transitions) == self.num_steps:
                self.memory.append(list(last_n_transitions))

    def stop_current_episode(self, env_id=0):
        last_n_transitions = self.last_n_transitions[env_id]
        # If the n-step window is not yet full, its contents were never
        # stored by append(), so store them now; if it is full, the last
        # window has already been stored.
        if 0 < len(last_n_transitions) < self.num_steps:
            self.memory.append(list(last_n_transitions))
        # avoid duplicate entry
        if 0 < len(last_n_transitions) <= self.num_steps:
            del last_n_transitions[0]
        while last_n_transitions:
            self.memory.append(list(last_n_transitions))
            del last_n_transitions[0]
        assert len(last_n_transitions) == 0

    def sample(self, num_experiences):
        assert len(self.memory) >= num_experiences
        return self.memory.sample(num_experiences)

    def __len__(self):
        return len(self.memory)

    def save(self, filename):
        with open(filename, 'wb') as f:
            pickle.dump(self.memory, f)

    def load(self, filename):
        with open(filename, 'rb') as f:
            self.memory = pickle.load(f)
        if isinstance(self.memory, collections.deque):
            # Load v0.2
            self.memory = RandomAccessQueue(self.memory,
                                            maxlen=self.memory.maxlen)
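
With num_steps > 1 each stored item is a list of up to num_steps consecutive transitions rather than a single dict, and windows are tracked per environment via env_id. A sketch of that behavior (values illustrative):

buf = ReplayBuffer(capacity=10 ** 5, num_steps=3)
for t in range(5):
    buf.append(state=t, action=0, reward=1.0,
               next_state=t + 1, is_state_terminal=False, env_id=0)

# Only full 3-step windows are stored while the episode runs:
# [s0,s1,s2], [s1,s2,s3], [s2,s3,s4]
print(len(buf))            # 3
window = buf.sample(1)[0]  # one item = a list of 3 consecutive dicts
print([exp['state'] for exp in window])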
Example No. 5
class ReplayBuffer(AbstractReplayBuffer):
    def __init__(self, capacity=None, num_steps=1):
        self.capacity = capacity
        assert num_steps > 0
        self.num_steps = num_steps
        self.memory = RandomAccessQueue(maxlen=capacity)
        self.last_n_transitions = collections.deque([], maxlen=num_steps)

    def append(self,
               state,
               action,
               reward,
               next_state=None,
               next_action=None,
               is_state_terminal=False):
        experience = dict(state=state,
                          action=action,
                          reward=reward,
                          next_state=next_state,
                          next_action=next_action,
                          is_state_terminal=is_state_terminal)
        self.last_n_transitions.append(experience)
        if is_state_terminal:
            while self.last_n_transitions:
                self.memory.append(list(self.last_n_transitions))
                del self.last_n_transitions[0]
            assert len(self.last_n_transitions) == 0
        else:
            if len(self.last_n_transitions) == self.num_steps:
                self.memory.append(list(self.last_n_transitions))

    def stop_current_episode(self):
        # If the n-step window is not yet full, its contents were never
        # stored by append(), so store them now; if it is full, the last
        # window has already been stored.
        if 0 < len(self.last_n_transitions) < self.num_steps:
            self.memory.append(list(self.last_n_transitions))
        # avoid duplicate entry
        if 0 < len(self.last_n_transitions) <= self.num_steps:
            del self.last_n_transitions[0]
        while self.last_n_transitions:
            self.memory.append(list(self.last_n_transitions))
            del self.last_n_transitions[0]
        assert len(self.last_n_transitions) == 0

    def sample(self, num_experiences):
        assert len(self.memory) >= num_experiences
        return self.memory.sample(num_experiences)

    def __len__(self):
        return len(self.memory)

    def save(self, filename):
        with open(filename, 'wb') as f:
            pickle.dump(self.memory, f)

    def load(self, filename):
        with open(filename, 'rb') as f:
            self.memory = pickle.load(f)
        if isinstance(self.memory, collections.deque):
            # Load v0.2
            self.memory = RandomAccessQueue(self.memory,
                                            maxlen=self.memory.maxlen)
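
The terminal branch drains the window completely, storing every shrinking suffix so the episode's last transitions remain sampleable. A short trace (values illustrative):

buf = ReplayBuffer(capacity=None, num_steps=3)
for t in range(4):
    buf.append(state=t, action=0, reward=0.0,
               is_state_terminal=(t == 3))

# Stored items: [s0,s1,s2] (full window), then at the terminal step the
# drain adds [s1,s2,s3], [s2,s3], [s3].
print(len(buf))                       # 4
print([len(w) for w in buf.memory])   # [3, 3, 2, 1]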
Example No. 6
class EpisodicReplayBuffer(object):
    def __init__(self, capacity=None):
        self.current_episode = []
        self.episodic_memory = RandomAccessQueue()
        self.memory = RandomAccessQueue()
        self.capacity = capacity

    def append(self,
               state,
               action,
               reward,
               next_state=None,
               next_action=None,
               is_state_terminal=False,
               **kwargs):
        """Append a transition to this replay buffer

        Args:
            state: s_t
            action: a_t
            reward: r_t
            next_state: s_{t+1} (can be None if terminal)
            next_action: a_{t+1} (can be None for off-policy algorithms)
            is_state_terminal (bool)
        """
        experience = dict(state=state,
                          action=action,
                          reward=reward,
                          next_state=next_state,
                          next_action=next_action,
                          is_state_terminal=is_state_terminal,
                          **kwargs)
        self.current_episode.append(experience)
        if is_state_terminal:
            self.stop_current_episode()

    def sample(self, n):
        """Sample n unique samples from this replay buffer"""
        assert len(self.memory) >= n
        return self.memory.sample(n)

    def sample_episodes(self, n_episodes, max_len=None):
        """Sample n unique samples from this replay buffer"""
        assert len(self.episodic_memory) >= n_episodes
        episodes = self.episodic_memory.sample(n_episodes)
        if max_len is not None:
            return [random_subseq(ep, max_len) for ep in episodes]
        else:
            return episodes

    def __len__(self):
        return len(self.episodic_memory)

    def save(self, filename):
        with open(filename, 'wb') as f:
            pickle.dump((self.memory, self.episodic_memory), f)

    def load(self, filename):
        with open(filename, 'rb') as f:
            self.memory, self.episodic_memory = pickle.load(f)

    def stop_current_episode(self):
        if self.current_episode:
            self.episodic_memory.append(self.current_episode)
            self.memory.extend(self.current_episode)
            self.current_episode = []
            while self.capacity is not None and \
                    len(self.memory) > self.capacity:
                discarded_episode = self.episodic_memory.popleft()
                for _ in range(len(discarded_episode)):
                    self.memory.popleft()
        assert not self.current_episode
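
Eviction is episode-granular: once the flat memory exceeds capacity, whole episodes are dropped oldest-first. Note that unlike Example No. 3, __len__ here counts episodes, not transitions. A sketch:

buf = EpisodicReplayBuffer(capacity=8)
for ep in range(3):
    for t in range(5):
        buf.append(state=(ep, t), action=0, reward=0.0,
                   is_state_terminal=(t == 4))

# Each finished episode adds 5 transitions; whenever the total exceeds
# capacity=8, the oldest whole episode is evicted.
print(len(buf.memory))   # 5 -- only the newest episode's transitions remain
print(len(buf))          # 1 -- this variant's __len__ counts episodes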
Example No. 7
import heapq
import pickle

import chainerrl
# Assumed ChainerRL import, as in Example No. 1:
from chainerrl.misc.collections import RandomAccessQueue


class SuccessPrioReplayBuffer(chainerrl.replay_buffer.AbstractReplayBuffer):
    def __init__(self, capacity=None):
        self.current_episode = []
        self.current_episode_R = 0.0

        self.good_episodic_memory = []
        self.good_episodic_memory_capacity = 20
        self.good_memory = RandomAccessQueue()

        self.normal_episodic_memory = []
        self.normal_episodic_memory_capacity = 50
        self.normal_memory = RandomAccessQueue()

        self.bad_episodic_memory = []
        self.bad_episodic_memory_capacity = 10
        self.bad_memory = RandomAccessQueue()

        self.capacity = capacity
        self.all_step_count = 0
        self.episode_count = 0

    def append(self,
               state,
               action,
               reward,
               next_state=None,
               next_action=None,
               is_state_terminal=False,
               **kwargs):
        experience = dict(state=state,
                          action=action,
                          reward=reward,
                          next_state=next_state,
                          next_action=next_action,
                          is_state_terminal=is_state_terminal,
                          **kwargs)
        self.current_episode.append(experience)

        self.current_episode_R += reward
        self.all_step_count += 1

        if is_state_terminal:
            self.stop_current_episode()

    def sample(self, n):
        # Draw roughly n/4 from bad experiences, up to n/2 cumulative from
        # normal, and up to 3n/4 cumulative from good experiences.
        count_sample = 0
        ans = []
        if len(self.bad_memory) > 0:
            n_s = min(len(self.bad_memory), n // 4)
            ans.extend(self.bad_memory.sample(n_s))
            count_sample += n_s

        if len(self.normal_memory) > 0:
            n_s = min(len(self.normal_memory), (n // 4) * 2 - count_sample)
            ans.extend(self.normal_memory.sample(n_s))
            count_sample += n_s

        if len(self.good_memory) > 0:
            n_s = min(len(self.good_memory), (n // 4) * 3 - count_sample)
            ans.extend(self.good_memory.sample(n_s))
            count_sample += n_s

        # Top up with the most recent transitions of the ongoing episode.
        # (The original slice here was off by one and returned an empty list
        # when n_s equaled the episode length.)
        if (count_sample < n) and (len(self.current_episode) > 0):
            n_s = min(len(self.current_episode), n - count_sample)
            ans.extend(self.current_episode[-n_s:])

        return ans

    def __len__(self):
        return self.all_step_count

    def save(self, filename):
        with open(filename, 'wb') as f:
            pickle.dump((self.good_episodic_memory,
                         self.normal_episodic_memory, self.bad_episodic_memory,
                         self.all_step_count, self.episode_count), f)

    def load(self, filename):
        with open(filename, 'rb') as f:
            memory = pickle.load(f)
        if isinstance(memory, tuple):
            (self.good_episodic_memory, self.normal_episodic_memory,
             self.bad_episodic_memory, self.all_step_count,
             self.episode_count) = memory

            self.good_memory = RandomAccessQueue()
            for e in self.good_episodic_memory:
                self.good_memory.extend(e[2])

            self.normal_memory = RandomAccessQueue()
            for e in self.normal_episodic_memory:
                self.normal_memory.extend(e[2])

            self.bad_memory = RandomAccessQueue()
            for e in self.bad_episodic_memory:
                self.bad_memory.extend(e[2])

            self.current_episode = []
            self.current_episode_R = 0.0
        else:
            raise ValueError('Unrecognized replay buffer file: %s' % filename)

    def stop_current_episode(self):
        if self.current_episode:
            # Heap entries are (episode_return, episode_id, transitions);
            # the unique, increasing id breaks ties so the transition lists
            # themselves are never compared. One-step episodes are discarded.
            new_normal_episode = None
            if len(self.current_episode) > 1:
                entry = (self.current_episode_R, self.episode_count,
                         self.current_episode)
                if len(self.good_episodic_memory) \
                        >= self.good_episodic_memory_capacity:
                    # Good heap full: pushing the new episode ejects its
                    # lowest-return entry, which is demoted to normal.
                    new_normal_episode = heapq.heappushpop(
                        self.good_episodic_memory, entry)
                else:
                    heapq.heappush(self.good_episodic_memory, entry)

            self.current_episode = []
            self.episode_count += 1

            new_bad_episode = None
            if new_normal_episode is not None:
                if len(self.normal_episodic_memory) \
                        >= self.normal_episodic_memory_capacity:
                    # Normal heap full: demote its worst episode to bad.
                    new_bad_episode = heapq.heappushpop(
                        self.normal_episodic_memory, new_normal_episode)
                else:
                    heapq.heappush(self.normal_episodic_memory,
                                   new_normal_episode)

            if new_bad_episode is not None:
                if len(self.bad_episodic_memory) \
                        >= self.bad_episodic_memory_capacity:
                    # Bad heap full: the worst episode is dropped for good
                    # and its transitions no longer count toward __len__.
                    drop_episode = heapq.heappushpop(self.bad_episodic_memory,
                                                     new_bad_episode)
                    self.all_step_count -= len(drop_episode[2])
                else:
                    heapq.heappush(self.bad_episodic_memory, new_bad_episode)

            # Rebuild the flat sampling queues from the surviving episodes.
            self.good_memory = RandomAccessQueue()
            for e in self.good_episodic_memory:
                self.good_memory.extend(e[2])

            self.normal_memory = RandomAccessQueue()
            for e in self.normal_episodic_memory:
                self.normal_memory.extend(e[2])

            self.bad_memory = RandomAccessQueue()
            for e in self.bad_episodic_memory:
                self.bad_memory.extend(e[2])

        assert not self.current_episode

        self.current_episode_R = 0.0
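
The three heaps form a demotion chain keyed on episode return: overflow from the good heap (via heappushpop, which ejects the lowest-return entry) falls into the normal heap, whose overflow falls into the bad heap. A standalone toy trace of that mechanism, with capacities shrunk for illustration:

import heapq

good, normal, bad = [], [], []
GOOD_CAP, NORMAL_CAP = 2, 2

def admit(entry):
    # entry = (episode_return, episode_id, transitions); the unique id
    # breaks ties so transition lists are never compared.
    demoted = heapq.heappushpop(good, entry) \
        if len(good) >= GOOD_CAP else heapq.heappush(good, entry)
    if demoted is not None:
        demoted = heapq.heappushpop(normal, demoted) \
            if len(normal) >= NORMAL_CAP else heapq.heappush(normal, demoted)
    if demoted is not None:
        heapq.heappush(bad, demoted)

for i, ret in enumerate([5.0, 1.0, 9.0, 3.0, 7.0]):
    admit((ret, i, []))

print(sorted(e[0] for e in good))    # [7.0, 9.0] -- highest returns kept
print(sorted(e[0] for e in normal))  # [3.0, 5.0]
print(sorted(e[0] for e in bad))     # [1.0]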