Example #1
    def __init__(self, max_size: Tuple[MemoryGranularity, int], allow_duplicates_in_batch_sampling: bool=True):
        """
        :param max_size: the maximum number of transitions or episodes to hold in the memory
        :param allow_duplicates_in_batch_sampling: allow having the same transition multiple times in a batch
        """
        super().__init__(max_size)
        if max_size[0] != MemoryGranularity.Transitions:
            raise ValueError("Experience replay size can only be configured in terms of transitions")
        self.transitions = []
        self.allow_duplicates_in_batch_sampling = allow_duplicates_in_batch_sampling

        self.reader_writer_lock = ReaderWriterLock()
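
A minimal usage sketch for the constructor above (the buffer size of 50000 is an arbitrary illustration; ExperienceReplay and MemoryGranularity are the names used elsewhere in this listing): the size limit is given as a (granularity, count) tuple, and only transition granularity is accepted.

# Hedged sketch -- ExperienceReplay and MemoryGranularity are assumed to come from this codebase.
memory = ExperienceReplay(max_size=(MemoryGranularity.Transitions, 50000))

# An episode-based limit would trigger the ValueError raised in __init__ above:
# memory = ExperienceReplay(max_size=(MemoryGranularity.Episodes, 100))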
Example #2
    def __init__(self, max_size: Tuple[MemoryGranularity, int] = (MemoryGranularity.Transitions, 1000000), n_step=-1):
        """
        :param max_size: the maximum number of transitions or episodes to hold in the memory
        """
        super().__init__(max_size)
        self.n_step = n_step
        self._buffer = [Episode(n_step=self.n_step)]  # list of episodes
        self.transitions = []
        self._length = 1  # the episodic replay buffer starts with a single empty episode
        self._num_transitions = 0
        self._num_transitions_in_complete_episodes = 0
        self.reader_writer_lock = ReaderWriterLock()
Example #3
    def __init__(self, max_size: Tuple[MemoryGranularity, int] = (MemoryGranularity.Transitions, 3000000), n_step=-1,
                 train_to_eval_ratio: int = 1):
        """
        :param max_size: the maximum number of transitions or episodes to hold in the memory
        """
        super().__init__(max_size)
        self.n_step = n_step
        self._buffer = [Episode(n_step=self.n_step)]  # list of episodes
        self.transitions = []
        self._length = 1  # the episodic replay buffer starts with a single empty episode
        self._num_transitions = 0
        self._num_transitions_in_complete_episodes = 0
        self.reader_writer_lock = ReaderWriterLock()
        self.last_training_set_episode_id = None  # used in batch-rl
        self.last_training_set_transition_id = None  # used in batch-rl
        self.train_to_eval_ratio = train_to_eval_ratio  # used in batch-rl
        self.evaluation_dataset_as_episodes = None
        self.evaluation_dataset_as_transitions = None

        self.frozen = False
Example #4
class EpisodicExperienceReplay(Memory):
    """
    A replay buffer that stores episodes of transitions. The additional structure allows performing various
    calculations of total return and other values that depend on the sequential behavior of the transitions
    in the episode.
    """
    def __init__(self,
                 max_size: Tuple[MemoryGranularity,
                                 int] = (MemoryGranularity.Transitions,
                                         1000000),
                 n_step=-1,
                 train_to_eval_ratio: int = 1):
        """
        :param max_size: the maximum number of transitions or episodes to hold in the memory
        """
        super().__init__(max_size)
        self.n_step = n_step
        self._buffer = [Episode(n_step=self.n_step)]  # list of episodes
        self.transitions = []
        self._length = 1  # the episodic replay buffer starts with a single empty episode
        self._num_transitions = 0
        self._num_transitions_in_complete_episodes = 0
        self.reader_writer_lock = ReaderWriterLock()
        self.last_training_set_episode_id = None  # used in batch-rl
        self.last_training_set_transition_id = None  # used in batch-rl
        self.train_to_eval_ratio = train_to_eval_ratio  # used in batch-rl

    def length(self, lock: bool = False) -> int:
        """
        Get the number of episodes in the ER (even if they are not complete)
        """
        length = self._length
        if self._length != 0 and self._buffer[-1].is_empty():
            length = self._length - 1

        return length

    def num_complete_episodes(self):
        """ Get the number of complete episodes in ER """
        length = self._length - 1

        return length

    def num_transitions(self):
        return self._num_transitions

    def num_transitions_in_complete_episodes(self):
        return self._num_transitions_in_complete_episodes

    def get_last_training_set_episode_id(self):
        return self.last_training_set_episode_id

    def sample(self,
               size: int,
               is_consecutive_transitions=False) -> List[Transition]:
        """
        Sample a batch of transitions from the replay buffer. If the requested size is larger than the number
        of samples available in the replay buffer, the returned batch will be empty.
        :param size: the size of the batch to sample
        :param is_consecutive_transitions: if set True, samples a batch of consecutive transitions.
        :return: a batch (list) of selected transitions from the replay buffer
        """
        self.reader_writer_lock.lock_writing()

        if self.num_complete_episodes() >= 1:
            if is_consecutive_transitions:
                episode_idx = np.random.randint(0,
                                                self.num_complete_episodes())
                if self._buffer[episode_idx].length() <= size:
                    batch = self._buffer[episode_idx].transitions
                else:
                    transition_idx = np.random.randint(
                        size, self._buffer[episode_idx].length())
                    batch = self._buffer[episode_idx].transitions[
                        transition_idx - size:transition_idx]
            else:
                transitions_idx = np.random.randint(
                    self.num_transitions_in_complete_episodes(), size=size)
                batch = [self.transitions[i] for i in transitions_idx]

        else:
            raise ValueError(
                "The episodic replay buffer cannot be sampled since there are no complete episodes yet. "
                "There is currently 1 episodes with {} transitions".format(
                    self._buffer[0].length()))

        self.reader_writer_lock.release_writing()

        return batch

    def get_episode_for_transition(
            self, transition: Transition) -> Tuple[int, Episode]:
        """
        Get the episode that the given transition came from.
        :param transition: the transition to look up the episode for
        :return: (episode index, the episode), or (-1, None) if no matching episode was found
        """

        for i, episode in enumerate(self._buffer):
            if transition in episode.transitions:
                return i, episode
        return -1, None

    def shuffle_episodes(self):
        """
        Shuffle all the episodes in the replay buffer
        :return:
        """
        random.shuffle(self._buffer)
        self.transitions = [t for e in self._buffer for t in e.transitions]

    def get_shuffled_data_generator(self, size: int) -> List[Transition]:
        """
        Get a generator for iterating through the shuffled replay buffer, for processing the data in epochs.
        If the requested size is larger than the number of samples available in the replay buffer, the returned
        batch will be empty. The last returned batch may be smaller than the requested size, so that all the
        transitions in the replay buffer are covered.

        :param size: the size of the batch to return
        :return: a batch (list) of selected transitions from the replay buffer
        """
        self.reader_writer_lock.lock_writing()
        if self.last_training_set_transition_id is None:
            if self.train_to_eval_ratio < 0 or self.train_to_eval_ratio >= 1:
                raise ValueError(
                    'train_to_eval_ratio should be in the [0, 1) range.')

            transition = self.transitions[round(
                self.train_to_eval_ratio *
                self.num_transitions_in_complete_episodes())]
            episode_num, episode = self.get_episode_for_transition(transition)
            self.last_training_set_episode_id = episode_num
            self.last_training_set_transition_id = \
                len([t for e in self.get_all_complete_episodes_from_to(0, self.last_training_set_episode_id + 1) for t in e])

        shuffled_transition_indices = list(
            range(self.last_training_set_transition_id))
        random.shuffle(shuffled_transition_indices)

        # The last batch drawn will usually be < batch_size (=the size variable)
        for i in range(math.ceil(len(shuffled_transition_indices) / size)):
            sample_data = [
                self.transitions[j]
                for j in shuffled_transition_indices[i * size:(i + 1) * size]
            ]
            self.reader_writer_lock.release_writing()

            yield sample_data

    def get_all_complete_episodes_transitions(self) -> List[Transition]:
        """
        Get all the transitions from all the complete episodes in the buffer
        :return: a list of transitions
        """
        return self.transitions[:self.num_transitions_in_complete_episodes()]

    def get_all_complete_episodes(self) -> List[Episode]:
        """
        Get all the complete episodes in the buffer
        :return: a list of episodes
        """
        return self.get_all_complete_episodes_from_to(
            0, self.num_complete_episodes())

    def get_all_complete_episodes_from_to(self, start_episode_id,
                                          end_episode_id) -> List[Episode]:
        """
        Get all the complete episodes in the buffer within the given episode index range
        :return: a list of episodes
        """
        return self._buffer[start_episode_id:end_episode_id]

    def _enforce_max_length(self) -> None:
        """
        Make sure that the size of the replay buffer does not exceed the maximum size allowed.
        If it exceeds the max size, the oldest episode in the replay buffer will be removed.
        :return: None
        """
        granularity, size = self.max_size
        if granularity == MemoryGranularity.Transitions:
            while size != 0 and self.num_transitions() > size:
                self._remove_episode(0)
        elif granularity == MemoryGranularity.Episodes:
            while self.length() > size:
                self._remove_episode(0)

    def _update_episode(self, episode: Episode) -> None:
        episode.update_transitions_rewards_and_bootstrap_data()

    def verify_last_episode_is_closed(self) -> None:
        """
        Verify that there are no open episodes in the replay buffer
        :return: None
        """
        self.reader_writer_lock.lock_writing_and_reading()

        last_episode = self.get(-1, False)
        if last_episode and last_episode.length() > 0:
            self.close_last_episode(lock=False)

        self.reader_writer_lock.release_writing_and_reading()

    def close_last_episode(self, lock=True) -> None:
        """
        Close the last episode in the replay buffer and open a new one
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        last_episode = self._buffer[-1]

        self._num_transitions_in_complete_episodes += last_episode.length()
        self._length += 1

        # create a new Episode for the next transitions to be placed into
        self._buffer.append(Episode(n_step=self.n_step))

        # if updating the episode adds to the buffer, a new Episode needs to be ready first.
        # It would be better if this were less stateful.
        self._update_episode(last_episode)

        self._enforce_max_length()

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    def store(self, transition: Transition) -> None:
        """
        Store a new transition in the memory. If the transition game_over flag is on, this closes the episode and
        creates a new empty episode.
        Warning! Using the episodic memory by storing individual transitions instead of episodes will use the default
        Episode class parameters in order to create new episodes.
        :param transition: a transition to store
        :return: None
        """

        # Calling super.store() so that in case a memory backend is used, the memory backend can store this transition.
        super().store(transition)

        self.reader_writer_lock.lock_writing_and_reading()

        if len(self._buffer) == 0:
            self._buffer.append(Episode(n_step=self.n_step))
        last_episode = self._buffer[-1]
        last_episode.insert(transition)
        self.transitions.append(transition)
        self._num_transitions += 1
        if transition.game_over:
            self.close_last_episode(False)

        self._enforce_max_length()

        self.reader_writer_lock.release_writing_and_reading()

    def store_episode(self, episode: Episode, lock: bool = True) -> None:
        """
        Store a new episode in the memory.
        :param episode: the new episode to store
        :return: None
        """
        # Calling super.store() so that in case a memory backend is used, the memory backend can store this episode.
        super().store_episode(episode)

        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        if self._buffer[-1].length() == 0:
            self._buffer[-1] = episode
        else:
            self._buffer.append(episode)
        self.transitions.extend(episode.transitions)
        self._num_transitions += episode.length()
        self.close_last_episode(False)

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    def get_episode(self,
                    episode_index: int,
                    lock: bool = True) -> Union[None, Episode]:
        """
        Returns the episode in the given index. If the episode does not exist, returns None instead.
        :param episode_index: the index of the episode to return
        :return: the corresponding episode
        """
        if lock:
            self.reader_writer_lock.lock_writing()

        if self.length() == 0 or episode_index >= self.length():
            episode = None
        else:
            episode = self._buffer[episode_index]

        if lock:
            self.reader_writer_lock.release_writing()
        return episode

    def _remove_episode(self, episode_index: int) -> None:
        """
        Remove the episode in the given index (even if it is not complete yet)
        :param episode_index: the index of the episode to remove
        :return: None
        """
        if len(self._buffer) > episode_index:
            episode_length = self._buffer[episode_index].length()
            self._length -= 1
            self._num_transitions -= episode_length
            self._num_transitions_in_complete_episodes -= episode_length
            del self.transitions[:episode_length]
            del self._buffer[episode_index]

    def remove_episode(self, episode_index: int) -> None:
        """
        Remove the episode in the given index (even if it is not complete yet)
        :param episode_index: the index of the episode to remove
        :return: None
        """
        self.reader_writer_lock.lock_writing_and_reading()

        self._remove_episode(episode_index)

        self.reader_writer_lock.release_writing_and_reading()

    # for API compatibility
    def get(self,
            episode_index: int,
            lock: bool = True) -> Union[None, Episode]:
        """
        Returns the episode in the given index. If the episode does not exist, returns None instead.
        :param episode_index: the index of the episode to return
        :return: the corresponding episode
        """
        return self.get_episode(episode_index, lock)

    def get_last_complete_episode(self) -> Union[None, Episode]:
        """
        Returns the last complete episode in the memory or None if there are no complete episodes
        :return: None or the last complete episode
        """
        self.reader_writer_lock.lock_writing()

        last_complete_episode_index = self.num_complete_episodes() - 1
        episode = None
        if last_complete_episode_index >= 0:
            episode = self.get(last_complete_episode_index)

        self.reader_writer_lock.release_writing()

        return episode

    # for API compatibility
    def remove(self, episode_index: int):
        """
        Remove the episode in the given index (even if it is not complete yet)
        :param episode_index: the index of the episode to remove
        :return: None
        """
        self.remove_episode(episode_index)

    def clean(self) -> None:
        """
        Clean the memory by removing all the episodes
        :return: None
        """
        self.reader_writer_lock.lock_writing_and_reading()

        self.transitions = []
        self._buffer = [Episode(n_step=self.n_step)]
        self._length = 1
        self._num_transitions = 0
        self._num_transitions_in_complete_episodes = 0

        self.reader_writer_lock.release_writing_and_reading()

    def mean_reward(self) -> np.ndarray:
        """
        Get the mean reward in the replay buffer
        :return: the mean reward
        """
        self.reader_writer_lock.lock_writing()

        mean = np.mean([transition.reward for transition in self.transitions])

        self.reader_writer_lock.release_writing()
        return mean

    def load_csv(self, csv_dataset: CsvDataset) -> None:
        """
        Restore the replay buffer contents from a csv file.
        The csv file is assumed to include a list of transitions.
        :param csv_dataset: A construct which holds the dataset parameters
        """
        df = pd.read_csv(csv_dataset.filepath)
        if len(df) > self.max_size[1]:
            screen.warning(
                "Warning! The number of transitions to load into the replay buffer ({}) is "
                "bigger than the max size of the replay buffer ({}). The excessive transitions will "
                "not be stored.".format(len(df), self.max_size[1]))

        episode_ids = df['episode_id'].unique()
        progress_bar = ProgressBar(len(episode_ids))
        state_columns = [
            col for col in df.columns if col.startswith('state_feature')
        ]

        for e_id in episode_ids:
            progress_bar.update(e_id)
            df_episode_transitions = df[df['episode_id'] == e_id]
            episode = Episode()
            for (_, current_transition), (_, next_transition) in zip(
                    df_episode_transitions[:-1].iterrows(),
                    df_episode_transitions[1:].iterrows()):
                state = np.array(
                    [current_transition[col] for col in state_columns])
                next_state = np.array(
                    [next_transition[col] for col in state_columns])

                episode.insert(
                    Transition(
                        state={'observation': state},
                        action=current_transition['action'],
                        reward=current_transition['reward'],
                        next_state={'observation': next_state},
                        game_over=False,
                        info={
                            'all_action_probabilities':
                            ast.literal_eval(
                                current_transition['all_action_probabilities'])
                        }))

            # Set the last transition to end the episode
            if csv_dataset.is_episodic:
                episode.get_last_transition().game_over = True

            self.store_episode(episode)

        # close the progress bar
        progress_bar.update(len(episode_ids))
        progress_bar.close()

        self.shuffle_episodes()
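
For orientation, a rough usage sketch of the episodic buffer defined above. This is only a sketch under assumptions: Transition is constructed with the same keyword arguments used in load_csv above, and the observation and reward values are arbitrary placeholders. Storing a transition with game_over=True closes the current episode, after which sample() can draw from the complete episodes.

# Hedged usage sketch; EpisodicExperienceReplay, Transition and MemoryGranularity are the classes shown in this listing.
import numpy as np

memory = EpisodicExperienceReplay(max_size=(MemoryGranularity.Transitions, 10000))

# store one short episode, transition by transition; game_over=True closes it
for step in range(5):
    memory.store(Transition(state={'observation': np.array([float(step)])},
                            action=0,
                            reward=1.0,
                            next_state={'observation': np.array([float(step + 1)])},
                            game_over=(step == 4)))

assert memory.num_complete_episodes() == 1
random_batch = memory.sample(size=3)                                         # uniform over complete episodes
consecutive_batch = memory.sample(size=3, is_consecutive_transitions=True)   # consecutive slice of one episode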
Example #5
class ExperienceReplay(Memory):
    """
    A regular replay buffer which stores transition without any additional structure
    """
    def __init__(self,
                 max_size: Tuple[MemoryGranularity, int],
                 allow_duplicates_in_batch_sampling: bool = True):
        """
        :param max_size: the maximum number of transitions or episodes to hold in the memory
        :param allow_duplicates_in_batch_sampling: allow having the same transition multiple times in a batch
        """
        super().__init__(max_size)
        if max_size[0] != MemoryGranularity.Transitions:
            raise ValueError(
                "Experience replay size can only be configured in terms of transitions"
            )
        self.transitions = []
        self.allow_duplicates_in_batch_sampling = allow_duplicates_in_batch_sampling

        self.reader_writer_lock = ReaderWriterLock()

    def length(self) -> int:
        """
        Get the number of transitions in the ER
        """
        return self.num_transitions()

    def num_transitions(self) -> int:
        """
        Get the number of transitions in the ER
        """
        return len(self.transitions)

    def sample(self, size: int) -> List[Transition]:
        """
        Sample a batch of transitions from the replay buffer. If the requested size is larger than the number
        of samples available in the replay buffer, the returned batch will be empty.
        :param size: the size of the batch to sample
        :return: a batch (list) of selected transitions from the replay buffer
        """
        self.reader_writer_lock.lock_writing()

        if self.allow_duplicates_in_batch_sampling:
            transitions_idx = np.random.randint(self.num_transitions(),
                                                size=size)

        else:
            if self.num_transitions() >= size:
                transitions_idx = np.random.choice(self.num_transitions(),
                                                   size=size,
                                                   replace=False)
            else:
                raise ValueError(
                    "The replay buffer cannot be sampled since there are not enough transitions yet. "
                    "There are currently {} transitions".format(
                        self.num_transitions()))

        batch = [self.transitions[i] for i in transitions_idx]

        self.reader_writer_lock.release_writing()
        return batch

    def get_shuffled_data_generator(self, size: int) -> List[Transition]:
        """
        Get a generator for iterating through the shuffled replay buffer, for processing the data in epochs.
        If the requested size is larger than the number of samples available in the replay buffer, the returned
        batch will be empty. The last returned batch may be smaller than the requested size, so that all the
        transitions in the replay buffer are covered.

        :param size: the size of the batch to return
        :return: a batch (list) of selected transitions from the replay buffer
        """
        self.reader_writer_lock.lock_writing()
        shuffled_transition_indices = list(range(len(self.transitions)))
        random.shuffle(shuffled_transition_indices)

        # we deliberately drop the leftover transitions at the end that do not fill a complete batch of size `size`
        for i in range(len(shuffled_transition_indices) // size):
            sample_data = [
                self.transitions[j]
                for j in shuffled_transition_indices[i * size:(i + 1) * size]
            ]
            self.reader_writer_lock.release_writing()

            yield sample_data

    def _enforce_max_length(self) -> None:
        """
        Make sure that the size of the replay buffer does not exceed the maximum size allowed.
        If it exceeds the max size, the oldest transition in the replay buffer will be removed.
        This function does not use locks since it is only called internally
        :return: None
        """
        granularity, size = self.max_size
        if granularity == MemoryGranularity.Transitions:
            while size != 0 and self.num_transitions() > size:
                self.remove_transition(0, False)
        else:
            raise ValueError(
                "The granularity of the replay buffer can only be set in terms of transitions"
            )

    def store(self, transition: Transition, lock: bool = True) -> None:
        """
        Store a new transition in the memory.
        :param transition: a transition to store
        :param lock: if true, will lock the readers writers lock. this can cause a deadlock if an inheriting class
                     locks and then calls store with lock = True
        :return: None
        """
        # Calling super.store() so that in case a memory backend is used, the memory backend can store this transition.
        super().store(transition)
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        self.transitions.append(transition)
        self._enforce_max_length()

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    def get_transition(self,
                       transition_index: int,
                       lock: bool = True) -> Union[None, Transition]:
        """
        Returns the transition in the given index. If the transition does not exist, returns None instead.
        :param transition_index: the index of the transition to return
        :param lock: use write locking if this is a shared memory
        :return: the corresponding transition
        """
        if lock:
            self.reader_writer_lock.lock_writing()

        if self.length() == 0 or transition_index >= self.length():
            transition = None
        else:
            transition = self.transitions[transition_index]

        if lock:
            self.reader_writer_lock.release_writing()

        return transition

    def remove_transition(self,
                          transition_index: int,
                          lock: bool = True) -> None:
        """
        Remove the transition in the given index.

        This does not remove the transition from the segment trees! It is just used to remove the transition
        from the transitions list.
        :param transition_index: the index of the transition to remove
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        if self.num_transitions() > transition_index:
            del self.transitions[transition_index]

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    # for API compatibility
    def get(self,
            transition_index: int,
            lock: bool = True) -> Union[None, Transition]:
        """
        Returns the transition in the given index. If the transition does not exist, returns None instead.
        :param transition_index: the index of the transition to return
        :return: the corresponding transition
        """
        return self.get_transition(transition_index, lock)

    # for API compatibility
    def remove(self, transition_index: int, lock: bool = True):
        """
        Remove the transition in the given index
        :param transition_index: the index of the transition to remove
        :return: None
        """
        self.remove_transition(transition_index, lock)

    def clean(self, lock: bool = True) -> None:
        """
        Clean the memory by removing all the transitions
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        self.transitions = []

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    def mean_reward(self) -> np.ndarray:
        """
        Get the mean reward in the replay buffer
        :return: the mean reward
        """
        self.reader_writer_lock.lock_writing()

        mean = np.mean([transition.reward for transition in self.transitions])

        self.reader_writer_lock.release_writing()

        return mean

    def save(self, file_path: str) -> None:
        """
        Save the replay buffer contents to a pickle file
        :param file_path: the path to the file that will be used to store the pickled transitions
        """
        with open(file_path, 'wb') as file:
            pickle.dump(self.transitions, file)

    def load_pickled(self, file_path: str) -> None:
        """
        Restore the replay buffer contents from a pickle file.
        The pickle file is assumed to include a list of transitions.
        :param file_path: The path to a pickle file to restore
        """
        with open(file_path, 'rb') as file:
            transitions = pickle.load(file)
            num_transitions = len(transitions)
            if num_transitions > self.max_size[1]:
                screen.warning(
                    "Warning! The number of transition to load into the replay buffer ({}) is "
                    "bigger than the max size of the replay buffer ({}). The excessive transitions will "
                    "not be stored.".format(num_transitions, self.max_size[1]))

            progress_bar = ProgressBar(num_transitions)
            for transition_idx, transition in enumerate(transitions):
                self.store(transition)

                # print progress
                if transition_idx % 100 == 0:
                    progress_bar.update(transition_idx)

            progress_bar.close()
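
As a complement, a short hypothetical sketch of the flat ExperienceReplay above: transitions are stored one by one, sampled uniformly (with duplicates by default), and the buffer can be persisted with save() and restored with load_pickled(). The file path and observation values are placeholders.

# Hedged usage sketch of the flat replay buffer shown above.
import numpy as np

memory = ExperienceReplay(max_size=(MemoryGranularity.Transitions, 1000))

for step in range(100):
    memory.store(Transition(state={'observation': np.array([float(step)])},
                            action=0,
                            reward=0.0,
                            next_state={'observation': np.array([float(step + 1)])},
                            game_over=False))

batch = memory.sample(32)                       # 32 uniformly drawn transitions (duplicates allowed by default)
memory.save('/tmp/replay_buffer.p')             # pickle the transition list
memory.load_pickled('/tmp/replay_buffer.p')     # store the pickled transitions back, respecting max_size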
Example #6
class EpisodicExperienceReplay(Memory):
    """
    A replay buffer that stores episodes of transitions. The additional structure allows performing various
    calculations of total return and other values that depend on the sequential behavior of the transitions
    in the episode.
    """
    def __init__(self, max_size: Tuple[MemoryGranularity, int]):
        """
        :param max_size: the maximum number of transitions or episodes to hold in the memory
        """
        super().__init__(max_size)

        self._buffer = [Episode()]  # list of episodes
        self.transitions = []
        self._length = 1  # the episodic replay buffer starts with a single empty episode
        self._num_transitions = 0
        self._num_transitions_in_complete_episodes = 0

        self.reader_writer_lock = ReaderWriterLock()

    def length(self, lock: bool = False) -> int:
        """
        Get the number of episodes in the ER (even if they are not complete)
        """
        length = self._length
        if self._length != 0 and self._buffer[-1].is_empty():
            length = self._length - 1

        return length

    def num_complete_episodes(self):
        """ Get the number of complete episodes in ER """
        length = self._length - 1

        return length

    def num_transitions(self):
        return self._num_transitions

    def num_transitions_in_complete_episodes(self):
        return self._num_transitions_in_complete_episodes

    def sample(self, size: int) -> List[Transition]:
        """
        Sample a batch of transitions from the replay buffer. If the requested size is larger than the number
        of samples available in the replay buffer, the returned batch will be empty.
        :param size: the size of the batch to sample
        :return: a batch (list) of selected transitions from the replay buffer
        """
        self.reader_writer_lock.lock_writing()

        if self.num_complete_episodes() >= 1:
            transitions_idx = np.random.randint(
                self.num_transitions_in_complete_episodes(), size=size)
            batch = [self.transitions[i] for i in transitions_idx]

        else:
            raise ValueError(
                "The episodic replay buffer cannot be sampled since there are no complete episodes yet. "
                "There is currently 1 episodes with {} transitions".format(
                    self._buffer[0].length()))

        self.reader_writer_lock.release_writing()

        return batch

    def _enforce_max_length(self) -> None:
        """
        Make sure that the size of the replay buffer does not exceed the maximum size allowed.
        If it exceeds the max size, the oldest episode in the replay buffer will be removed.
        :return: None
        """
        granularity, size = self.max_size
        if granularity == MemoryGranularity.Transitions:
            while size != 0 and self.num_transitions() > size:
                self._remove_episode(0)
        elif granularity == MemoryGranularity.Episodes:
            while self.length() > size:
                self._remove_episode(0)

    def _update_episode(self, episode: Episode) -> None:
        episode.update_returns()

    def verify_last_episode_is_closed(self) -> None:
        """
        Verify that there are no open episodes in the replay buffer
        :return: None
        """
        self.reader_writer_lock.lock_writing_and_reading()

        last_episode = self.get(-1, False)
        if last_episode and last_episode.length() > 0:
            self.close_last_episode(lock=False)

        self.reader_writer_lock.release_writing_and_reading()

    def close_last_episode(self, lock=True) -> None:
        """
        Close the last episode in the replay buffer and open a new one
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        last_episode = self._buffer[-1]

        self._num_transitions_in_complete_episodes += last_episode.length()
        self._length += 1

        # create a new Episode for the next transitions to be placed into
        self._buffer.append(Episode())

        # if updating the episode adds to the buffer, a new Episode needs to be ready first.
        # It would be better if this were less stateful.
        self._update_episode(last_episode)

        self._enforce_max_length()

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    def store(self, transition: Transition) -> None:
        """
        Store a new transition in the memory. If the transition game_over flag is on, this closes the episode and
        creates a new empty episode.
        Warning! Using the episodic memory by storing individual transitions instead of episodes will use the default
        Episode class parameters in order to create new episodes.
        :param transition: a transition to store
        :return: None
        """
        self.reader_writer_lock.lock_writing_and_reading()

        if len(self._buffer) == 0:
            self._buffer.append(Episode())
        last_episode = self._buffer[-1]
        last_episode.insert(transition)
        self.transitions.append(transition)
        self._num_transitions += 1
        if transition.game_over:
            self.close_last_episode(False)

        self._enforce_max_length()

        self.reader_writer_lock.release_writing_and_reading()

    def store_episode(self, episode: Episode, lock: bool = True) -> None:
        """
        Store a new episode in the memory.
        :param episode: the new episode to store
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        if self._buffer[-1].length() == 0:
            self._buffer[-1] = episode
        else:
            self._buffer.append(episode)
        self.transitions.extend(episode.transitions)
        self._num_transitions += episode.length()
        self.close_last_episode(False)

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    def get_episode(self,
                    episode_index: int,
                    lock: bool = True) -> Union[None, Episode]:
        """
        Returns the episode in the given index. If the episode does not exist, returns None instead.
        :param episode_index: the index of the episode to return
        :return: the corresponding episode
        """
        if lock:
            self.reader_writer_lock.lock_writing()

        if self.length() == 0 or episode_index >= self.length():
            episode = None
        else:
            episode = self._buffer[episode_index]

        if lock:
            self.reader_writer_lock.release_writing()
        return episode

    def _remove_episode(self, episode_index: int) -> None:
        """
        Remove the episode in the given index (even if it is not complete yet)
        :param episode_index: the index of the episode to remove
        :return: None
        """
        if len(self._buffer) > episode_index:
            episode_length = self._buffer[episode_index].length()
            self._length -= 1
            self._num_transitions -= episode_length
            self._num_transitions_in_complete_episodes -= episode_length
            del self.transitions[:episode_length]
            del self._buffer[episode_index]

    def remove_episode(self, episode_index: int) -> None:
        """
        Remove the episode in the given index (even if it is not complete yet)
        :param episode_index: the index of the episode to remove
        :return: None
        """
        self.reader_writer_lock.lock_writing_and_reading()

        self._remove_episode(episode_index)

        self.reader_writer_lock.release_writing_and_reading()

    # for API compatibility
    def get(self,
            episode_index: int,
            lock: bool = True) -> Union[None, Episode]:
        """
        Returns the episode in the given index. If the episode does not exist, returns None instead.
        :param episode_index: the index of the episode to return
        :return: the corresponding episode
        """
        return self.get_episode(episode_index, lock)

    def get_last_complete_episode(self) -> Union[None, Episode]:
        """
        Returns the last complete episode in the memory or None if there are no complete episodes
        :return: None or the last complete episode
        """
        self.reader_writer_lock.lock_writing()

        last_complete_episode_index = self.num_complete_episodes() - 1
        episode = None
        if last_complete_episode_index >= 0:
            episode = self.get(last_complete_episode_index)

        self.reader_writer_lock.release_writing()

        return episode

    # for API compatibility
    def remove(self, episode_index: int):
        """
        Remove the episode in the given index (even if it is not complete yet)
        :param episode_index: the index of the episode to remove
        :return: None
        """
        self.remove_episode(episode_index)

    def update_last_transition_info(self, info: Dict[str, Any]) -> None:
        """
        Update the info of the last transition stored in the memory
        :param info: the new info to append to the existing info
        :return: None
        """
        self.reader_writer_lock.lock_writing_and_reading()

        episode = self._buffer[-1]
        if episode.length() == 0:
            if len(self._buffer) < 2:
                # no transition has been stored yet -- release the lock before returning
                self.reader_writer_lock.release_writing_and_reading()
                return
            episode = self._buffer[-2]
        episode.transitions[-1].info.update(info)

        self.reader_writer_lock.release_writing_and_reading()

    def clean(self) -> None:
        """
        Clean the memory by removing all the episodes
        :return: None
        """
        self.reader_writer_lock.lock_writing_and_reading()

        self.transitions = []
        self._buffer = [Episode()]
        self._length = 1
        self._num_transitions = 0
        self._num_transitions_in_complete_episodes = 0

        self.reader_writer_lock.release_writing_and_reading()

    def mean_reward(self) -> np.ndarray:
        """
        Get the mean reward in the replay buffer
        :return: the mean reward
        """
        self.reader_writer_lock.lock_writing()

        mean = np.mean([transition.reward for transition in self.transitions])

        self.reader_writer_lock.release_writing()
        return mean
Example #7
class EpisodicExperienceReplay(Memory):
    """
    A replay buffer that stores episodes of transitions. The additional structure allows performing various
    calculations of total return and other values that depend on the sequential behavior of the transitions
    in the episode.
    """
    def __init__(self,
                 max_size: Tuple[MemoryGranularity,
                                 int] = (MemoryGranularity.Transitions,
                                         1000000),
                 n_step=-1,
                 train_to_eval_ratio: int = 1):
        """
        :param max_size: the maximum number of transitions or episodes to hold in the memory
        """
        super().__init__(max_size)
        self.n_step = n_step
        self._buffer = [Episode(n_step=self.n_step)]  # list of episodes
        self.transitions = []
        self._length = 1  # the episodic replay buffer starts with a single empty episode
        self._num_transitions = 0
        self._num_transitions_in_complete_episodes = 0
        self.reader_writer_lock = ReaderWriterLock()
        self.last_training_set_episode_id = None  # used in batch-rl
        self.last_training_set_transition_id = None  # used in batch-rl
        self.train_to_eval_ratio = train_to_eval_ratio  # used in batch-rl
        self.evaluation_dataset_as_episodes = None
        self.evaluation_dataset_as_transitions = None

        self.frozen = False

    def length(self, lock: bool = False) -> int:
        """
        Get the number of episodes in the ER (even if they are not complete)
        """
        length = self._length
        if self._length != 0 and self._buffer[-1].is_empty():
            length = self._length - 1

        return length

    def num_complete_episodes(self):
        """ Get the number of complete episodes in ER """
        length = self._length - 1

        return length

    def num_transitions(self):
        return self._num_transitions

    def num_transitions_in_complete_episodes(self):
        return self._num_transitions_in_complete_episodes

    def get_last_training_set_episode_id(self):
        return self.last_training_set_episode_id

    def get_last_training_set_transition_id(self):
        return self.last_training_set_transition_id

    def sample(self,
               size: int,
               is_consecutive_transitions=False) -> List[Transition]:
        """
        Sample a batch of transitions from the replay buffer. If the requested size is larger than the number
        of samples available in the replay buffer, the returned batch will be empty.
        :param size: the size of the batch to sample
        :param is_consecutive_transitions: if set True, samples a batch of consecutive transitions.
        :return: a batch (list) of selected transitions from the replay buffer
        """
        self.reader_writer_lock.lock_writing()

        if self.num_complete_episodes() >= 1:
            if is_consecutive_transitions:
                episode_idx = np.random.randint(0,
                                                self.num_complete_episodes())
                if self._buffer[episode_idx].length() <= size:
                    batch = self._buffer[episode_idx].transitions
                else:
                    transition_idx = np.random.randint(
                        size, self._buffer[episode_idx].length())
                    batch = self._buffer[episode_idx].transitions[
                        transition_idx - size:transition_idx]
            else:
                transitions_idx = np.random.randint(
                    self.num_transitions_in_complete_episodes(), size=size)
                batch = [self.transitions[i] for i in transitions_idx]

        else:
            raise ValueError(
                "The episodic replay buffer cannot be sampled since there are no complete episodes yet. "
                "There is currently 1 episodes with {} transitions".format(
                    self._buffer[0].length()))

        self.reader_writer_lock.release_writing()

        return batch

    # this is something added to sync the sampling of all agents for given index
    def sample_with_index(
            self,
            transitions_idx: List[int],
            episode_idx=-1,
            is_consecutive_transitions=False) -> List[Transition]:
        """
        Sample a batch of transitions from the replay buffer, using certain index.
        :param transitions_idx: the list of indices of the batch to sample
        :param episode_idx: the index of an episode
        :param is_consecutive_transitions: if set True, samples a batch of consecutive transitions.
        :return: a batch (list) of selected transitions from the replay buffer
        """
        self.reader_writer_lock.lock_writing()
        size = len(transitions_idx)

        if episode_idx != -1 and episode_idx > self.num_complete_episodes():
            # if there aren't that many episodes, just take the last complete one
            episode_idx = self.num_complete_episodes() - 1

        if is_consecutive_transitions:
            if self._buffer[episode_idx].length() <= size:
                batch = self._buffer[episode_idx].transitions
            else:
                transition_idx = np.random.randint(
                    size, self._buffer[episode_idx].length())
                batch = self._buffer[episode_idx].transitions[
                    transition_idx - size:transition_idx]
        else:
            transitions_idx = np.asarray(transitions_idx)
            batch = [self.transitions[i] for i in transitions_idx]

        self.reader_writer_lock.release_writing()

        return batch

    def get_episode_for_transition(
            self, transition: Transition) -> Tuple[int, Episode]:
        """
        Get the episode that the given transition came from.
        :param transition: the transition to look up the episode for
        :return: (episode index, the episode), or (-1, None) if no matching episode was found
        """

        for i, episode in enumerate(self._buffer):
            if transition in episode.transitions:
                return i, episode
        return -1, None

    def shuffle_episodes(self):
        """
        Shuffle all the complete episodes in the replay buffer, after removing the last (incomplete) episode
        :return:
        """
        self.reader_writer_lock.lock_writing()

        self.assert_not_frozen()

        # unlike the standard usage of the EpisodicExperienceReplay, where we always leave an empty episode after
        # the last full one so that new transitions have somewhere to be added, here we deliberately remove that
        # empty last episode, since we are about to shuffle the memory and do not want it to be shuffled in
        self.remove_last_episode(lock=False)

        random.shuffle(self._buffer)
        self.transitions = [t for e in self._buffer for t in e.transitions]

        # create a new Episode for the next transitions to be placed into
        self._buffer.append(Episode(n_step=self.n_step))
        self._length += 1

        self.reader_writer_lock.release_writing()

    def get_shuffled_training_data_generator(self,
                                             size: int) -> List[Transition]:
        """
        Get a generator for iterating through the shuffled replay buffer, for processing the data in epochs.
        If the requested size is larger than the number of samples available in the replay buffer, the returned
        batch will be empty. The last returned batch may be smaller than the requested size, so that all the
        transitions in the replay buffer are covered.

        :param size: the size of the batch to return
        :return: a batch (list) of selected transitions from the replay buffer
        """
        self.reader_writer_lock.lock_writing()

        shuffled_transition_indices = list(
            range(self.last_training_set_transition_id))
        random.shuffle(shuffled_transition_indices)

        # The last batch drawn will usually be < batch_size (=the size variable)
        for i in range(math.ceil(len(shuffled_transition_indices) / size)):
            sample_data = [
                self.transitions[j]
                for j in shuffled_transition_indices[i * size:(i + 1) * size]
            ]
            self.reader_writer_lock.release_writing()

            yield sample_data

    # this is something added to sync the sampling of all agents for given index
    def get_shuffled_training_data_generator_with_index(
            self, size: int,
            shuffled_transition_indices: List[int]) -> List[Transition]:
        """
        Get a generator for iterating through the replay buffer using a given list of shuffled transition indices.
        The last returned batch may be smaller than the requested size, so that all the given indices are covered.

        :param size: the size of the batch to return
        :param shuffled_transition_indices: the list of shuffled transition indices to sample from
        :return: a batch (list) of selected transitions from the replay buffer
        """
        self.reader_writer_lock.lock_writing()

        # The last batch drawn will usually be < batch_size (=the size variable)
        for i in range(math.ceil(len(shuffled_transition_indices) / size)):
            sample_data = [
                self.transitions[j]
                for j in shuffled_transition_indices[i * size:(i + 1) * size]
            ]
            self.reader_writer_lock.release_writing()

            yield sample_data

    def get_all_complete_episodes_transitions(self) -> List[Transition]:
        """
        Get all the transitions from all the complete episodes in the buffer
        :return: a list of transitions
        """
        return self.transitions[:self.num_transitions_in_complete_episodes()]

    def get_all_complete_episodes(self) -> List[Episode]:
        """
        Get all the complete episodes in the buffer
        :return: a list of episodes
        """
        return self.get_all_complete_episodes_from_to(
            0, self.num_complete_episodes())

    def get_all_complete_episodes_from_to(self, start_episode_id,
                                          end_episode_id) -> List[Episode]:
        """
        Get all the complete episodes in the buffer within the given episode index range
        :return: a list of episodes
        """
        return self._buffer[start_episode_id:end_episode_id]

    def _enforce_max_length(self) -> None:
        """
        Make sure that the size of the replay buffer does not exceed the maximum size allowed.
        If it exceeds the max size, the oldest episode in the replay buffer will be removed.
        :return: None
        """
        granularity, size = self.max_size
        if granularity == MemoryGranularity.Transitions:
            while size != 0 and self.num_transitions() > size:
                self.remove_first_episode(lock=False)
        elif granularity == MemoryGranularity.Episodes:
            while self.length() > size:
                self.remove_first_episode(lock=False)

    def _update_episode(self, episode: Episode) -> None:
        episode.update_transitions_rewards_and_bootstrap_data()

    def verify_last_episode_is_closed(self) -> None:
        """
        Verify that there are no open episodes in the replay buffer
        :return: None
        """
        self.reader_writer_lock.lock_writing_and_reading()

        last_episode = self.get(-1, False)
        if last_episode and last_episode.length() > 0:
            self.close_last_episode(lock=False)

        self.reader_writer_lock.release_writing_and_reading()

    def close_last_episode(self, lock=True) -> None:
        """
        Close the last episode in the replay buffer and open a new one
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        last_episode = self._buffer[-1]

        self._num_transitions_in_complete_episodes += last_episode.length()
        self._length += 1

        # create a new Episode for the next transitions to be placed into
        self._buffer.append(Episode(n_step=self.n_step))

        # if updating the episode adds to the buffer, a new Episode needs to be ready first.
        # It would be better if this were less stateful.
        self._update_episode(last_episode)

        self._enforce_max_length()

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    def store(self, transition: Transition) -> None:
        """
        Store a new transition in the memory. If the transition game_over flag is on, this closes the episode and
        creates a new empty episode.
        Warning! Using the episodic memory by storing individual transitions instead of episodes will use the default
        Episode class parameters in order to create new episodes.
        :param transition: a transition to store
        :return: None
        """
        self.assert_not_frozen()

        # Calling super.store() so that in case a memory backend is used, the memory backend can store this transition.
        super().store(transition)

        self.reader_writer_lock.lock_writing_and_reading()

        if len(self._buffer) == 0:
            self._buffer.append(Episode(n_step=self.n_step))
        last_episode = self._buffer[-1]
        last_episode.insert(transition)
        self.transitions.append(transition)
        self._num_transitions += 1
        if transition.game_over:
            self.close_last_episode(False)

        self._enforce_max_length()

        self.reader_writer_lock.release_writing_and_reading()

    def store_episode(self, episode: Episode, lock: bool = True) -> None:
        """
        Store a new episode in the memory.
        :param episode: the new episode to store
        :return: None
        """
        self.assert_not_frozen()

        # Calling super.store() so that in case a memory backend is used, the memory backend can store this episode.
        super().store_episode(episode)

        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        if self._buffer[-1].length() == 0:
            self._buffer[-1] = episode
        else:
            self._buffer.append(episode)
        self.transitions.extend(episode.transitions)
        self._num_transitions += episode.length()
        self.close_last_episode(False)

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    def get_episode(self,
                    episode_index: int,
                    lock: bool = True) -> Union[None, Episode]:
        """
        Returns the episode in the given index. If the episode does not exist, returns None instead.
        :param episode_index: the index of the episode to return
        :return: the corresponding episode
        """
        if lock:
            self.reader_writer_lock.lock_writing()

        if self.length() == 0 or episode_index >= self.length():
            episode = None
        else:
            episode = self._buffer[episode_index]

        if lock:
            self.reader_writer_lock.release_writing()
        return episode

    def _remove_episode(self, episode_index: int) -> None:
        """
        Remove either the first or the last episode in the buffer
        :param episode_index: the index of the episode to remove (either 0 or -1)
        :return: None
        """
        self.assert_not_frozen()
        assert episode_index == 0 or episode_index == -1, "_remove_episode only supports removing the first or the last " \
                                                          "episode"

        if len(self._buffer) > 0:
            episode_length = self._buffer[episode_index].length()
            self._length -= 1
            self._num_transitions -= episode_length
            self._num_transitions_in_complete_episodes -= episode_length
            if episode_index == 0:
                del self.transitions[:episode_length]
            else:  # episode_index = -1
                del self.transitions[-episode_length:]
            del self._buffer[episode_index]

    def remove_first_episode(self, lock: bool = True) -> None:
        """
        Remove the first episode (even if it is not complete yet)
        :param lock: if True, acquire the readers/writers lock. This can cause a deadlock if an inheriting class
                     already holds the lock and then calls this method with lock=True
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        self._remove_episode(0)
        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    def remove_last_episode(self, lock: bool = True) -> None:
        """
        Remove the last episode (even if it is not complete yet)
        :param lock: if True, acquire the readers/writers lock. This can cause a deadlock if an inheriting class
                     already holds the lock and then calls this method with lock=True
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        self._remove_episode(-1)

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    # for API compatibility
    def get(self,
            episode_index: int,
            lock: bool = True) -> Union[None, Episode]:
        """
        Returns the episode in the given index. If the episode does not exist, returns None instead.
        :param episode_index: the index of the episode to return
        :return: the corresponding episode
        """
        return self.get_episode(episode_index, lock)

    def get_last_complete_episode(self) -> Union[None, Episode]:
        """
        Returns the last complete episode in the memory or None if there are no complete episodes
        :return: None or the last complete episode
        """
        self.reader_writer_lock.lock_writing()

        last_complete_episode_index = self.num_complete_episodes() - 1
        episode = None
        if last_complete_episode_index >= 0:
            episode = self.get(last_complete_episode_index)

        self.reader_writer_lock.release_writing()

        return episode

    def clean(self) -> None:
        """
        Clean the memory by removing all the episodes
        :return: None
        """
        self.assert_not_frozen()
        self.reader_writer_lock.lock_writing_and_reading()

        self.transitions = []
        self._buffer = [Episode(n_step=self.n_step)]
        self._length = 1
        self._num_transitions = 0
        self._num_transitions_in_complete_episodes = 0

        self.reader_writer_lock.release_writing_and_reading()

    def mean_reward(self) -> np.ndarray:
        """
        Get the mean reward in the replay buffer
        :return: the mean reward
        """
        self.reader_writer_lock.lock_writing()

        mean = np.mean([transition.reward for transition in self.transitions])

        self.reader_writer_lock.release_writing()
        return mean

    def load_csv(self, csv_dataset: CsvDataset,
                 input_filter: InputFilter) -> None:
        """
        Restore the replay buffer contents from a csv file.
        The csv file is assumed to include a list of transitions.
        :param csv_dataset: A construct which holds the dataset parameters
        :param input_filter: A filter used to filter the CSV data before feeding it to the memory.
        """
        self.assert_not_frozen()

        df = pd.read_csv(csv_dataset.filepath)
        if len(df) > self.max_size[1]:
            screen.warning(
                "Warning! The number of transitions to load into the replay buffer ({}) is "
                "bigger than the max size of the replay buffer ({}). The excessive transitions will "
                "not be stored.".format(len(df), self.max_size[1]))

        episode_ids = df['episode_id'].unique()
        progress_bar = ProgressBar(len(episode_ids))
        state_columns = [
            col for col in df.columns if col.startswith('state_feature')
        ]

        for e_id in episode_ids:
            progress_bar.update(e_id)
            df_episode_transitions = df[df['episode_id'] == e_id]
            input_filter.reset()

            if len(df_episode_transitions) < 2:
                # we have to have at least 2 rows in each episode for creating a transition
                continue

            episode = Episode()
            transitions = []
            for (_, current_transition), (_, next_transition) in zip(
                    df_episode_transitions[:-1].iterrows(),
                    df_episode_transitions[1:].iterrows()):
                state = np.array(
                    [current_transition[col] for col in state_columns])
                next_state = np.array(
                    [next_transition[col] for col in state_columns])

                transitions.append(
                    Transition(
                        state={'observation': state},
                        action=int(current_transition['action']),
                        reward=current_transition['reward'],
                        next_state={'observation': next_state},
                        game_over=False,
                        info={
                            'all_action_probabilities':
                            ast.literal_eval(
                                current_transition['all_action_probabilities'])
                        }), )

            transitions = input_filter.filter(transitions, deep_copy=False)
            for t in transitions:
                episode.insert(t)

            # Set the last transition to end the episode
            if csv_dataset.is_episodic:
                episode.get_last_transition().game_over = True

            self.store_episode(episode)

        # close the progress bar
        progress_bar.update(len(episode_ids))
        progress_bar.close()
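
    # Sketch of the CSV layout load_csv() expects, inferred from the column accesses above (episode_id,
    # state_feature_*, action, reward, all_action_probabilities). The CsvDataset and InputFilter calls below
    # are assumptions about their simplest construction:
    #
    #     # dataset.csv
    #     # episode_id,state_feature_0,state_feature_1,action,reward,all_action_probabilities
    #     # 0,0.10,0.25,1,0.0,"[0.5, 0.5]"
    #     # 0,0.30,0.40,0,1.0,"[0.9, 0.1]"
    #
    #     memory.load_csv(CsvDataset('dataset.csv', is_episodic=True), InputFilter())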

    def freeze(self):
        """
        Freeze the replay buffer so that no new transitions or episodes can be added to it.
        Useful when working with a fixed dataset (e.g. batch-rl or imitation learning).
        :return: None
        """
        self.frozen = True

    def assert_not_frozen(self):
        """
        Check that the memory is not frozen and can still be changed.
        :return: None
        """
        assert self.frozen is False, "Memory is frozen, and cannot be changed."

    def prepare_evaluation_dataset(self):
        """
        Gather the memory content that will be used for off-policy evaluation, both as a list of episodes and as a
        flat list of transitions.
        :return: None
        """
        self.reader_writer_lock.lock_writing_and_reading()

        self._split_training_and_evaluation_datasets()
        self.evaluation_dataset_as_episodes = deepcopy(
            self.get_all_complete_episodes_from_to(
                self.get_last_training_set_episode_id() + 1,
                self.num_complete_episodes()))

        if len(self.evaluation_dataset_as_episodes) == 0:
            raise ValueError(
                'train_to_eval_ratio is too high causing the evaluation set to be empty. '
                'Consider decreasing its value.')

        self.evaluation_dataset_as_transitions = [
            t for e in self.evaluation_dataset_as_episodes
            for t in e.transitions
        ]
        self.reader_writer_lock.release_writing_and_reading()
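
    # Sketch of the batch-rl flow these attributes support (the ratio and file path are illustrative; note that
    # prepare_evaluation_dataset() raises a ValueError if the resulting evaluation split is empty):
    #
    #     memory = EpisodicExperienceReplay(train_to_eval_ratio=0.8)
    #     memory.load_pickled('dataset.p')       # or load_csv(...)
    #     memory.freeze()                        # no further writes are allowed
    #     memory.prepare_evaluation_dataset()
    #     eval_episodes = memory.evaluation_dataset_as_episodes
    #     eval_transitions = memory.evaluation_dataset_as_transitions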

    def _split_training_and_evaluation_datasets(self):
        """
        If the data in the buffer has not been split into training and evaluation sets yet, split it according
        to train_to_eval_ratio.
        :return: None
        """

        if self.last_training_set_transition_id is None:
            if not 0 < self.train_to_eval_ratio < 1:
                raise ValueError(
                    'train_to_eval_ratio should be in the (0, 1) range.')

            transition = self.transitions[round(
                self.train_to_eval_ratio *
                self.num_transitions_in_complete_episodes())]
            episode_num, episode = self.get_episode_for_transition(transition)
            self.last_training_set_episode_id = episode_num
            self.last_training_set_transition_id = \
                sum(e.length() for e in self.get_all_complete_episodes_from_to(0, self.last_training_set_episode_id + 1))

    def save(self, file_path: str) -> None:
        """
        Save the replay buffer contents to a pickle file
        :param file_path: the path to the file that will be used to store the pickled transitions
        """
        with open(file_path, 'wb') as file:
            pickle.dump(self.get_all_complete_episodes(), file)

    def load_pickled(self, file_path: str) -> None:
        """
        Restore the replay buffer contents from a pickle file.
        The pickle file is assumed to include a list of transitions.
        :param file_path: The path to a pickle file to restore
        """
        self.assert_not_frozen()

        with open(file_path, 'rb') as file:
            episodes = pickle.load(file)
            num_transitions = sum([len(e.transitions) for e in episodes])
            if num_transitions > self.max_size[1]:
                screen.warning(
                    "Warning! The number of transition to load into the replay buffer ({}) is "
                    "bigger than the max size of the replay buffer ({}). The excessive transitions will "
                    "not be stored.".format(num_transitions, self.max_size[1]))

            progress_bar = ProgressBar(len(episodes))
            for episode_idx, episode in enumerate(episodes):
                self.store_episode(episode)

                # print progress
                progress_bar.update(episode_idx)

            progress_bar.close()
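
# Taken together, save() and load_pickled() above give a simple way to persist a dataset of complete episodes
# and restore it later. A minimal round-trip sketch (paths and sizes are illustrative):
#
#     memory = EpisodicExperienceReplay(max_size=(MemoryGranularity.Transitions, 100000))
#     # ... fill the memory by calling store() / store_episode() ...
#     memory.save('/tmp/episodes.p')
#
#     restored = EpisodicExperienceReplay(max_size=(MemoryGranularity.Transitions, 100000))
#     restored.load_pickled('/tmp/episodes.p')
#     restored.freeze()   # typical for batch-rl / imitation learning, as noted in freeze()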
Exemplo n.º 8
0
class ExperienceReplay(Memory):
    """
    A regular replay buffer which stores transitions without any additional structure
    """
    def __init__(self,
                 max_size: Tuple[MemoryGranularity, int],
                 allow_duplicates_in_batch_sampling: bool = True):
        """
        :param max_size: the maximum number of transitions or episodes to hold in the memory
        :param allow_duplicates_in_batch_sampling: allow having the same transition multiple times in a batch
        """
        super().__init__(max_size)
        if max_size[0] != MemoryGranularity.Transitions:
            raise ValueError(
                "Experience replay size can only be configured in terms of transitions"
            )
        self.transitions = []
        self._num_transitions = 0
        self.allow_duplicates_in_batch_sampling = allow_duplicates_in_batch_sampling

        self.reader_writer_lock = ReaderWriterLock()

    def length(self) -> int:
        """
        Get the number of transitions in the ER
        """
        return self.num_transitions()

    def num_transitions(self) -> int:
        """
        Get the number of transitions in the ER
        """
        return self._num_transitions

    def sample(self, size: int) -> List[Transition]:
        """
        Sample a batch of transitions from the replay buffer. If duplicates are allowed, the batch is sampled
        with replacement; otherwise, requesting more transitions than are currently stored raises a ValueError.
        :param size: the size of the batch to sample
        :return: a batch (list) of selected transitions from the replay buffer
        """
        self.reader_writer_lock.lock_writing()

        if self.allow_duplicates_in_batch_sampling:
            transitions_idx = np.random.randint(self.num_transitions(),
                                                size=size)

        else:
            if self.num_transitions() >= size:
                transitions_idx = np.random.choice(self.num_transitions(),
                                                   size=size,
                                                   replace=False)
            else:
                raise ValueError(
                    "The replay buffer cannot be sampled since there are not enough transitions yet. "
                    "There are currently {} transitions".format(
                        self.num_transitions()))

        batch = [self.transitions[i] for i in transitions_idx]

        self.reader_writer_lock.release_writing()

        return batch
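
    # Sampling behaviour sketch (the sizes are illustrative):
    #
    #     er = ExperienceReplay(max_size=(MemoryGranularity.Transitions, 1000))
    #     # ... store at least one transition ...
    #     batch = er.sample(32)       # duplicates are allowed by default, so any size can be requested
    #
    #     er = ExperienceReplay(max_size=(MemoryGranularity.Transitions, 1000),
    #                           allow_duplicates_in_batch_sampling=False)
    #     batch = er.sample(32)       # raises ValueError until at least 32 transitions have been stored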

    def _enforce_max_length(self) -> None:
        """
        Make sure that the size of the replay buffer does not exceed the maximum size allowed.
        If it does, the oldest transitions in the replay buffer are removed until it fits.
        This function does not use locks since it is only called internally
        :return: None
        """
        granularity, size = self.max_size
        if granularity == MemoryGranularity.Transitions:
            while size != 0 and self.num_transitions() > size:
                self.remove_transition(0, False)
        else:
            raise ValueError(
                "The granularity of the replay buffer can only be set in terms of transitions"
            )

    def store(self, transition: Transition, lock: bool = True) -> None:
        """
        Store a new transition in the memory.
        :param transition: a transition to store
        :param lock: if True, acquire the readers/writers lock. This can cause a deadlock if an inheriting class
                     already holds the lock and then calls store with lock=True
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        self._num_transitions += 1
        self.transitions.append(transition)
        self._enforce_max_length()

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    def get_transition(self,
                       transition_index: int,
                       lock: bool = True) -> Union[None, Transition]:
        """
        Returns the transition in the given index. If the transition does not exist, returns None instead.
        :param transition_index: the index of the transition to return
        :param lock: use write locking if this is a shared memory
        :return: the corresponding transition
        """
        if lock:
            self.reader_writer_lock.lock_writing()

        if self.length() == 0 or transition_index >= self.length():
            transition = None
        else:
            transition = self.transitions[transition_index]

        if lock:
            self.reader_writer_lock.release_writing()

        return transition

    def remove_transition(self,
                          transition_index: int,
                          lock: bool = True) -> None:
        """
        Remove the transition in the given index.
        This does not remove the transition from the segment trees! It only removes the transition
        from the transitions list.
        :param transition_index: the index of the transition to remove
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        if self.num_transitions() > transition_index:
            self._num_transitions -= 1
            del self.transitions[transition_index]

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    # for API compatibility
    def get(self,
            transition_index: int,
            lock: bool = True) -> Union[None, Transition]:
        """
        Returns the transition in the given index. If the transition does not exist, returns None instead.
        :param transition_index: the index of the transition to return
        :return: the corresponding transition
        """
        return self.get_transition(transition_index, lock)

    # for API compatibility
    def remove(self, transition_index: int, lock: bool = True):
        """
        Remove the transition in the given index
        :param transition_index: the index of the transition to remove
        :return: None
        """
        self.remove_transition(transition_index, lock)

    def update_last_transition_info(self, info: Dict[str, Any]) -> None:
        """
        Update the info of the last transition stored in the memory
        :param info: the new info to append to the existing info
        :return: None
        """
        self.reader_writer_lock.lock_writing_and_reading()

        if self.length() == 0:
            raise ValueError("There are no transition in the replay buffer")
        self.transitions[-1].info.update(info)

        self.reader_writer_lock.release_writing_and_reading()
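
    # Sketch (the info key is an illustrative placeholder; it is merged into the last stored transition's info dict):
    #
    #     er.store(transition)
    #     er.update_last_transition_info({'bootstrap_value': 0.7})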

    def clean(self, lock: bool = True) -> None:
        """
        Clean the memory by removing all the transitions
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        self.transitions = []
        self._num_transitions = 0

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    def mean_reward(self) -> np.ndarray:
        """
        Get the mean reward in the replay buffer
        :return: the mean reward
        """
        self.reader_writer_lock.lock_writing()

        mean = np.mean([transition.reward for transition in self.transitions])

        self.reader_writer_lock.release_writing()

        return mean
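
A compact end-to-end sketch of the plain ExperienceReplay above, showing the FIFO eviction performed by
_enforce_max_length. The Transition fields follow the usage seen earlier in this listing (info is omitted here on
the assumption that it is optional), and the observation values are illustrative:

memory = ExperienceReplay(max_size=(MemoryGranularity.Transitions, 3))
for i in range(5):
    memory.store(Transition(state={'observation': np.array([i])}, action=0, reward=float(i),
                            next_state={'observation': np.array([i + 1])}, game_over=False))

print(memory.num_transitions())   # 3 -- the two oldest transitions were evicted to respect max_size
batch = memory.sample(2)          # duplicates allowed by default
print(memory.mean_reward())       # mean reward over the 3 transitions still stored (here 3.0)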