Example #1
    def _setup_model(self) -> None:
        self._setup_lr_schedule()
        self.set_random_seed(self.seed)
        self.replay_buffer = ReplayBuffer(
            self.buffer_size,
            self.observation_space,
            self.action_space,
            self.device,
            optimize_memory_usage=self.optimize_memory_usage,
        )
        self.policy = self.policy_class(
            self.observation_space,
            self.action_space,
            self.support_dim,
            self.lr_schedule,
            **self.policy_kwargs,  # pytype:disable=not-instantiable
        )
        self.policy = self.policy.to(self.device)

        # Convert train freq parameter to TrainFreq object
        self._convert_train_freq()
        self._create_aliases()
        # Target entropy is used when learning the entropy coefficient
        if self.target_entropy == "auto":
            # automatically set target entropy if needed
            self.target_entropy = -np.prod(self.env.action_space.shape).astype(
                np.float32)
        else:
            # Force conversion
            # this will also throw an error for unexpected string
            self.target_entropy = float(self.target_entropy)

        # The entropy coefficient or entropy can be learned automatically
        # see Automating Entropy Adjustment for Maximum Entropy RL section
        # of https://arxiv.org/abs/1812.05905
        if isinstance(self.ent_coef, str) and self.ent_coef.startswith("auto"):
            # Default initial value of ent_coef when learned
            init_value = 1.0
            if "_" in self.ent_coef:
                init_value = float(self.ent_coef.split("_")[1])
                assert init_value > 0.0, "The initial value of ent_coef must be greater than 0"

            # Note: we optimize the log of the entropy coeff which is slightly different from the paper
            # as discussed in https://github.com/rail-berkeley/softlearning/issues/37
            self.log_ent_coef = th.log(
                th.ones(1, device=self.device) *
                init_value).requires_grad_(True)
            self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef],
                                                    lr=self.lr_schedule(1))
        else:
            # Force conversion to float
            # this will throw an error if a malformed string (different from 'auto')
            # is passed
            self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(
                self.device)
    def _store_transition(
        self,
        replay_buffer: ReplayBuffer,
        buffer_action: np.ndarray,
        new_obs: np.ndarray,
        reward: np.ndarray,
        done: np.ndarray,
        infos: List[Dict[str, Any]],
    ) -> None:
        """
        Store transition in the replay buffer.
        We store the normalized action and the unnormalized observation.
        It also handles terminal observations (because VecEnv resets automatically).

        :param replay_buffer: Replay buffer object where to store the transition.
        :param buffer_action: normalized action
        :param new_obs: next observation in the current episode
            or first observation of the episode (when done is True)
        :param reward: reward for the current transition
        :param done: Termination signal
        :param infos: List of additional information about the transition.
            It may contain the terminal observations and information about timeout.
        """
        # Store only the unnormalized version
        if self._vec_normalize_env is not None:
            new_obs_ = self._vec_normalize_env.get_original_obs()
            reward_ = self._vec_normalize_env.get_original_reward()
        else:
            # Avoid changing the original ones
            self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward

        # As the VecEnv resets automatically, new_obs is already the
        # first observation of the next episode
        if done and infos[0].get("terminal_observation") is not None:
            next_obs = infos[0]["terminal_observation"]
            # VecNormalize normalizes the terminal observation
            if self._vec_normalize_env is not None:
                next_obs = self._vec_normalize_env.unnormalize_obs(next_obs)
        else:
            next_obs = new_obs_

        replay_buffer.add(
            self._last_original_obs,
            next_obs,
            buffer_action,
            reward_,
            done,
            infos,
        )

        self._last_obs = new_obs
        # Save the unnormalized observation
        if self._vec_normalize_env is not None:
            self._last_original_obs = new_obs_
def _store_transition_not_done_if_truncated(
    self,
    replay_buffer: ReplayBuffer,
    buffer_action: np.ndarray,
    new_obs: np.ndarray,
    reward: np.ndarray,
    done: np.ndarray,
    infos: List[Dict[str, Any]],
) -> None:
    """
    Store transition in the replay buffer.
    We store the normalized action and the unnormalized observation.
    It also handles terminal observations (because VecEnv resets automatically).

    :param replay_buffer: Replay buffer object where to store the transition.
    :param buffer_action: normalized action
    :param new_obs: next observation in the current episode
        or first observation of the episode (when done is True)
    :param reward: reward for the current transition
    :param done: Termination signal
    :param infos: List of additional information about the transition.
        It contains the terminal observations.
    """
    # Store only the unnormalized version
    if self._vec_normalize_env is not None:
        new_obs_ = self._vec_normalize_env.get_original_obs()
        reward_ = self._vec_normalize_env.get_original_reward()
    else:
        # Avoid changing the original ones
        self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward

    # As the VecEnv resets automatically, new_obs is already the
    # first observation of the next episode
    if done and infos[0].get("terminal_observation") is not None:
        next_obs = infos[0]["terminal_observation"]
        # VecNormalize normalizes the terminal observation
        if self._vec_normalize_env is not None:
            next_obs = self._vec_normalize_env.unnormalize_obs(next_obs)
    else:
        next_obs = new_obs_

    # NOTE: The monkey patch is inside the following block of code
    done_ = np.array([False]) if infos[0].get("TimeLimit.truncated",
                                              False) else done
    replay_buffer.add(self._last_original_obs, next_obs, buffer_action,
                      reward_, done_)
    # replay_buffer.add(self._last_original_obs, next_obs, buffer_action, reward_, done)

    self._last_obs = new_obs
    # Save the unnormalized observation
    if self._vec_normalize_env is not None:
        self._last_original_obs = new_obs_
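Because `_store_transition_not_done_if_truncated` is defined at module level yet takes `self`, it is presumably meant to be monkey-patched onto an off-policy algorithm class. A hedged usage sketch under that assumption (the choice of `SAC` and the environment id are illustrative only, and the five-argument `replay_buffer.add` call above assumes an SB3 version without the `infos` parameter):

from stable_baselines3 import SAC

# Replace the stock transition storage so that time-limit truncations
# are written to the buffer with done=False (the "NOTE" block above).
SAC._store_transition = _store_transition_not_done_if_truncated

model = SAC("MlpPolicy", "Pendulum-v1", verbose=0)
model.learn(total_timesteps=1_000)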
Example #4
 def _setup_model(self):
     self._setup_lr_schedule()
     self.set_random_seed(self.seed)
     self.replay_buffer = ReplayBuffer(self.buffer_size, self.observation_space,
                                       self.action_space, self.device)
     self.policy = self.policy_class(self.observation_space, self.action_space,
                                     self.lr_schedule, **self.policy_kwargs)
     self.policy = self.policy.to(self.device)
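For reference, a minimal sketch of how a buffer constructed this way is typically filled and sampled outside of `collect_rollouts` (the environment id is illustrative; depending on the SB3 version, `add` may also expect an `infos` list as a sixth argument):

import gym
from stable_baselines3.common.buffers import ReplayBuffer

env = gym.make("Pendulum-v1")
buffer = ReplayBuffer(1_000, env.observation_space, env.action_space, device="cpu")

obs = env.reset()
for _ in range(100):
    action = env.action_space.sample()
    next_obs, reward, done, info = env.step(action)
    buffer.add(obs, next_obs, action, reward, done)  # newer SB3: buffer.add(..., done, [info])
    obs = env.reset() if done else next_obs

batch = buffer.sample(32)   # ReplayBufferSamples of torch tensors
print(batch.observations.shape, batch.rewards.shape)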
Example #5
    def learn(self, initial_models):
        mesa_algo = TD3(
            "MlpPolicy", self.env, verbose=1, learning_starts=1
        )  # Note: Unnecessarily initializes parameters (could speed up a bit by fixing)

        mesa_algo.set_parameters(to_torch(initial_models), exact_match=False)
        LOG_DIR = "/home/jet/catkin_ws/src/marsha/marsha_ai/training/logs/"
        MODEL_DIR = "/home/jet/catkin_ws/src/marsha/marsha_ai/training/models/"

        callback_list = []
        callback_list.append(TensorboardCallback())
        callback_list.append(
            StopTrainingOnMaxEpisodes(max_episodes=5, verbose=1))
        """callback_list.append(EvalCallback(self.env, best_model_save_path=MODEL_DIR, log_path=LOG_DIR,
                                    deterministic=True,
                                    eval_freq=5,
                                    n_eval_episodes=1))"""
        mesa_algo.learn(total_timesteps=1000, callback=callback_list
                        )  #rospy.get_param("/hyperparameters/total_timesteps")

        print("finished training! Testing mesa network...")
        test_buffer = ReplayBuffer(100,
                                   TaskEnv.observation_space,
                                   TaskEnv.action_space,
                                   device="cuda")

        test_env = Monitor(self.env)
        done = False
        ob = test_env.reset()
        while not done:
            action, state = mesa_algo.predict(ob)
            next_ob, reward, done, info = test_env.step(action)
            test_buffer.add(ob, next_ob, action, reward, done, [info])
            ob = next_ob

        meta_buffer = {"test": test_buffer, "train": mesa_algo.replay_buffer}

        optimized_mesa_parameters = mesa_algo.get_parameters()
        tf_mesa_models = from_torch(optimized_mesa_parameters)

        return meta_buffer, tf_mesa_models
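A hedged sketch of how the returned pair might be consumed by the meta-learning caller; the `trainer` instance and the comparison below are assumptions, not part of the original code:

meta_buffer, tf_mesa_models = trainer.learn(initial_models)  # `trainer`: an instance of the class above

train_batch = meta_buffer["train"].sample(64)
test_batch = meta_buffer["test"].sample(32)
# e.g. compare rewards seen during training against the held-out test rollout
print(train_batch.rewards.mean().item(), test_batch.rewards.mean().item())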
Example #6
    def _setup_model(self) -> None:
        super()._setup_model()
        self._create_aliases()

        # Target entropy is used when learning the entropy coefficient
        if self.target_entropy == 'auto':
            # automatically set target entropy if needed
            self.target_entropy = -np.prod(self.env.action_space.shape).astype(
                np.float32)
        else:
            # Force conversion
            # this will also throw an error for unexpected string
            self.target_entropy = float(self.target_entropy)

        # The entropy coefficient or entropy can be learned automatically
        # see Automating Entropy Adjustment for Maximum Entropy RL section
        # of https://arxiv.org/abs/1812.05905
        if isinstance(self.ent_coef, str) and self.ent_coef.startswith('auto'):
            # Default initial value of ent_coef when learned
            init_value = 1.0
            if '_' in self.ent_coef:
                init_value = float(self.ent_coef.split('_')[1])
                assert init_value > 0., "The initial value of ent_coef must be greater than 0"

            # Note: we optimize the log of the entropy coeff which is slightly different from the paper
            # as discussed in https://github.com/rail-berkeley/softlearning/issues/37
            self.log_ent_coef = th.log(
                th.ones(1, device=self.device) *
                init_value).requires_grad_(True)
            self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef],
                                                    lr=self.lr_schedule(1))
        else:
            # Force conversion to float
            # this will throw an error if a malformed string (different from 'auto')
            # is passed
            self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(
                self.device)

        self.bc_buffer = ReplayBuffer(int(1e4), self.observation_space,
                                      self.action_space, self.device)
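The "auto" branch above optimizes the logarithm of the entropy coefficient rather than the coefficient itself. A self-contained sketch of the corresponding temperature update performed later during training (the batch of log-probabilities is synthetic, and `target_entropy` follows the -|A| heuristic above):

import torch as th

target_entropy = -2.0                                  # e.g. -np.prod(action_space.shape)
log_ent_coef = th.log(th.ones(1)).requires_grad_(True)
ent_coef_optimizer = th.optim.Adam([log_ent_coef], lr=3e-4)

log_prob = th.randn(256, 1)                            # stand-in for the actor's log-probabilities
ent_coef_loss = -(log_ent_coef * (log_prob + target_entropy).detach()).mean()

ent_coef_optimizer.zero_grad()
ent_coef_loss.backward()
ent_coef_optimizer.step()
ent_coef = th.exp(log_ent_coef.detach())               # value used to weight the entropy term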
Example #7
 def _setup_model(self) -> None:
     self._setup_lr_schedule()
     self.set_random_seed(self.seed)
     self.replay_buffer = ReservoirBuffer(
         self.buffer_size,
         self.observation_space,
         self.action_space,
         self.device,
         optimize_memory_usage=self.optimize_memory_usage,
     )
     self.current_experience_buffer = ReplayBuffer(
         1,
         self.observation_space,
         self.action_space,
         self.device,
         optimize_memory_usage=False,
     )
     self.policy = self.policy_class(
         self.observation_space,
         self.action_space,
         self.lr_schedule,
         **self.policy_kwargs  # pytype:disable=not-instantiable
     )
     self.policy = self.policy.to(self.device)
    def _setup_model(self) -> None:
        self._setup_lr_schedule()
        self.set_random_seed(self.seed)
        self.replay_buffer = ReplayBuffer(
            self.buffer_size,
            self.observation_space,
            self.action_space,
            self.device,
            optimize_memory_usage=self.optimize_memory_usage,
        )
        self.policy = self.policy_class(
            self.observation_space,
            self.action_space,
            self.lr_schedule,
            **self.policy_kwargs,  # pytype:disable=not-instantiable
        )
        self.policy = self.policy.to(self.device)

        # Convert train freq parameter to TrainFreq object
        self._convert_train_freq()
 def setup_buffer(self, num_samples):
     assert self.n_envs == 1, "I don't think multiple envs works for offline policies, but you can check and make suitable updates"
     self._old_buffer = self.replay_buffer
     callback = DoNothingCallback(self)
     self.replay_buffer = ReplayBuffer(
         num_samples,
         self.observation_space,
         self.action_space,
         self.device,
     )
     self.env.reset()
     train_freq = TrainFreq(num_samples, TrainFrequencyUnit("step"))
     self.collect_rollouts(
         self.env,
         train_freq=train_freq,
         action_noise=self.action_noise,
         callback=callback,
         learning_starts=0,
         replay_buffer=self.replay_buffer,
         log_interval=10,
     )
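A hedged usage sketch for `setup_buffer`; the `model` object, the sample count, and the follow-up `train` call are assumptions about the surrounding offline-training code:

# Fill a fresh buffer with exactly 10,000 steps of the current policy, then train offline.
model.setup_buffer(num_samples=10_000)
model.train(gradient_steps=5_000, batch_size=256)

# The previous (online) buffer is kept in `_old_buffer` and can be restored if needed.
model.replay_buffer = model._old_buffer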
Example #10
class AWAC(OffPolicyAlgorithm):
    """
	Advantage-Weighted Actor-Critic (AWAC), built on top of Soft Actor-Critic (SAC)
	Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor,
	This implementation borrows code from original implementation (https://github.com/haarnoja/sac)
	from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo
	(https://github.com/rail-berkeley/softlearning/)
	and from Stable Baselines (https://github.com/hill-a/stable-baselines)
	Paper: https://arxiv.org/abs/1801.01290
	Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html

	Note: we use double q target and not value target as discussed
	in https://github.com/hill-a/stable-baselines/issues/270

	:param policy: (SACPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, ...)
	:param env: (GymEnv or str) The environment to learn from (if registered in Gym, can be str)
	:param learning_rate: (float or callable) learning rate for adam optimizer,
		the same learning rate will be used for all networks (Q-Values, Actor and Value function)
		it can be a function of the current progress remaining (from 1 to 0)
	:param buffer_size: (int) size of the replay buffer
	:param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
	:param batch_size: (int) Minibatch size for each gradient update
	:param tau: (float) the soft update coefficient ("Polyak update", between 0 and 1)
	:param gamma: (float) the discount factor
	:param train_freq: (int) Update the model every ``train_freq`` steps.
	:param gradient_steps: (int) How many gradient updates to do after each step
	:param n_episodes_rollout: (int) Update the model every ``n_episodes_rollout`` episodes.
		Note that this cannot be used at the same time as ``train_freq``
	:param action_noise: (ActionNoise) the action noise type (None by default), this can help
		for hard exploration problem. Cf common.noise for the different action noise type.
	:param ent_coef: (str or float) Entropy regularization coefficient. (Equivalent to
		inverse of reward scale in the original SAC paper.)  Controlling exploration/exploitation trade-off.
		Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)
	:param target_update_interval: (int) update the target network every ``target_update_interval`` steps.
	:param target_entropy: (str or float) target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``)
	:param create_eval_env: (bool) Whether to create a second environment that will be
		used for evaluating the agent periodically. (Only available when passing string for the environment)
	:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
	:param verbose: (int) the verbosity level: 0 no output, 1 info, 2 debug
	:param seed: (int) Seed for the pseudo random generators
	:param device: (str or th.device) Device (cpu, cuda, ...) on which the code should be run.
		Setting it to auto, the code will be run on the GPU if possible.
	:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
	"""
    def __init__(self,
                 policy: Union[str, Type[AWACPolicy]],
                 env: Union[GymEnv, str],
                 learning_rate: Union[float, Callable] = 3e-4,
                 buffer_size: int = int(1e6),
                 learning_starts: int = 100,
                 batch_size: int = 256,
                 tau: float = 0.005,
                 gamma: float = 0.99,
                 train_freq: int = 1,
                 gradient_steps: int = 1,
                 n_episodes_rollout: int = -1,
                 action_noise: Optional[ActionNoise] = None,
                 ent_coef: Union[str, float] = 'auto',
                 target_update_interval: int = 1,
                 target_entropy: Union[str, float] = 'auto',
                 awr_use_mle_for_vf: bool = True,
                 beta: int = 50,
                 tensorboard_log: Optional[str] = None,
                 create_eval_env: bool = False,
                 policy_kwargs: Dict[str, Any] = None,
                 verbose: int = 0,
                 seed: Optional[int] = None,
                 device: Union[th.device, str] = 'auto',
                 _init_setup_model: bool = True):

        super().__init__(policy,
                         env,
                         AWACPolicy,
                         learning_rate,
                         buffer_size,
                         learning_starts,
                         batch_size,
                         policy_kwargs,
                         tensorboard_log,
                         verbose,
                         device,
                         create_eval_env=create_eval_env,
                         seed=seed,
                         use_sde=False,
                         sde_sample_freq=-1,
                         use_sde_at_warmup=False)

        self.target_entropy = target_entropy
        self.log_ent_coef = None  # type: Optional[th.Tensor]
        self.target_update_interval = target_update_interval
        self.tau = tau
        # Entropy coefficient / Entropy temperature
        # Inverse of the reward scale
        self.ent_coef = ent_coef
        self.target_update_interval = target_update_interval
        self.train_freq = train_freq
        self.gradient_steps = gradient_steps
        self.n_episodes_rollout = n_episodes_rollout
        self.action_noise = action_noise
        self.gamma = gamma
        self.ent_coef_optimizer = None
        self.awr_use_mle_for_vf = awr_use_mle_for_vf
        self.beta = beta
        self.bc_buffer = None

        if _init_setup_model:
            self._setup_model()

    def _setup_model(self) -> None:
        super()._setup_model()
        self._create_aliases()

        # Target entropy is used when learning the entropy coefficient
        if self.target_entropy == 'auto':
            # automatically set target entropy if needed
            self.target_entropy = -np.prod(self.env.action_space.shape).astype(
                np.float32)
        else:
            # Force conversion
            # this will also throw an error for unexpected string
            self.target_entropy = float(self.target_entropy)

        # The entropy coefficient or entropy can be learned automatically
        # see Automating Entropy Adjustment for Maximum Entropy RL section
        # of https://arxiv.org/abs/1812.05905
        if isinstance(self.ent_coef, str) and self.ent_coef.startswith('auto'):
            # Default initial value of ent_coef when learned
            init_value = 1.0
            if '_' in self.ent_coef:
                init_value = float(self.ent_coef.split('_')[1])
                assert init_value > 0., "The initial value of ent_coef must be greater than 0"

            # Note: we optimize the log of the entropy coeff which is slightly different from the paper
            # as discussed in https://github.com/rail-berkeley/softlearning/issues/37
            self.log_ent_coef = th.log(
                th.ones(1, device=self.device) *
                init_value).requires_grad_(True)
            self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef],
                                                    lr=self.lr_schedule(1))
        else:
            # Force conversion to float
            # this will throw an error if a malformed string (different from 'auto')
            # is passed
            self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(
                self.device)

        self.bc_buffer = ReplayBuffer(int(1e4), self.observation_space,
                                      self.action_space, self.device)

    def _create_aliases(self) -> None:
        self.actor = self.policy.actor
        self.critic = self.policy.critic
        self.critic_target = self.policy.critic_target

    def pretrain_bc(self, gradient_steps: int, batch_size: int = 64):
        statistics = []
        with trange(gradient_steps) as t:
            for gradient_step in t:
                replay_data = self.bc_buffer.sample(
                    batch_size, env=self._vec_normalize_env)
                dist = self.actor(replay_data.observations)
                actions_pi, log_prob = dist.log_prob_and_rsample()
                actor_loss = -log_prob.mean()
                actor_mse_loss = F.mse_loss(actions_pi.detach(),
                                            replay_data.actions)

                self.actor.optimizer.zero_grad()
                actor_loss.backward()
                self.actor.optimizer.step()

                statistics.append((actor_loss.item(), actor_mse_loss.item()))
                t.set_postfix(mse_loss=actor_mse_loss.item(),
                              policy_loss=actor_loss.item())
        actor_losses, mse_losses = tuple(zip(*statistics))

        logger.record("pretrain/n_updates",
                      self._n_updates,
                      exclude='tensorboard')
        logger.record("pretrain/actor_loss", np.mean(actor_losses))
        logger.record("pretrain/actor_mse_loss", np.mean(mse_losses))

    def pretrain_rl(self, gradient_steps: int, batch_size: int = 64) -> None:
        statistics = []
        with trange(gradient_steps) as t:
            for gradient_step in t:
                replay_data = self.replay_buffer.sample(
                    batch_size, env=self._vec_normalize_env)
                stats = self.train_batch(replay_data)
                statistics.append(stats)
                self._n_updates += 1
                t.set_postfix(qf_loss=stats[1], policy_loss=stats[0])
        actor_losses, critic_losses, ent_coef_losses, ent_coefs = tuple(
            zip(*statistics))

        logger.record("pretrain/n_updates",
                      self._n_updates,
                      exclude='tensorboard')
        logger.record("pretrain/ent_coef", np.mean(ent_coefs))
        logger.record("pretrain/actor_loss", np.mean(actor_losses))
        logger.record("pretrain/critic_loss", np.mean(critic_losses))
        logger.record("pretrain/ent_coef_loss", np.mean(ent_coef_losses))

    def train(self, gradient_steps: int, batch_size: int = 64) -> None:
        statistics = []
        for gradient_step in range(gradient_steps):
            replay_data = self.replay_buffer.sample(
                batch_size, env=self._vec_normalize_env)
            stats = self.train_batch(replay_data)
            statistics.append(stats)
            self._n_updates += 1
        actor_losses, critic_losses, ent_coef_losses, ent_coefs = tuple(
            zip(*statistics))

        logger.record("train/n_updates",
                      self._n_updates,
                      exclude='tensorboard')
        logger.record("train/ent_coef", np.mean(ent_coefs))
        logger.record("train/actor_loss", np.mean(actor_losses))
        logger.record("train/critic_loss", np.mean(critic_losses))
        logger.record("train/ent_coef_loss", np.mean(ent_coef_losses))

    def train_batch(self, replay_data):
        # Action by the current actor for the sampled state
        dist = self.actor(replay_data.observations)
        actions_pi, log_prob = dist.log_prob_and_rsample()
        actor_mle = dist.mean
        """ent_coeff loss"""
        ent_coef_loss = None
        if self.ent_coef_optimizer is not None:
            # Important: detach the variable from the graph
            # so we don't change it with other losses
            # see https://github.com/rail-berkeley/softlearning/issues/60
            ent_coef = th.exp(self.log_ent_coef.detach())
            ent_coef_loss = -(
                self.log_ent_coef *
                (log_prob + self.target_entropy).detach()).mean()
        else:
            ent_coef = self.ent_coef_tensor
        """q loss"""
        with th.no_grad():
            # Select action according to policy
            next_dist = self.actor(replay_data.next_observations)
            next_actions, next_log_prob = next_dist.log_prob_and_rsample()
            # Compute the target Q value
            target_q1, target_q2 = self.critic_target(
                replay_data.next_observations, next_actions)
            target_q = th.min(target_q1, target_q2)
            target_q = replay_data.rewards + (
                1 - replay_data.dones) * self.gamma * target_q
            # td error + entropy term
            # q_backup = target_q - ent_coef * next_log_prob
            q_backup = target_q
        # Get current Q estimates
        # using action from the replay buffer
        current_q1, current_q2 = self.critic(replay_data.observations,
                                             replay_data.actions)
        # Compute critic loss
        critic_loss = 0.5 * (F.mse_loss(current_q1, q_backup) +
                             F.mse_loss(current_q2, q_backup))
        """action loss"""
        # Advantage-weighted regression
        # if self.awr_use_mle_for_vf:
        # 	v1_pi,v2_pi = self.critic(replay_data.observations, actor_mle)
        # 	v_pi = th.min(v1_pi, v2_pi)
        # else:
        # 	v1_pi,v2_pi = self.critic(replay_data.observations, actions_pi)
        # 	v_pi = th.min(v1_pi, v2_pi)
        q_adv = th.min(current_q1, current_q2)
        v1_pi, v2_pi = self.critic(replay_data.observations, actor_mle)
        v_pi = th.min(v1_pi, v2_pi)
        # q_adv = th.min(*self.critic(replay_data.observations, actions_pi))
        score = q_adv - v_pi
        weights = F.softmax(score / self.beta, dim=0)
        # actor_loss = ent_coef * log_prob.mean()
        actor_logpp = dist.log_prob(replay_data.actions)
        actor_loss = (-actor_logpp * len(weights) * weights.detach()).mean()
        """Updates"""
        # Optimize entropy coefficient, also called
        # entropy temperature or alpha in the paper
        if ent_coef_loss is not None:
            self.ent_coef_optimizer.zero_grad()
            ent_coef_loss.backward()
            self.ent_coef_optimizer.step()
        # Optimize the critic
        self.critic.optimizer.zero_grad()
        critic_loss.backward()
        self.critic.optimizer.step()
        # Optimize the actor
        self.actor.optimizer.zero_grad()
        actor_loss.backward()
        self.actor.optimizer.step()

        # Update target networks
        if self._n_updates % self.target_update_interval == 0:
            for param, target_param in zip(self.critic.parameters(),
                                           self.critic_target.parameters()):
                target_param.data.copy_(self.tau * param.data +
                                        (1 - self.tau) * target_param.data)

        if ent_coef_loss is None:
            ent_coef_loss = th.tensor([0])
        return actor_loss.item(), critic_loss.item(), ent_coef_loss.item(
        ), ent_coef.item()

    def learn(self,
              total_timesteps: int,
              callback: MaybeCallback = None,
              log_interval: int = 4,
              eval_env: Optional[GymEnv] = None,
              eval_freq: int = -1,
              n_eval_episodes: int = 5,
              tb_log_name: str = "AWAC",
              eval_log_path: Optional[str] = None,
              reset_num_timesteps: bool = True) -> OffPolicyAlgorithm:

        total_timesteps, callback = self._setup_learn(
            total_timesteps, eval_env, callback, eval_freq, n_eval_episodes,
            eval_log_path, reset_num_timesteps, tb_log_name)
        callback.on_training_start(locals(), globals())

        self.pretrain_bc(int(1e3), batch_size=self.batch_size)
        observations = self.bc_buffer.observations
        actions = self.bc_buffer.actions
        next_observations = self.bc_buffer.next_observations
        rewards = self.bc_buffer.rewards
        dones = self.bc_buffer.dones
        for data in zip(observations, next_observations, actions, rewards,
                        dones):
            self.replay_buffer.add(*data)
        self.pretrain_rl(int(1e4), batch_size=self.batch_size)

        while self.num_timesteps < total_timesteps:
            rollout = self.collect_rollouts(
                self.env,
                n_episodes=self.n_episodes_rollout,
                n_steps=self.train_freq,
                action_noise=self.action_noise,
                callback=callback,
                learning_starts=self.learning_starts,
                replay_buffer=self.replay_buffer,
                log_interval=log_interval)

            if rollout.continue_training is False:
                break

            self._update_current_progress_remaining(self.num_timesteps,
                                                    total_timesteps)

            if self.num_timesteps > 0 and self.num_timesteps > self.learning_starts:
                gradient_steps = self.gradient_steps if self.gradient_steps > 0 else rollout.episode_timesteps
                self.train(gradient_steps, batch_size=self.batch_size)

        callback.on_training_end()
        return self

    def excluded_save_params(self) -> List[str]:
        """
		Returns the names of the parameters that should be excluded by default
		when saving the model.

		:return: (List[str]) List of parameters that should be excluded from save
		"""
        # Exclude aliases
        return super().excluded_save_params() + [
            "actor", "critic", "critic_target"
        ]

    def get_torch_variables(self) -> Tuple[List[str], List[str]]:
        """
		cf base class
		"""
        state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
        saved_tensors = ['log_ent_coef']
        if self.ent_coef_optimizer is not None:
            state_dicts.append('ent_coef_optimizer')
        else:
            saved_tensors.append('ent_coef_tensor')
        return state_dicts, saved_tensors
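The actor update in `train_batch` is advantage-weighted regression: buffer actions whose Q-value exceeds the value of the policy's own (MLE) action receive exponentially larger weight. A small self-contained sketch of just that weighting with synthetic numbers (`beta` as in the constructor above):

import torch as th
import torch.nn.functional as F

beta = 50
q_adv = th.tensor([[1.0], [3.0], [0.5], [2.0]])   # min of the two critics at the buffer action
v_pi = th.tensor([[1.5], [1.5], [1.5], [1.5]])    # min of the two critics at the policy's MLE action
score = q_adv - v_pi

# Softmax over the batch; multiplying by the batch size afterwards (as in train_batch)
# keeps the loss on the scale of an ordinary mean.
weights = F.softmax(score / beta, dim=0)
log_pi = th.tensor([[-1.2], [-0.8], [-2.0], [-1.0]])  # log-prob of the buffer actions under the actor
actor_loss = (-log_pi * len(weights) * weights.detach()).mean()
print(weights.squeeze().tolist(), actor_loss.item())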
Example #11
    qf1 = QNetwork(envs).to(device)
    qf2 = QNetwork(envs).to(device)
    qf1_target = QNetwork(envs).to(device)
    qf2_target = QNetwork(envs).to(device)
    target_actor = Actor(envs).to(device)
    target_actor.load_state_dict(actor.state_dict())
    qf1_target.load_state_dict(qf1.state_dict())
    qf2_target.load_state_dict(qf2.state_dict())
    q_optimizer = optim.Adam(list(qf1.parameters()) + list(qf2.parameters()),
                             lr=args.learning_rate)
    actor_optimizer = optim.Adam(list(actor.parameters()),
                                 lr=args.learning_rate)

    envs.single_observation_space.dtype = np.float32
    rb = ReplayBuffer(args.buffer_size,
                      envs.single_observation_space,
                      envs.single_action_space,
                      device=device)
    loss_fn = nn.MSELoss()

    # TRY NOT TO MODIFY: start the game
    obs = envs.reset()
    for global_step in range(args.total_timesteps):
        # ALGO LOGIC: put action logic here
        if global_step < args.learning_starts:
            actions = envs.action_space.sample()
        else:
            actions = actor.forward(torch.Tensor(obs).to(device))
            # add exploration noise, then clip to the action bounds (reconstructed continuation)
            actions = np.array([(
                actions.tolist()[0] +
                np.random.normal(0, max_action * args.exploration_noise,
                                 size=envs.single_action_space.shape[0])
            ).clip(envs.single_action_space.low, envs.single_action_space.high)])
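        # Sketch (not part of the original listing): such a loop typically continues by
        # stepping the vectorized env and storing the transition in the replay buffer.
        next_obs, rewards, dones, infos = envs.step(actions)
        rb.add(obs, next_obs, actions, rewards, dones, infos)  # some SB3 versions omit `infos`
        obs = next_obs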
Example #12
class CMVCVaRSAC(OffPolicyAlgorithm):
    def __init__(
        self,
        policy: Union[str, Type[CMVC51SACPolicy]],
        env: Union[GymEnv, str],
        min_v: float = -25,
        max_v: float = +25,
        support_dim: int = 200,
        learning_rate: Union[float, Schedule] = 3e-4,
        buffer_size: int = int(5e4),
        learning_starts: int = 100,
        batch_size: int = 64,
        tau: float = 0.005,
        gamma: float = 0.99,
        train_freq: Union[int, Tuple[int, str]] = 1,
        gradient_steps: int = 1,
        action_noise: Optional[ActionNoise] = None,
        optimize_memory_usage: bool = False,
        ent_coef: Union[str, float] = "auto",
        target_update_interval: int = 1,
        target_entropy: Union[str, float] = "auto",
        use_sde: bool = False,
        sde_sample_freq: int = -1,
        use_sde_at_warmup: bool = False,
        tensorboard_log: Optional[str] = None,
        create_eval_env: bool = False,
        policy_kwargs: Dict[str, Any] = None,
        verbose: int = 0,
        seed: Optional[int] = None,
        device: Union[th.device, str] = "auto",
        _init_setup_model: bool = True,
        cvar_alpha=0.3,
        cmv_beta=1,
    ):
        super(CMVCVaRSAC, self).__init__(
            policy,
            env,
            CMVC51SACPolicy,
            learning_rate,
            buffer_size,
            learning_starts,
            batch_size,
            tau,
            gamma,
            train_freq,
            gradient_steps,
            action_noise,
            policy_kwargs=policy_kwargs,
            tensorboard_log=tensorboard_log,
            verbose=verbose,
            device=device,
            create_eval_env=create_eval_env,
            seed=seed,
            use_sde=use_sde,
            sde_sample_freq=sde_sample_freq,
            use_sde_at_warmup=use_sde_at_warmup,
            optimize_memory_usage=optimize_memory_usage,
            supported_action_spaces=(gym.spaces.Box),
        )

        self.target_entropy = target_entropy
        self.log_ent_coef = None  # type: Optional[th.Tensor]
        # Entropy coefficient / Entropy temperature
        # Inverse of the reward scale
        self.ent_coef = ent_coef
        self.target_update_interval = target_update_interval
        self.ent_coef_optimizer = None
        self.min_v = min_v
        self.max_v = max_v
        self.support_dim = support_dim
        self.interval = (1 / (support_dim - 1)) * (max_v - min_v)
        self.supports = th.from_numpy(
            np.array([min_v + i * self.interval for i in range(support_dim)],
                     dtype=np.float32)).to(self.device)
        self._total_timesteps = None
        self.cvar_alpha = cvar_alpha
        self.cmv_beta = cmv_beta
        if _init_setup_model:
            self._setup_model()

    def _setup_model(self) -> None:
        self._setup_lr_schedule()
        self.set_random_seed(self.seed)
        self.replay_buffer = ReplayBuffer(
            self.buffer_size,
            self.observation_space,
            self.action_space,
            self.device,
            optimize_memory_usage=self.optimize_memory_usage,
        )
        self.policy = self.policy_class(
            self.observation_space,
            self.action_space,
            self.support_dim,
            self.lr_schedule,
            **self.policy_kwargs,  # pytype:disable=not-instantiable
        )
        self.policy = self.policy.to(self.device)

        # Convert train freq parameter to TrainFreq object
        self._convert_train_freq()
        self._create_aliases()
        # Target entropy is used when learning the entropy coefficient
        if self.target_entropy == "auto":
            # automatically set target entropy if needed
            self.target_entropy = -np.prod(self.env.action_space.shape).astype(
                np.float32)
        else:
            # Force conversion
            # this will also throw an error for unexpected string
            self.target_entropy = float(self.target_entropy)

        # The entropy coefficient or entropy can be learned automatically
        # see Automating Entropy Adjustment for Maximum Entropy RL section
        # of https://arxiv.org/abs/1812.05905
        if isinstance(self.ent_coef, str) and self.ent_coef.startswith("auto"):
            # Default initial value of ent_coef when learned
            init_value = 1.0
            if "_" in self.ent_coef:
                init_value = float(self.ent_coef.split("_")[1])
                assert init_value > 0.0, "The initial value of ent_coef must be greater than 0"

            # Note: we optimize the log of the entropy coeff which is slightly different from the paper
            # as discussed in https://github.com/rail-berkeley/softlearning/issues/37
            self.log_ent_coef = th.log(
                th.ones(1, device=self.device) *
                init_value).requires_grad_(True)
            self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef],
                                                    lr=self.lr_schedule(1))
        else:
            # Force conversion to float
            # this will throw an error if a malformed string (different from 'auto')
            # is passed
            self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(
                self.device)

    def _create_aliases(self) -> None:
        self.actor = self.policy.actor
        self.critic = self.policy.critic
        self.critic_target = self.policy.critic_target
        self.cmv_net = self.policy.cmv_net
        self.beta_critic = self.policy.beta_critic
        self.beta_critic_target = self.policy.beta_critic_target

    def projection(self, support_rows, target_z):
        projected_target_z = th.zeros_like(target_z)
        support_rows = support_rows.clamp(self.min_v, self.max_v - 1e-3)
        p = ((support_rows - self.min_v) % self.interval) / self.interval
        idx = ((support_rows - self.min_v) // self.interval).long()
        projected_target_z = projected_target_z.scatter_add(
            1, idx, target_z * p)
        projected_target_z = projected_target_z.scatter_add(
            1, idx + 1, target_z * (1 - p))

        return projected_target_z

    def train(self, gradient_steps: int, batch_size: int = 64) -> None:
        # Update optimizers learning rate
        optimizers = [self.actor.optimizer, self.critic.optimizer]
        if self.ent_coef_optimizer is not None:
            optimizers += [self.ent_coef_optimizer]

        # Update learning rate according to lr schedule
        self._update_learning_rate(optimizers)

        ent_coef_losses, ent_coefs = [], []
        actor_losses, critic_losses, reward_losses, feature_pred_losses = [], [], [], []
        cvars = []
        qs = []
        critic_beta_losses = []

        for gradient_step in range(gradient_steps):
            # Sample replay buffer
            replay_data = self.replay_buffer.sample(
                batch_size, env=self._vec_normalize_env)

            # We need to sample because `log_std` may have changed between two gradient steps
            if self.use_sde:
                self.actor.reset_noise()

            # Action by the current actor for the sampled state
            actions_pi, log_prob = self.actor.action_log_prob(
                replay_data.observations)
            log_prob = log_prob.reshape(-1, 1)

            ent_coef_loss = None
            if self.ent_coef_optimizer is not None:
                # Important: detach the variable from the graph
                # so we don't change it with other losses
                # see https://github.com/rail-berkeley/softlearning/issues/60
                ent_coef = th.exp(self.log_ent_coef.detach())
                ent_coef_loss = -(
                    self.log_ent_coef *
                    (log_prob + self.target_entropy).detach()).mean()
                ent_coef_losses.append(ent_coef_loss.item())
            else:
                ent_coef = self.ent_coef_tensor

            ent_coefs.append(ent_coef.item())

            # Optimize entropy coefficient, also called
            # entropy temperature or alpha in the paper
            if ent_coef_loss is not None:
                self.ent_coef_optimizer.zero_grad()
                ent_coef_loss.backward()
                self.ent_coef_optimizer.step()

            with th.no_grad():
                # Select action according to policy
                next_actions, next_log_prob = self.actor.action_log_prob(
                    replay_data.next_observations)
                # Compute the next Q values: min over all critics targets
                target_zs = th.cat(self.critic_target(
                    replay_data.next_observations, next_actions),
                                   dim=1)
                # add entropy term
                target_supports = self.supports.clone().detach()
                target_supports = target_supports - ent_coef * next_log_prob.reshape(
                    -1, 1)
                # td error + entropy term
                target_supports = replay_data.rewards + (
                    1 - replay_data.dones) * self.gamma * target_supports

            # Get current Q-values estimates for each critic network
            # using action from the replay buffer
            current_zs = th.cat(self.critic(replay_data.observations,
                                            replay_data.actions),
                                dim=1)
            target_zs = self.projection(target_supports, target_zs)
            # Compute critic loss

            critic_loss = -th.mean(th.log(current_zs + 1e-12) * target_zs)
            critic_losses.append(critic_loss.item())

            # Optimize the critic
            self.critic.optimizer.zero_grad()
            critic_loss.backward()
            self.critic.optimizer.step()

            # Compute
            # For the CMV Learning
            with th.no_grad():
                next_z = self.critic_target.features_extractor(
                    replay_data.next_observations)
            # predicted reward, and predicted next observation
            r_pred, z_pred = self.cmv_net(replay_data.observations,
                                          replay_data.actions)
            mse_r_pred = th.mean(th.square(r_pred - replay_data.rewards))
            mse_z_pred = th.mean(th.square(z_pred - next_z))
            loss_cmv = mse_r_pred + mse_z_pred

            reward_losses.append(mse_r_pred.item())
            feature_pred_losses.append(mse_z_pred.item())

            # Optimize the CMV Nets
            self.cmv_net.optimizer.zero_grad()
            loss_cmv.backward()
            self.cmv_net.optimizer.step()

            with th.no_grad():
                # Select action according to policy
                # Compute the next Q values: min over all critics targets
                next_q_beta_values = th.cat(self.beta_critic_target(
                    replay_data.next_observations, next_actions),
                                            dim=1)
                next_q_beta_values, _ = th.min(next_q_beta_values,
                                               dim=1,
                                               keepdim=True)
                # add entropy term
                next_q_beta_values = next_q_beta_values - ent_coef * next_log_prob.reshape(
                    -1, 1)
                # td error + entropy term
                target_q_beta_values = loss_cmv.detach() + (
                    1 - replay_data.dones) * (self.gamma**
                                              2) * next_q_beta_values

            # Get current Q-beta values estimates for each critic network
            # using action from the replay buffer
            current_q_beta_values = self.beta_critic(replay_data.observations,
                                                     replay_data.actions)

            # Compute critic beta loss
            critic_beta_loss = \
                0.5 * sum(
                    [F.mse_loss(current_q_beta, target_q_beta_values) for current_q_beta in current_q_beta_values])
            critic_beta_losses.append(critic_beta_loss.item())

            # Optimize the critic beta
            self.beta_critic.optimizer.zero_grad()
            critic_beta_loss.backward()
            self.beta_critic.optimizer.step()

            # Compute actor loss
            # Alternative: actor_loss = th.mean(log_prob - qf1_pi - qf1_beta_pi)
            # Mean over all critic networks
            z_pi = th.cat(self.critic.forward(replay_data.observations,
                                              actions_pi),
                          dim=1)

            z_cdf = th.cumsum(z_pi, dim=-1)
            adjust_pdf = th.where(th.le(z_cdf, self.cvar_alpha), z_pi,
                                  th.zeros_like(z_pi))
            adjust_pdf = th.div(adjust_pdf,
                                th.sum(adjust_pdf, dim=-1, keepdim=True))
            q_pi = adjust_pdf @ self.supports
            cvars.append(th.mean(q_pi).item())
            qs.append(th.mean(z_pi @ self.supports).item())
            q_beta_values_pi = th.cat(self.beta_critic.forward(
                replay_data.observations, actions_pi),
                                      dim=1)
            max_qf_beta_pi, _ = th.max(q_beta_values_pi, dim=1, keepdim=True)
            actor_loss = (ent_coef * log_prob - q_pi +
                          self.cmv_beta * next_q_beta_values).mean()
            actor_losses.append(actor_loss.item())

            # Optimize the actor
            self.actor.optimizer.zero_grad()
            actor_loss.backward()
            self.actor.optimizer.step()

            # Update target networks
            if gradient_step % self.target_update_interval == 0:
                polyak_update(self.critic.parameters(),
                              self.critic_target.parameters(), self.tau)
                polyak_update(self.beta_critic.parameters(),
                              self.beta_critic_target.parameters(), self.tau)

        self._n_updates += gradient_steps
        fps = int(self.num_timesteps / (time.time() - self.start_time))
        remaining_steps = self._total_timesteps - self.num_timesteps

        eta = int(round(remaining_steps / fps))
        logger.record("time/eta",
                      timedelta(seconds=eta),
                      exclude="tensorboard")
        logger.record("train/CVaR Alpha", self.cvar_alpha)
        logger.record("train/CMV Beta", self.cmv_beta)
        logger.record("train/CVaR", np.mean(cvars))
        logger.record("train/Q-value", np.mean(qs))
        logger.record("train/n_updates",
                      self._n_updates,
                      exclude="tensorboard")
        logger.record("train/ent_coef", np.mean(ent_coefs))
        logger.record("train/actor_loss", np.mean(actor_losses))
        logger.record("train/critic_loss", np.mean(critic_losses))
        logger.record("train/reward error", np.mean(reward_losses))
        logger.record("train/s_t+1_error", np.mean(feature_pred_losses))
        logger.record("train/beta_Q_loss", np.mean(critic_beta_losses))
        if len(ent_coef_losses) > 0:
            logger.record("train/ent_coef_loss", np.mean(ent_coef_losses))

    def learn(
        self,
        total_timesteps: int,
        callback: MaybeCallback = None,
        log_interval: int = 4,
        eval_env: Optional[GymEnv] = None,
        eval_freq: int = -1,
        n_eval_episodes: int = 5,
        tb_log_name: str = "C51SAC",
        eval_log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
    ) -> OffPolicyAlgorithm:

        return super(CMVCVaRSAC, self).learn(
            total_timesteps=total_timesteps,
            callback=callback,
            log_interval=log_interval,
            eval_env=eval_env,
            eval_freq=eval_freq,
            n_eval_episodes=n_eval_episodes,
            tb_log_name=tb_log_name,
            eval_log_path=eval_log_path,
            reset_num_timesteps=reset_num_timesteps,
        )

    def _excluded_save_params(self) -> List[str]:
        return super(CMVCVaRSAC, self)._excluded_save_params() + [
            "actor", "critic", "critic_target"
        ]

    def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
        state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
        saved_pytorch_variables = ["log_ent_coef"]
        if self.ent_coef_optimizer is not None:
            state_dicts.append("ent_coef_optimizer")
        else:
            saved_pytorch_variables.append("ent_coef_tensor")
        return state_dicts, saved_pytorch_variables
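For reference, a standalone numeric sketch of the C51-style categorical projection that `projection` above performs; in the standard formulation the probability mass of each shifted atom is split between its two neighbouring support atoms in proportion to proximity (the tiny support and distribution below are made up):

import torch as th

min_v, max_v, support_dim = -1.0, 1.0, 3            # atoms at -1, 0, 1
interval = (max_v - min_v) / (support_dim - 1)       # 1.0

def project(support_rows, target_z):
    projected = th.zeros_like(target_z)
    support_rows = support_rows.clamp(min_v, max_v - 1e-3)
    p = ((support_rows - min_v) % interval) / interval              # fractional offset above the lower atom
    idx = ((support_rows - min_v) // interval).long()
    projected = projected.scatter_add(1, idx, target_z * (1 - p))   # lower atom gets the closer share
    projected = projected.scatter_add(1, idx + 1, target_z * p)
    return projected

# One transition whose shifted atoms land at -0.5, 0.25 and 1.0 with probabilities 0.2/0.5/0.3
shifted = th.tensor([[-0.5, 0.25, 1.0]])
probs = th.tensor([[0.2, 0.5, 0.3]])
print(project(shifted, probs))   # -> approximately [0.100, 0.475, 0.425]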
Example #13
    device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
    envs = VecFrameStack(
        DummyVecEnv([make_env(args.gym_id, args.seed, 0)]),
        4,
    )
    assert isinstance(envs.action_space, Discrete), "only discrete action space is supported"

    # TRY NOT TO MODIFY: seeding
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = args.torch_deterministic

    # ALGO LOGIC: initialize agent here:
    rb = ReplayBuffer(args.buffer_size, envs.observation_space, envs.action_space, device=device, optimize_memory_usage=True)
    q_network = QNetwork(envs).to(device)
    target_network = QNetwork(envs).to(device)
    target_network.load_state_dict(q_network.state_dict())
    optimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)
    loss_fn = nn.MSELoss()

    # TRY NOT TO MODIFY: start the game
    obs = envs.reset()
    for global_step in range(args.total_timesteps):
        epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction * args.total_timesteps, global_step)
        if random.random() < epsilon:
            actions = [envs.action_space.sample()]
        else:
            logits = q_network.forward(torch.Tensor(obs).to(device))
            actions = torch.argmax(logits, dim=1).cpu().numpy()
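`linear_schedule` is not shown in this excerpt; a minimal sketch of the linear epsilon decay it presumably implements, inferred from the call above (start value, end value, decay duration, current step):

def linear_schedule(start_e: float, end_e: float, duration: int, t: int) -> float:
    # Interpolate linearly from start_e to end_e over `duration` steps, then hold at end_e.
    slope = (end_e - start_e) / duration
    return max(slope * t + start_e, end_e)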
Example #14
def main(args):
    if args.env_type == 'atari':
        # env = make_atari_env(
        #     env_name=args.env_name,
        #     action_repeat=args.action_repeat,
        #     frame_stack=args.frame_stack
        # )
        # eval_env = make_atari_env(
        #     env_name=args.env_name,
        #     action_repeat=args.action_repeat,
        #     frame_stack=args.frame_stack
        # )
        env = make_atari_env(env_id=args.env_name)
        eval_env = make_atari_env(env_id=args.env_name)
    elif args.env_type == 'dmc_locomotion':
        env = make_locomotion_env(
            env_name=args.env_name,
            seed=args.seed,
            episode_length=args.episode_length,
            from_pixels=args.pixel_obs,
            action_repeat=args.action_repeat,
            obs_height=args.obs_height,
            obs_width=args.obs_width,
            camera_id=args.env_camera_id,
            mode=args.mode
        )
        eval_env = make_locomotion_env(
            env_name=args.env_name,
            seed=args.seed,
            episode_length=args.episode_length,
            from_pixels=args.pixel_obs,
            action_repeat=args.action_repeat,
            obs_height=args.obs_height,
            obs_width=args.obs_width,
            camera_id=args.env_camera_id,
            mode=args.mode
        )
    # Initialize environment
    if args.seed is None:
        args.seed = np.random.randint(int(1e9))
    utils.set_seed_everywhere(args.seed, env=env, eval_env=eval_env)
    utils.make_dir(args.work_dir)
    model_dir = utils.make_dir(os.path.join(args.work_dir, 'model'))
    video_dir = utils.make_dir(os.path.join(args.work_dir, 'video'))
    video = VideoRecorder(video_dir if args.save_video else None, args.env_type,
                          height=448, width=448, camera_id=args.video_camera_id)

    # Prepare agent
    assert torch.cuda.is_available(), 'must have cuda enabled'
    device = torch.device(args.device)

    if args.env_type == 'atari':
        # replay_buffer = buffers.FrameStackReplayBuffer(
        #     obs_space=env.observation_space,
        #     action_space=env.action_space,
        #     capacity=args.replay_buffer_capacity,
        #     frame_stack=args.frame_stack,
        #     device=device,
        #     optimize_memory_usage=True,
        # )
        from stable_baselines3.common.buffers import ReplayBuffer
        replay_buffer = ReplayBuffer(
            args.replay_buffer_capacity,
            env.observation_space,
            env.action_space,
            device,
            optimize_memory_usage=True,
        )
        # replay_buffer = buffers.ReplayBuffer(
        #     obs_space=env.observation_space,
        #     action_space=env.action_space,
        #     capacity=args.replay_buffer_capacity,
        #     device=device,
        #     optimize_memory_usage=True,
        # )
    elif args.env_type == 'dmc_locomotion':
        replay_buffer = buffers.ReplayBuffer(
            obs_space=env.observation_space,
            action_space=env.action_space,
            capacity=args.replay_buffer_capacity,
            device=device,
            optimize_memory_usage=True,
        )
    agent = make_agent(
        obs_space=env.observation_space,
        action_space=env.action_space,
        device=device,
        args=args
    )

    logger = Logger(args.work_dir,
                    log_frequency=args.log_freq,
                    action_repeat=args.action_repeat,
                    save_tb=args.save_tb)
    episode, episode_reward, episode_step, done = 0, 0, 0, True
    obs = env.reset()
    start_time = time.time()
    for step in range(args.train_steps + 1):
        # (chongyi zheng): we can also evaluate and save model when current episode is not finished
        # Evaluate agent periodically
        if step % args.eval_freq == 0:
            print('Evaluating:', args.work_dir)
            logger.log('eval/episode', episode, step)
            evaluate(eval_env, agent, video, args.num_eval_episodes, logger, step)

        # Save agent periodically
        if step % args.save_freq == 0 and step > 0:
            if args.save_model:
                agent.save(model_dir, step)

        if done:
            if step > 0:
                logger.log('train/duration', time.time() - start_time, step)
                start_time = time.time()
                logger.dump(step, ty='train', save=(step > args.init_steps))

            logger.log('train/episode_reward', episode_reward, step)

            obs = env.reset()
            done = False
            episode_reward = 0
            episode_step = 0
            episode += 1

            logger.log('train/episode', episode, step)

        # Sample action for data collection
        if step < args.init_steps:
            action = env.action_space.sample()
        else:
            # with utils.eval_mode(agent):
            action = agent.act(obs, True)
            # action, _ = agent.predict(obs, deterministic=False)

            if 'dqn' in args.algo:
                agent.schedule_exploration_rate(step, logger)
                # agent._update_current_progress_remaining(step, args.train_steps)
                # agent._on_step()

        # Run training update
        if step >= args.init_steps and step % args.train_freq == 0:
            # TODO (chongyi zheng): Do we need multiple updates after initial data collection?
            # num_updates = args.init_steps if step == args.init_steps else 1
            # for _ in range(num_updates):
            # 	agent.update(replay_buffer, logger, step)
            for _ in range(args.num_train_iters):
                agent.update(replay_buffer, logger, step)
            # agent.train(batch_size=args.batch_size, gradient_steps=args.num_train_iters)

        # Take step
        next_obs, reward, done, info = env.step(action)
        # agent.replay_buffer.add(obs, next_obs, action, reward, done)
        # replay_buffer.add(obs, action, reward, next_obs, done)
        replay_buffer.add(np.expand_dims(np.asarray(obs), axis=0),
                          np.expand_dims(np.asarray(next_obs), axis=0),
                          np.expand_dims(action, axis=0),
                          np.expand_dims(np.sign(reward), axis=0),
                          np.expand_dims(done, axis=0))

        episode_reward += reward
        obs = next_obs
        episode_step += 1
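        # Editor's note (a minimal sketch of the convention used above, assuming this
        # buffer follows an SB3-style add(obs, next_obs, action, reward, done) layout):
        # every argument gets a leading batch axis of size 1 via np.expand_dims, and
        # rewards are clipped to {-1, 0, +1} with np.sign, e.g. np.sign(3.7) == 1.0
        # and np.expand_dims(1.0, axis=0).shape == (1,).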
Example #15
class ReservoirOffPolicyAlgorithm(BaseAlgorithm):
    """
    The base for Off-Policy algorithms (ex: SAC/TD3) that use a reservoir replay buffer

    :param policy: Policy object
    :param env: The environment to learn from
                (if registered in Gym, can be str. Can be None for loading trained models)
    :param policy_base: The base policy used by this method
    :param learning_rate: learning rate for the optimizer,
        it can be a function of the current progress remaining (from 1 to 0)
    :param buffer_size: size of the replay buffer
    :param learning_starts: how many steps of the model to collect transitions for before learning starts
    :param batch_size: Minibatch size for each gradient update
    :param tau: the soft update coefficient ("Polyak update", between 0 and 1)
    :param gamma: the discount factor
    :param train_freq: Update the model every ``train_freq`` steps. Set to ``-1`` to disable.
    :param gradient_steps: How many gradient steps to do after each rollout
        (see ``train_freq`` and ``n_episodes_rollout``).
        Set to ``-1`` to do as many gradient steps as steps performed in the environment
        during the rollout.
    :param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes.
        Note that this cannot be used at the same time as ``train_freq``. Set to ``-1`` to disable.
    :param action_noise: the action noise type (None by default), this can help
        for hard exploration problems. Cf common.noise for the different action noise types.
    :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
        at the cost of more complexity.
        See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
    :param policy_kwargs: Additional arguments to be passed to the policy on creation
    :param tensorboard_log: the log location for tensorboard (if None, no logging)
    :param verbose: The verbosity level: 0 none, 1 training information, 2 debug
    :param device: Device on which the code should run.
        By default, it will try to use a CUDA-compatible device and fall back to CPU
        if it is not possible.
    :param support_multi_env: Whether the algorithm supports training
        with multiple environments (as in A2C)
    :param create_eval_env: Whether to create a second environment that will be
        used for evaluating the agent periodically. (Only available when passing a string for the environment)
    :param monitor_wrapper: When creating an environment, whether to wrap it
        or not in a Monitor wrapper.
    :param seed: Seed for the pseudo random generators
    :param use_sde: Whether to use State Dependent Exploration (SDE)
        instead of action noise exploration (default: False)
    :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
        Default: -1 (only sample at the beginning of the rollout)
    :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling
        during the warm up phase (before learning starts)
    :param sde_support: Whether the model supports gSDE or not
    :param remove_time_limit_termination: Remove terminations (dones) that are due to time limit.
        See https://github.com/hill-a/stable-baselines/issues/863
    """

    def __init__(
            self,
            policy: Type[BasePolicy],
            env: Union[GymEnv, str],
            policy_base: Type[BasePolicy],
            learning_rate: Union[float, Callable],
            buffer_size: int = int(1e6),
            learning_starts: int = 100,
            batch_size: int = 256,
            tau: float = 0.005,
            gamma: float = 0.99,
            train_freq: int = 1,
            gradient_steps: int = 1,
            n_episodes_rollout: int = -1,
            action_noise: Optional[ActionNoise] = None,
            optimize_memory_usage: bool = False,
            policy_kwargs: Optional[Dict[str, Any]] = None,
            tensorboard_log: Optional[str] = None,
            verbose: int = 0,
            device: Union[th.device, str] = "auto",
            support_multi_env: bool = False,
            create_eval_env: bool = False,
            monitor_wrapper: bool = True,
            seed: Optional[int] = None,
            use_sde: bool = False,
            sde_sample_freq: int = -1,
            use_sde_at_warmup: bool = False,
            sde_support: bool = True,
            remove_time_limit_termination: bool = False,
    ):

        super(ReservoirOffPolicyAlgorithm, self).__init__(
            policy=policy,
            env=env,
            policy_base=policy_base,
            learning_rate=learning_rate,
            policy_kwargs=policy_kwargs,
            tensorboard_log=tensorboard_log,
            verbose=verbose,
            device=device,
            support_multi_env=support_multi_env,
            create_eval_env=create_eval_env,
            monitor_wrapper=monitor_wrapper,
            seed=seed,
            use_sde=use_sde,
            sde_sample_freq=sde_sample_freq,
        )
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.learning_starts = learning_starts
        self.tau = tau
        self.gamma = gamma
        self.train_freq = train_freq
        self.gradient_steps = gradient_steps
        self.n_episodes_rollout = n_episodes_rollout
        self.action_noise = action_noise
        self.optimize_memory_usage = optimize_memory_usage

        # Remove terminations (dones) that are due to time limit
        # see https://github.com/hill-a/stable-baselines/issues/863
        self.remove_time_limit_termination = remove_time_limit_termination

        if train_freq > 0 and n_episodes_rollout > 0:
            warnings.warn(
                "You passed a positive value for `train_freq` and `n_episodes_rollout`."
                "Please make sure this is intended. "
                "The agent will collect data by stepping in the environment "
                "until both conditions are true: "
                "`number of steps in the env` >= `train_freq` and "
                "`number of episodes` > `n_episodes_rollout`"
            )

        self.actor = None  # type: Optional[th.nn.Module]
        self.replay_buffer = None  # type: Optional[ReservoirBuffer]
        self.current_experience_buffer = None  # type: Optional[ReplayBuffer]
        # Update policy keyword arguments
        if sde_support:
            self.policy_kwargs["use_sde"] = self.use_sde
        # For gSDE only
        self.use_sde_at_warmup = use_sde_at_warmup

        self.update_env(env, support_multi_env=support_multi_env, create_eval_env=create_eval_env,
                        monitor_wrapper=monitor_wrapper, is_reservoir_replay=True)

    def _setup_model(self) -> None:
        self._setup_lr_schedule()
        self.set_random_seed(self.seed)
        self.replay_buffer = ReservoirBuffer(
            self.buffer_size,
            self.observation_space,
            self.action_space,
            self.device,
            optimize_memory_usage=self.optimize_memory_usage,
        )
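        # Capacity-1 buffer that only ever holds the most recent transition;
        # it is refreshed on every environment step in ``collect_rollouts``.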
        self.current_experience_buffer = ReplayBuffer(
            1,
            self.observation_space,
            self.action_space,
            self.device,
            optimize_memory_usage=False,
        )
        self.policy = self.policy_class(
            self.observation_space,
            self.action_space,
            self.lr_schedule,
            **self.policy_kwargs  # pytype:disable=not-instantiable
        )
        self.policy = self.policy.to(self.device)

    def save_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None:
        """
        Save the replay buffer as a pickle file.

        :param path: Path to the file where the replay buffer should be saved.
            if path is a str or pathlib.Path, the path is automatically created if necessary.
        """
        assert self.replay_buffer is not None, "The replay buffer is not defined"
        save_to_pkl(path, self.replay_buffer, self.verbose)

    def load_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None:
        """
        Load a replay buffer from a pickle file.

        :param path: Path to the pickled replay buffer.
        """
        self.replay_buffer = load_from_pkl(path, self.verbose)
        assert isinstance(self.replay_buffer, ReplayBuffer), "The replay buffer must inherit from ReplayBuffer class"
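        # Note: a pickled ReservoirBuffer also passes this check, provided that it
        # subclasses ReplayBuffer (as the attribute access in _setup_learn assumes).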

    def _setup_learn(
            self,
            total_timesteps: int,
            eval_env: Optional[GymEnv],
            callback: Union[None, Callable, List[BaseCallback], BaseCallback] = None,
            eval_freq: int = 10000,
            n_eval_episodes: int = 5,
            log_path: Optional[str] = None,
            reset_num_timesteps: bool = True,
            tb_log_name: str = "run",
    ) -> Tuple[int, BaseCallback]:
        """
        cf `BaseAlgorithm`.
        """
        # Prevent continuity issue by truncating trajectory
        # when using memory efficient replay buffer
        # see https://github.com/DLR-RM/stable-baselines3/issues/46
        truncate_last_traj = (
                self.optimize_memory_usage
                and reset_num_timesteps
                and self.replay_buffer is not None
                and (self.replay_buffer.full or self.replay_buffer.pos > 0)
        )

        if truncate_last_traj:
            warnings.warn(
                "The last trajectory in the replay buffer will be truncated, "
                "see https://github.com/DLR-RM/stable-baselines3/issues/46."
                "You should use `reset_num_timesteps=False` or `optimize_memory_usage=False`"
                "to avoid that issue."
            )
            # Go to the previous index
            pos = (self.replay_buffer.pos - 1) % self.replay_buffer.buffer_size
            self.replay_buffer.dones[pos] = True
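            # Marking the most recently stored transition as terminal ensures its
            # (possibly inconsistent) next observation is not bootstrapped from
            # when sampling the memory-efficient buffer.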

        return super()._setup_learn(
            total_timesteps, eval_env, callback, eval_freq, n_eval_episodes, log_path, reset_num_timesteps, tb_log_name
        )

    def learn(
            self,
            total_timesteps: int,
            callback: MaybeCallback = None,
            log_interval: int = 4,
            eval_env: Optional[GymEnv] = None,
            eval_freq: int = -1,
            n_eval_episodes: int = 5,
            tb_log_name: str = "run",
            eval_log_path: Optional[str] = None,
            reset_num_timesteps: bool = True,
    ) -> "ReservoirOffPolicyAlgorithm":

        total_timesteps, callback = self._setup_learn(
            total_timesteps, eval_env, callback, eval_freq, n_eval_episodes, eval_log_path, reset_num_timesteps,
            tb_log_name
        )

        callback.on_training_start(locals(), globals())

        while self.num_timesteps < total_timesteps:

            rollout = self.collect_rollouts(
                self.env,
                n_episodes=self.n_episodes_rollout,
                n_steps=self.train_freq,
                action_noise=self.action_noise,
                callback=callback,
                learning_starts=self.learning_starts,
                replay_buffer=self.replay_buffer,
                log_interval=log_interval,
            )

            if rollout.continue_training is False:
                break

            if self.num_timesteps > 0 and self.num_timesteps > self.learning_starts:
                # If no `gradient_steps` is specified,
                # do as many gradients steps as steps performed during the rollout
                gradient_steps = self.gradient_steps if self.gradient_steps > 0 else rollout.episode_timesteps
                self.train(batch_size=self.batch_size, gradient_steps=gradient_steps)

        callback.on_training_end()

        return self

    def train(self, gradient_steps: int, batch_size: int) -> None:
        """
        Sample the replay buffer and do the updates
        (gradient descent and update target networks)
        """
        raise NotImplementedError()

    def _sample_action(
            self, learning_starts: int, action_noise: Optional[ActionNoise] = None
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Sample an action according to the exploration policy.
        This is either done by sampling the probability distribution of the policy,
        or sampling a random action (from a uniform distribution over the action space)
        or by adding noise to the deterministic output.

        :param action_noise: Action noise that will be used for exploration
            Required for deterministic policy (e.g. TD3). This can also be used
            in addition to the stochastic policy for SAC.
        :param learning_starts: Number of steps before learning for the warm-up phase.
        :return: action to take in the environment
            and scaled action that will be stored in the replay buffer.
            The two differ when the action space is not normalized (bounds are not [-1, 1]).
        """
        # Select action randomly or according to policy
        if self.num_timesteps < learning_starts and not (self.use_sde and self.use_sde_at_warmup):
            # Warmup phase
            unscaled_action = np.array([self.action_space.sample()])
        else:
            # Note: when using continuous actions,
            # we assume that the policy uses tanh to scale the action
            # We use non-deterministic action in the case of SAC, for TD3, it does not matter
            unscaled_action, _ = self.predict(self._last_obs, deterministic=False)

        # Rescale the action from [low, high] to [-1, 1]
        if isinstance(self.action_space, gym.spaces.Box):
            scaled_action = self.policy.scale_action(unscaled_action)

            # Add noise to the action (improve exploration)
            if action_noise is not None:
                scaled_action = np.clip(scaled_action + action_noise(), -1, 1)

            # We store the scaled action in the buffer
            buffer_action = scaled_action
            action = self.policy.unscale_action(scaled_action)
        else:
            # Discrete case, no need to normalize or clip
            buffer_action = unscaled_action
            action = buffer_action
        return action, buffer_action
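    # Editor's sketch of the scaling step above, assuming the standard
    # ``BasePolicy.scale_action`` / ``unscale_action`` affine maps:
    #   scale_action(a)   = 2 * (a - low) / (high - low) - 1
    #   unscale_action(s) = low + 0.5 * (s + 1) * (high - low)
    # e.g. for a Box with low=-2, high=2, an action of 1.0 scales to 0.5; noise is
    # then added and clipped to [-1, 1] before mapping back for env.step.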

    def _dump_logs(self) -> None:
        """
        Write log.
        """
        try:
            fps = int(self.num_timesteps / (time.time() - self.start_time))
        except ZeroDivisionError:
            warnings.warn("fps dump had zero division somehow, storing 0 instead.")
            fps = 0
        logger.record("time/episodes", self._episode_num, exclude="tensorboard")
        if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
            logger.record("rollout/ep_rew_mean", safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]))
            logger.record("rollout/ep_len_mean", safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer]))
        logger.record("time/fps", fps)
        logger.record("time/time_elapsed", int(time.time() - self.start_time), exclude="tensorboard")
        logger.record("time/total timesteps", self.num_timesteps, exclude="tensorboard")
        if self.use_sde:
            logger.record("train/std", (self.actor.get_std()).mean().item())

        if len(self.ep_success_buffer) > 0:
            logger.record("rollout/success rate", safe_mean(self.ep_success_buffer))
        # Pass the number of timesteps for tensorboard
        logger.dump(step=self.num_timesteps)

    def _on_step(self) -> None:
        """
        Method called after each step in the environment.
        It is meant to trigger DQN target network update
        but can be used for other purposes
        """
        pass

    def collect_rollouts(
            self,
            env: VecEnv,
            callback: BaseCallback,
            n_episodes: int = 1,
            n_steps: int = -1,
            action_noise: Optional[ActionNoise] = None,
            learning_starts: int = 0,
            replay_buffer: Optional[ReservoirBuffer] = None,
            log_interval: Optional[int] = None,
    ) -> RolloutReturn:
        """
        Collect experiences and store them into a ReplayBuffer.

        :param env: The training environment
        :param callback: Callback that will be called at each step
            (and at the beginning and end of the rollout)
        :param n_episodes: Number of episodes to use to collect rollout data.
            You can also specify ``n_steps`` instead.
        :param n_steps: Number of steps to use to collect rollout data.
            You can also specify ``n_episodes`` instead.
        :param action_noise: Action noise that will be used for exploration
            Required for deterministic policy (e.g. TD3). This can also be used
            in addition to the stochastic policy for SAC.
        :param learning_starts: Number of steps before learning for the warm-up phase.
        :param replay_buffer: Buffer where the collected transitions are stored.
        :param log_interval: Log data every ``log_interval`` episodes
        :return: A ``RolloutReturn`` with the mean episode reward, the number of steps
            and episodes collected, and whether training should continue.
        """
        episode_rewards, total_timesteps = [], []
        total_steps, total_episodes = 0, 0

        assert isinstance(env, VecEnv), "You must pass a VecEnv"
        assert env.num_envs == 1, "OffPolicyAlgorithm only supports a single environment"

        if self.use_sde:
            self.actor.reset_noise()

        callback.on_rollout_start()
        continue_training = True

        while total_steps < n_steps or total_episodes < n_episodes:
            done = False
            episode_reward, episode_timesteps = 0.0, 0

            while not done:

                if self.use_sde and self.sde_sample_freq > 0 and total_steps % self.sde_sample_freq == 0:
                    # Sample a new noise matrix
                    self.actor.reset_noise()

                # Select action randomly or according to policy
                action, buffer_action = self._sample_action(learning_starts, action_noise)

                # Rescale and perform action
                new_obs, reward, done, infos = env.step(action)

                self.num_timesteps += 1
                episode_timesteps += 1
                total_steps += 1

                # Give access to local variables
                callback.update_locals(locals())
                # Only stop training if return value is False, not when it is None.
                if callback.on_step() is False:
                    return RolloutReturn(0.0, total_steps, total_episodes, continue_training=False)

                episode_reward += reward

                # Retrieve reward and episode length if using Monitor wrapper
                self._update_info_buffer(infos, done)

                # Store only the unnormalized version of the transition
                if self._vec_normalize_env is not None:
                    new_obs_ = self._vec_normalize_env.get_original_obs()
                    reward_ = self._vec_normalize_env.get_original_reward()
                else:
                    # Avoid changing the original ones
                    self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward

                # Add to the reservoir replay buffer, tagged with the global transition index
                if replay_buffer is not None:
                    replay_buffer.add(self._last_original_obs, new_obs_, buffer_action, reward_, done,
                                      self.num_timesteps - 1)
                # Always refresh the capacity-1 buffer holding the current experience
                self.current_experience_buffer.add(self._last_original_obs, new_obs_, buffer_action, reward_, done)

                self._last_obs = new_obs
                # Save the unnormalized observation
                if self._vec_normalize_env is not None:
                    self._last_original_obs = new_obs_

                self._update_current_progress_remaining(self.num_timesteps, self._total_timesteps)

                # For DQN, check if the target network should be updated
                # and update the exploration schedule
                # For SAC/TD3, the update is done as the same time as the gradient update
                # see https://github.com/hill-a/stable-baselines/issues/900
                self._on_step()

                if 0 < n_steps <= total_steps:
                    break

            if done:
                total_episodes += 1
                self._episode_num += 1
                episode_rewards.append(episode_reward)
                total_timesteps.append(episode_timesteps)

                if action_noise is not None:
                    action_noise.reset()

                # Log training infos
                if log_interval is not None and self._episode_num % log_interval == 0:
                    self._dump_logs()

        mean_reward = np.mean(episode_rewards) if total_episodes > 0 else 0.0

        callback.on_rollout_end()

        return RolloutReturn(mean_reward, total_steps, total_episodes, continue_training)

    def update_env(self, env, support_multi_env: bool = False, eval_env: Optional[GymEnv] = None,
                   monitor_wrapper: bool = True, is_reservoir_replay: bool = True, **kwargs):
        """
        Replace the current environment with a new one.
        :param env: Gym environment (already instantiated, not a string).
        :param support_multi_env: Whether the algorithm supports training
            with multiple environments (as in A2C)
        :param eval_env: Environment to use for evaluation.
        :param monitor_wrapper: When creating an environment, whether to wrap it
            or not in a Monitor wrapper.
        :param is_reservoir_replay: Whether the replay buffer should behave as a reservoir (True) or as normal experience replay (False)
        :param kwargs: Ignored; accepted so that additional arguments can be passed without the method failing
        :return:
        """
        if self.replay_buffer is not None:
            self.replay_buffer.is_reservoir = is_reservoir_replay

        if env is not None:
            if eval_env is not None:
                self.eval_env = eval_env
                if monitor_wrapper:
                    self.eval_env = Monitor(self.eval_env, filename=None)

            if monitor_wrapper:
                env = Monitor(env, filename=None)
            env = self._wrap_env(env, self.verbose)

            self.observation_space = env.observation_space
            self.action_space = env.action_space
            self.n_envs = env.num_envs
            self.env = env

            if not support_multi_env and self.n_envs > 1:
                raise ValueError(
                    "Error: the model does not support multiple envs; it requires " "a single vectorized environment."
                )

    def add_memories_from_another_replay_mem(self, another_replay_mem: ReplayBuffer):
        # Copy only the filled part of the other buffer (all of it when the buffer is full)
        upper_bound = another_replay_mem.buffer_size if another_replay_mem.full else another_replay_mem.pos
        for i in range(upper_bound):
            self.replay_buffer.add(
                obs=another_replay_mem.observations[i],
                next_obs=another_replay_mem.next_observations[i],
                action=another_replay_mem.actions[i],
                reward=another_replay_mem.rewards[i],
                done=another_replay_mem.dones[i],
                experience_index=0,  # doesn't matter
            )
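
# Editor's sketch (not part of the original code): merging an existing SB3-style
# ReplayBuffer into the reservoir memory of a concrete subclass instance.
# ``ReservoirSAC`` and ``env`` are hypothetical placeholders for illustration only.
#
#   model = ReservoirSAC("MlpPolicy", env)
#   old_buffer = ReplayBuffer(10_000, model.observation_space, model.action_space, model.device)
#   ...  # fill old_buffer with transitions elsewhere
#   model.add_memories_from_another_replay_mem(old_buffer)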