Example #1
                                  info["episode"]["r"], global_step)
                break

        # TRY NOT TO MODIFY: save data to replay buffer; handle `terminal_observation`
        real_next_obs = next_obs.copy()
        for idx, d in enumerate(dones):
            if d:
                real_next_obs[idx] = infos[idx]["terminal_observation"]
        rb.add(obs, real_next_obs, actions, rewards, dones, infos)

        # TRY NOT TO MODIFY: CRUCIAL step easy to overlook
        obs = next_obs

        # ALGO LOGIC: training.
        if global_step > args.learning_starts:
            data = rb.sample(args.batch_size)
            with torch.no_grad():
                clipped_noise = (torch.randn_like(torch.Tensor(actions[0])) *
                                 args.policy_noise).clamp(
                                     -args.noise_clip, args.noise_clip)

                next_state_actions = (
                    target_actor.forward(data.next_observations) +
                    clipped_noise.to(device)).clamp(
                        envs.single_action_space.low[0],
                        envs.single_action_space.high[0])
                qf1_next_target = qf1_target.forward(data.next_observations,
                                                     next_state_actions)
                qf2_next_target = qf2_target.forward(data.next_observations,
                                                     next_state_actions)
                min_qf_next_target = torch.min(qf1_next_target,
                                               qf2_next_target)
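For reference, here is a minimal, self-contained sketch of the TD3-style target this block is computing: clipped target-policy smoothing noise, the minimum over the two target critics, then the usual TD backup. It is only an illustration; it assumes the sampled batch `data` exposes `actions`, `next_observations`, `rewards`, and `dones` tensors, and it reuses the `target_actor`, `qf1_target`, `qf2_target`, and hyperparameter names from the fragment above.

import torch


def td3_target(data, target_actor, qf1_target, qf2_target, policy_noise,
               noise_clip, action_low, action_high, gamma):
    with torch.no_grad():
        # Target-policy smoothing: add clipped Gaussian noise to the target action.
        noise = (torch.randn_like(data.actions) * policy_noise).clamp(
            -noise_clip, noise_clip)
        next_actions = (target_actor(data.next_observations) + noise).clamp(
            action_low, action_high)
        # Clipped double-Q: take the minimum of the two target critics.
        min_q = torch.min(qf1_target(data.next_observations, next_actions),
                          qf2_target(data.next_observations, next_actions))
        # TD backup; `dones` masks out bootstrapping at episode ends.
        return data.rewards.flatten() + (
            1 - data.dones.flatten()) * gamma * min_q.view(-1)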
Example #2
class AWAC(OffPolicyAlgorithm):
    """
	Advantage Weighted Actor Critic (AWAC), built on top of a Soft Actor-Critic (SAC) implementation
	(Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor).
	This implementation borrows code from the original SAC implementation (https://github.com/haarnoja/sac),
	from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo
	(https://github.com/rail-berkeley/softlearning/)
	and from Stable Baselines (https://github.com/hill-a/stable-baselines)
	SAC paper: https://arxiv.org/abs/1801.01290
	Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html

	Note: we use double q target and not value target as discussed
	in https://github.com/hill-a/stable-baselines/issues/270

	:param policy: (SACPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, ...)
	:param env: (GymEnv or str) The environment to learn from (if registered in Gym, can be str)
	:param learning_rate: (float or callable) learning rate for adam optimizer,
		the same learning rate will be used for all networks (Q-Values, Actor and Value function)
		it can be a function of the current progress remaining (from 1 to 0)
	:param buffer_size: (int) size of the replay buffer
	:param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
	:param batch_size: (int) Minibatch size for each gradient update
	:param tau: (float) the soft update coefficient ("Polyak update", between 0 and 1)
	:param gamma: (float) the discount factor
	:param train_freq: (int) Update the model every ``train_freq`` steps.
	:param gradient_steps: (int) How many gradient updates to perform after each rollout step
	:param n_episodes_rollout: (int) Update the model every ``n_episodes_rollout`` episodes.
		Note that this cannot be used at the same time as ``train_freq``
	:param action_noise: (ActionNoise) the action noise type (None by default), this can help
		for hard exploration problem. Cf common.noise for the different action noise type.
	:param ent_coef: (str or float) Entropy regularization coefficient (equivalent to the
		inverse of the reward scale in the original SAC paper). It controls the exploration/exploitation trade-off.
		Set it to 'auto' to learn it automatically (and 'auto_0.1' to use 0.1 as the initial value)
	:param target_update_interval: (int) update the target network every ``target_update_interval`` steps.
	:param target_entropy: (str or float) target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``)
	:param awr_use_mle_for_vf: (bool) if True, use the policy mean (MLE) action when estimating the
		value baseline for the advantage weighting (see ``train_batch``)
	:param beta: (float) temperature of the advantage weighting ``softmax((Q - V) / beta)``
	:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
	:param create_eval_env: (bool) Whether to create a second environment that will be
		used for evaluating the agent periodically. (Only available when passing string for the environment)
	:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
	:param verbose: (int) the verbosity level: 0 no output, 1 info, 2 debug
	:param seed: (int) Seed for the pseudo random generators
	:param device: (str or th.device) Device (cpu, cuda, ...) on which the code should be run.
		Setting it to auto, the code will be run on the GPU if possible.
	:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
	"""
    def __init__(self,
                 policy: Union[str, Type[AWACPolicy]],
                 env: Union[GymEnv, str],
                 learning_rate: Union[float, Callable] = 3e-4,
                 buffer_size: int = int(1e6),
                 learning_starts: int = 100,
                 batch_size: int = 256,
                 tau: float = 0.005,
                 gamma: float = 0.99,
                 train_freq: int = 1,
                 gradient_steps: int = 1,
                 n_episodes_rollout: int = -1,
                 action_noise: Optional[ActionNoise] = None,
                 ent_coef: Union[str, float] = 'auto',
                 target_update_interval: int = 1,
                 target_entropy: Union[str, float] = 'auto',
                 awr_use_mle_for_vf: bool = True,
                 beta: int = 50,
                 tensorboard_log: Optional[str] = None,
                 create_eval_env: bool = False,
                 policy_kwargs: Optional[Dict[str, Any]] = None,
                 verbose: int = 0,
                 seed: Optional[int] = None,
                 device: Union[th.device, str] = 'auto',
                 _init_setup_model: bool = True):

        super().__init__(policy,
                         env,
                         AWACPolicy,
                         learning_rate,
                         buffer_size,
                         learning_starts,
                         batch_size,
                         policy_kwargs,
                         tensorboard_log,
                         verbose,
                         device,
                         create_eval_env=create_eval_env,
                         seed=seed,
                         use_sde=False,
                         sde_sample_freq=-1,
                         use_sde_at_warmup=False)

        self.target_entropy = target_entropy
        self.log_ent_coef = None  # type: Optional[th.Tensor]
        self.target_update_interval = target_update_interval
        self.tau = tau
        # Entropy coefficient / Entropy temperature
        # Inverse of the reward scale
        self.ent_coef = ent_coef
        self.train_freq = train_freq
        self.gradient_steps = gradient_steps
        self.n_episodes_rollout = n_episodes_rollout
        self.action_noise = action_noise
        self.gamma = gamma
        self.ent_coef_optimizer = None
        self.awr_use_mle_for_vf = awr_use_mle_for_vf
        self.beta = beta
        self.bc_buffer = None

        if _init_setup_model:
            self._setup_model()

    def _setup_model(self) -> None:
        super()._setup_model()
        self._create_aliases()

        # Target entropy is used when learning the entropy coefficient
        if self.target_entropy == 'auto':
            # automatically set target entropy if needed
            self.target_entropy = -np.prod(self.env.action_space.shape).astype(
                np.float32)
        else:
            # Force conversion
            # this will also throw an error for unexpected string
            self.target_entropy = float(self.target_entropy)

        # The entropy coefficient or entropy can be learned automatically
        # see Automating Entropy Adjustment for Maximum Entropy RL section
        # of https://arxiv.org/abs/1812.05905
        if isinstance(self.ent_coef, str) and self.ent_coef.startswith('auto'):
            # Default initial value of ent_coef when learned
            init_value = 1.0
            if '_' in self.ent_coef:
                init_value = float(self.ent_coef.split('_')[1])
                assert init_value > 0., "The initial value of ent_coef must be greater than 0"

            # Note: we optimize the log of the entropy coeff which is slightly different from the paper
            # as discussed in https://github.com/rail-berkeley/softlearning/issues/37
            self.log_ent_coef = th.log(
                th.ones(1, device=self.device) *
                init_value).requires_grad_(True)
            self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef],
                                                    lr=self.lr_schedule(1))
        else:
            # Force conversion to float
            # this will throw an error if a malformed string (different from 'auto')
            # is passed
            self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(
                self.device)

        self.bc_buffer = ReplayBuffer(int(1e4), self.observation_space,
                                      self.action_space, self.device)

    def _create_aliases(self) -> None:
        self.actor = self.policy.actor
        self.critic = self.policy.critic
        self.critic_target = self.policy.critic_target

    def pretrain_bc(self, gradient_steps: int, batch_size: int = 64):
        statistics = []
        with trange(gradient_steps) as t:
            for gradient_step in t:
                replay_data = self.bc_buffer.sample(
                    batch_size, env=self._vec_normalize_env)
                dist = self.actor(replay_data.observations)
                actions_pi, log_prob = dist.log_prob_and_rsample()
                # Behavior cloning: maximize the log-likelihood of the demonstration actions.
                actor_loss = -dist.log_prob(replay_data.actions).mean()
                actor_mse_loss = F.mse_loss(actions_pi.detach(),
                                            replay_data.actions)

                self.actor.optimizer.zero_grad()
                actor_loss.backward()
                self.actor.optimizer.step()

                statistics.append((actor_loss.item(), actor_mse_loss.item()))
                t.set_postfix(mse_loss=actor_mse_loss.item(),
                              policy_loss=actor_loss.item())
        actor_losses, mse_losses = tuple(zip(*statistics))

        logger.record("pretrain/n_updates",
                      self._n_updates,
                      exclude='tensorboard')
        logger.record("pretrain/actor_loss", np.mean(actor_losses))
        logger.record("pretrain/actor_mse_loss", np.mean(mse_losses))

    def pretrain_rl(self, gradient_steps: int, batch_size: int = 64) -> None:
        statistics = []
        with trange(gradient_steps) as t:
            for gradient_step in t:
                replay_data = self.replay_buffer.sample(
                    batch_size, env=self._vec_normalize_env)
                stats = self.train_batch(replay_data)
                statistics.append(stats)
                self._n_updates += 1
                t.set_postfix(qf_loss=stats[1], policy_loss=stats[0])
        actor_losses, critic_losses, ent_coef_losses, ent_coefs = tuple(
            zip(*statistics))

        logger.record("pretrain/n_updates",
                      self._n_updates,
                      exclude='tensorboard')
        logger.record("pretrain/ent_coef", np.mean(ent_coefs))
        logger.record("pretrain/actor_loss", np.mean(actor_losses))
        logger.record("pretrain/critic_loss", np.mean(critic_losses))
        logger.record("pretrain/ent_coef_loss", np.mean(ent_coef_losses))

    def train(self, gradient_steps: int, batch_size: int = 64) -> None:
        statistics = []
        for gradient_step in range(gradient_steps):
            replay_data = self.replay_buffer.sample(
                batch_size, env=self._vec_normalize_env)
            stats = self.train_batch(replay_data)
            statistics.append(stats)
            self._n_updates += 1
        actor_losses, critic_losses, ent_coef_losses, ent_coefs = tuple(
            zip(*statistics))

        logger.record("train/n_updates",
                      self._n_updates,
                      exclude='tensorboard')
        logger.record("train/ent_coef", np.mean(ent_coefs))
        logger.record("train/actor_loss", np.mean(actor_losses))
        logger.record("train/critic_loss", np.mean(critic_losses))
        logger.record("train/ent_coef_loss", np.mean(ent_coef_losses))

    def train_batch(self, replay_data):
        # Action by the current actor for the sampled state
        dist = self.actor(replay_data.observations)
        actions_pi, log_prob = dist.log_prob_and_rsample()
        actor_mle = dist.mean
        """ent_coeff loss"""
        ent_coef_loss = None
        if self.ent_coef_optimizer is not None:
            # Important: detach the variable from the graph
            # so we don't change it with other losses
            # see https://github.com/rail-berkeley/softlearning/issues/60
            ent_coef = th.exp(self.log_ent_coef.detach())
            ent_coef_loss = -(
                self.log_ent_coef *
                (log_prob + self.target_entropy).detach()).mean()
        else:
            ent_coef = self.ent_coef_tensor
        """q loss"""
        with th.no_grad():
            # Select action according to policy
            next_dist = self.actor(replay_data.next_observations)
            next_actions, next_log_prob = next_dist.log_prob_and_rsample()
            # Compute the target Q value
            target_q1, target_q2 = self.critic_target(
                replay_data.next_observations, next_actions)
            target_q = th.min(target_q1, target_q2)
            target_q = replay_data.rewards + (
                1 - replay_data.dones) * self.gamma * target_q
            # td error + entropy term
            # q_backup = target_q - ent_coef * next_log_prob
            q_backup = target_q
        # Get current Q estimates
        # using action from the replay buffer
        current_q1, current_q2 = self.critic(replay_data.observations,
                                             replay_data.actions)
        # Compute critic loss
        critic_loss = 0.5 * (F.mse_loss(current_q1, q_backup) +
                             F.mse_loss(current_q2, q_backup))
        """action loss"""
        # Advantage-weighted regression
        # if self.awr_use_mle_for_vf:
        # 	v1_pi,v2_pi = self.critic(replay_data.observations, actor_mle)
        # 	v_pi = th.min(v1_pi, v2_pi)
        # else:
        # 	v1_pi,v2_pi = self.critic(replay_data.observations, actions_pi)
        # 	v_pi = th.min(v1_pi, v2_pi)
        q_adv = th.min(current_q1, current_q2)
        v1_pi, v2_pi = self.critic(replay_data.observations, actor_mle)
        v_pi = th.min(v1_pi, v2_pi)
        # q_adv = th.min(*self.critic(replay_data.observations, actions_pi))
        score = q_adv - v_pi
        weights = F.softmax(score / self.beta, dim=0)
        # actor_loss = ent_coef * log_prob.mean()
        actor_logpp = dist.log_prob(replay_data.actions)
        actor_loss = (-actor_logpp * len(weights) * weights.detach()).mean()
        """Updates"""
        # Optimize entropy coefficient, also called
        # entropy temperature or alpha in the paper
        if ent_coef_loss is not None:
            self.ent_coef_optimizer.zero_grad()
            ent_coef_loss.backward()
            self.ent_coef_optimizer.step()
        # Optimize the critic
        self.critic.optimizer.zero_grad()
        critic_loss.backward()
        self.critic.optimizer.step()
        # Optimize the actor
        self.actor.optimizer.zero_grad()
        actor_loss.backward()
        self.actor.optimizer.step()

        # Update target networks
        if self._n_updates % self.target_update_interval == 0:
            for param, target_param in zip(self.critic.parameters(),
                                           self.critic_target.parameters()):
                target_param.data.copy_(self.tau * param.data +
                                        (1 - self.tau) * target_param.data)

        if ent_coef_loss is None:
            ent_coef_loss = th.tensor([0])
        return (actor_loss.item(), critic_loss.item(), ent_coef_loss.item(),
                ent_coef.item())

    def learn(self,
              total_timesteps: int,
              callback: MaybeCallback = None,
              log_interval: int = 4,
              eval_env: Optional[GymEnv] = None,
              eval_freq: int = -1,
              n_eval_episodes: int = 5,
              tb_log_name: str = "AWAC",
              eval_log_path: Optional[str] = None,
              reset_num_timesteps: bool = True) -> OffPolicyAlgorithm:

        total_timesteps, callback = self._setup_learn(
            total_timesteps, eval_env, callback, eval_freq, n_eval_episodes,
            eval_log_path, reset_num_timesteps, tb_log_name)
        callback.on_training_start(locals(), globals())

        self.pretrain_bc(int(1e3), batch_size=self.batch_size)
        observations = self.bc_buffer.observations
        actions = self.bc_buffer.actions
        next_observations = self.bc_buffer.next_observations
        rewards = self.bc_buffer.rewards
        dones = self.bc_buffer.dones
        for data in zip(observations, next_observations, actions, rewards,
                        dones):
            self.replay_buffer.add(*data)
        self.pretrain_rl(int(1e4), batch_size=self.batch_size)

        while self.num_timesteps < total_timesteps:
            rollout = self.collect_rollouts(
                self.env,
                n_episodes=self.n_episodes_rollout,
                n_steps=self.train_freq,
                action_noise=self.action_noise,
                callback=callback,
                learning_starts=self.learning_starts,
                replay_buffer=self.replay_buffer,
                log_interval=log_interval)

            if rollout.continue_training is False:
                break

            self._update_current_progress_remaining(self.num_timesteps,
                                                    total_timesteps)

            if self.num_timesteps > 0 and self.num_timesteps > self.learning_starts:
                gradient_steps = self.gradient_steps if self.gradient_steps > 0 else rollout.episode_timesteps
                self.train(gradient_steps, batch_size=self.batch_size)

        callback.on_training_end()
        return self

    def excluded_save_params(self) -> List[str]:
        """
		Returns the names of the parameters that should be excluded by default
		when saving the model.

		:return: (List[str]) List of parameters that should be excluded from save
		"""
        # Exclude aliases
        return super().excluded_save_params() + [
            "actor", "critic", "critic_target"
        ]

    def get_torch_variables(self) -> Tuple[List[str], List[str]]:
        """
		cf base class
		"""
        state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
        saved_tensors = ['log_ent_coef']
        if self.ent_coef_optimizer is not None:
            state_dicts.append('ent_coef_optimizer')
        else:
            saved_tensors.append('ent_coef_tensor')
        return state_dicts, saved_tensors
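The heart of ``train_batch`` above is the advantage-weighted regression step: actions from the replay buffer are re-weighted by a softmax over an advantage estimate (the buffer-action Q-value minus the critic evaluated at the policy mean, used as a value baseline), and the actor is fit by weighted maximum likelihood. Below is a minimal sketch of just that weighting; the tensors are made-up, hypothetical stand-ins for the critic and policy outputs.

import torch as th
import torch.nn.functional as F

# Hypothetical per-sample quantities for a batch of 5 transitions.
q_replay = th.tensor([1.0, 2.5, 0.3, 4.0, 1.2])              # min Q(s, a) for the buffer actions
v_baseline = th.tensor([1.5, 1.5, 1.5, 1.5, 1.5])            # min Q(s, mean action), used as V(s)
log_prob_replay = th.tensor([-1.0, -0.5, -2.0, -0.2, -1.5])  # log pi(a|s) for the buffer actions
beta = 50.0                                                  # temperature, as in the class default

advantage = q_replay - v_baseline
# A softmax over the batch turns advantages into normalized weights; scaling by the
# batch size keeps the weighted loss on roughly the same scale as an unweighted mean,
# mirroring `train_batch` above.
weights = F.softmax(advantage / beta, dim=0)
actor_loss = (-log_prob_replay * len(weights) * weights.detach()).mean()
print(weights, actor_loss)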
Example #3
class CMVCVaRSAC(OffPolicyAlgorithm):
    def __init__(
        self,
        policy: Union[str, Type[CMVC51SACPolicy]],
        env: Union[GymEnv, str],
        min_v: float = -25,
        max_v: float = +25,
        support_dim: int = 200,
        learning_rate: Union[float, Schedule] = 3e-4,
        buffer_size: int = int(5e4),
        learning_starts: int = 100,
        batch_size: int = 64,
        tau: float = 0.005,
        gamma: float = 0.99,
        train_freq: Union[int, Tuple[int, str]] = 1,
        gradient_steps: int = 1,
        action_noise: Optional[ActionNoise] = None,
        optimize_memory_usage: bool = False,
        ent_coef: Union[str, float] = "auto",
        target_update_interval: int = 1,
        target_entropy: Union[str, float] = "auto",
        use_sde: bool = False,
        sde_sample_freq: int = -1,
        use_sde_at_warmup: bool = False,
        tensorboard_log: Optional[str] = None,
        create_eval_env: bool = False,
        policy_kwargs: Optional[Dict[str, Any]] = None,
        verbose: int = 0,
        seed: Optional[int] = None,
        device: Union[th.device, str] = "auto",
        _init_setup_model: bool = True,
        cvar_alpha=0.3,
        cmv_beta=1,
    ):
        super(CMVCVaRSAC, self).__init__(
            policy,
            env,
            CMVC51SACPolicy,
            learning_rate,
            buffer_size,
            learning_starts,
            batch_size,
            tau,
            gamma,
            train_freq,
            gradient_steps,
            action_noise,
            policy_kwargs=policy_kwargs,
            tensorboard_log=tensorboard_log,
            verbose=verbose,
            device=device,
            create_eval_env=create_eval_env,
            seed=seed,
            use_sde=use_sde,
            sde_sample_freq=sde_sample_freq,
            use_sde_at_warmup=use_sde_at_warmup,
            optimize_memory_usage=optimize_memory_usage,
            supported_action_spaces=(gym.spaces.Box,),
        )

        self.target_entropy = target_entropy
        self.log_ent_coef = None  # type: Optional[th.Tensor]
        # Entropy coefficient / Entropy temperature
        # Inverse of the reward scale
        self.ent_coef = ent_coef
        self.target_update_interval = target_update_interval
        self.ent_coef_optimizer = None
        self.min_v = min_v
        self.max_v = max_v
        self.support_dim = support_dim
        self.interval = (1 / (support_dim - 1)) * (max_v - min_v)
        self.supports = th.from_numpy(
            np.array([min_v + i * self.interval for i in range(support_dim)],
                     dtype=np.float32)).to(self.device)
        self._total_timesteps = None
        self.cvar_alpha = cvar_alpha
        self.cmv_beta = cmv_beta
        if _init_setup_model:
            self._setup_model()

    def _setup_model(self) -> None:
        self._setup_lr_schedule()
        self.set_random_seed(self.seed)
        self.replay_buffer = ReplayBuffer(
            self.buffer_size,
            self.observation_space,
            self.action_space,
            self.device,
            optimize_memory_usage=self.optimize_memory_usage,
        )
        self.policy = self.policy_class(
            self.observation_space,
            self.action_space,
            self.support_dim,
            self.lr_schedule,
            **self.policy_kwargs,  # pytype:disable=not-instantiable
        )
        self.policy = self.policy.to(self.device)

        # Convert train freq parameter to TrainFreq object
        self._convert_train_freq()
        self._create_aliases()
        # Target entropy is used when learning the entropy coefficient
        if self.target_entropy == "auto":
            # automatically set target entropy if needed
            self.target_entropy = -np.prod(self.env.action_space.shape).astype(
                np.float32)
        else:
            # Force conversion
            # this will also throw an error for unexpected string
            self.target_entropy = float(self.target_entropy)

        # The entropy coefficient or entropy can be learned automatically
        # see Automating Entropy Adjustment for Maximum Entropy RL section
        # of https://arxiv.org/abs/1812.05905
        if isinstance(self.ent_coef, str) and self.ent_coef.startswith("auto"):
            # Default initial value of ent_coef when learned
            init_value = 1.0
            if "_" in self.ent_coef:
                init_value = float(self.ent_coef.split("_")[1])
                assert init_value > 0.0, "The initial value of ent_coef must be greater than 0"

            # Note: we optimize the log of the entropy coeff which is slightly different from the paper
            # as discussed in https://github.com/rail-berkeley/softlearning/issues/37
            self.log_ent_coef = th.log(
                th.ones(1, device=self.device) *
                init_value).requires_grad_(True)
            self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef],
                                                    lr=self.lr_schedule(1))
        else:
            # Force conversion to float
            # this will throw an error if a malformed string (different from 'auto')
            # is passed
            self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(
                self.device)

    def _create_aliases(self) -> None:
        self.actor = self.policy.actor
        self.critic = self.policy.critic
        self.critic_target = self.policy.critic_target
        self.cmv_net = self.policy.cmv_net
        self.beta_critic = self.policy.beta_critic
        self.beta_critic_target = self.policy.beta_critic_target

    def projection(self, support_rows, target_z):
        # Project the shifted atoms (`support_rows`) back onto the fixed support,
        # splitting each probability mass between the two neighbouring atoms
        # (standard categorical / C51 projection).
        projected_target_z = th.zeros_like(target_z)
        support_rows = support_rows.clamp(self.min_v, self.max_v - 1e-3)
        # Fractional position between the lower atom (idx) and the upper atom (idx + 1).
        p = ((support_rows - self.min_v) % self.interval) / self.interval
        idx = ((support_rows - self.min_v) // self.interval).long()
        # The lower atom receives the (1 - p) share, the upper atom the p share.
        projected_target_z = projected_target_z.scatter_add(
            1, idx, target_z * (1 - p))
        projected_target_z = projected_target_z.scatter_add(
            1, idx + 1, target_z * p)

        return projected_target_z

    def train(self, gradient_steps: int, batch_size: int = 64) -> None:
        # Update optimizers learning rate
        optimizers = [self.actor.optimizer, self.critic.optimizer]
        if self.ent_coef_optimizer is not None:
            optimizers += [self.ent_coef_optimizer]

        # Update learning rate according to lr schedule
        self._update_learning_rate(optimizers)

        ent_coef_losses, ent_coefs = [], []
        actor_losses, critic_losses, reward_losses, feature_pred_losses = [], [], [], []
        cvars = []
        qs = []
        critic_beta_losses = []

        for gradient_step in range(gradient_steps):
            # Sample replay buffer
            replay_data = self.replay_buffer.sample(
                batch_size, env=self._vec_normalize_env)

            # We need to sample because `log_std` may have changed between two gradient steps
            if self.use_sde:
                self.actor.reset_noise()

            # Action by the current actor for the sampled state
            actions_pi, log_prob = self.actor.action_log_prob(
                replay_data.observations)
            log_prob = log_prob.reshape(-1, 1)

            ent_coef_loss = None
            if self.ent_coef_optimizer is not None:
                # Important: detach the variable from the graph
                # so we don't change it with other losses
                # see https://github.com/rail-berkeley/softlearning/issues/60
                ent_coef = th.exp(self.log_ent_coef.detach())
                ent_coef_loss = -(
                    self.log_ent_coef *
                    (log_prob + self.target_entropy).detach()).mean()
                ent_coef_losses.append(ent_coef_loss.item())
            else:
                ent_coef = self.ent_coef_tensor

            ent_coefs.append(ent_coef.item())

            # Optimize entropy coefficient, also called
            # entropy temperature or alpha in the paper
            if ent_coef_loss is not None:
                self.ent_coef_optimizer.zero_grad()
                ent_coef_loss.backward()
                self.ent_coef_optimizer.step()

            with th.no_grad():
                # Select action according to policy
                next_actions, next_log_prob = self.actor.action_log_prob(
                    replay_data.next_observations)
                # Compute the next Q values: min over all critics targets
                target_zs = th.cat(self.critic_target(
                    replay_data.next_observations, next_actions),
                                   dim=1)
                # add entropy term
                target_supports = self.supports.clone().detach()
                target_supports = target_supports - ent_coef * next_log_prob.reshape(
                    -1, 1)
                # td error + entropy term
                target_supports = replay_data.rewards + (
                    1 - replay_data.dones) * self.gamma * target_supports

            # Get current Q-values estimates for each critic network
            # using action from the replay buffer
            current_zs = th.cat(self.critic(replay_data.observations,
                                            replay_data.actions),
                                dim=1)
            target_zs = self.projection(target_supports, target_zs)
            # Compute critic loss

            critic_loss = -th.mean(th.log(current_zs + 1e-12) * target_zs)
            critic_losses.append(critic_loss.item())

            # Optimize the critic
            self.critic.optimizer.zero_grad()
            critic_loss.backward()
            self.critic.optimizer.step()

            # For the CMV learning
            with th.no_grad():
                next_z = self.critic_target.features_extractor(
                    replay_data.next_observations)
            # Predicted reward and predicted next-state features
            r_pred, z_pred = self.cmv_net(replay_data.observations,
                                          replay_data.actions)
            mse_r_pred = th.mean(th.square(r_pred - replay_data.rewards))
            mse_z_pred = th.mean(th.square(z_pred - next_z))
            loss_cmv = mse_r_pred + mse_z_pred

            reward_losses.append(mse_r_pred.item())
            feature_pred_losses.append(mse_z_pred.item())

            # Optimize the CMV Nets
            self.cmv_net.optimizer.zero_grad()
            loss_cmv.backward()
            self.cmv_net.optimizer.step()

            with th.no_grad():
                # Select action according to policy
                # Compute the next Q values: min over all critics targets
                next_q_beta_values = th.cat(self.beta_critic_target(
                    replay_data.next_observations, next_actions),
                                            dim=1)
                next_q_beta_values, _ = th.min(next_q_beta_values,
                                               dim=1,
                                               keepdim=True)
                # add entropy term
                next_q_beta_values = next_q_beta_values - ent_coef * next_log_prob.reshape(
                    -1, 1)
                # td error + entropy term
                target_q_beta_values = loss_cmv.detach() + (
                    1 - replay_data.dones) * (self.gamma**
                                              2) * next_q_beta_values

            # Get current Q-beta values estimates for each critic network
            # using action from the replay buffer
            current_q_beta_values = self.beta_critic(replay_data.observations,
                                                     replay_data.actions)

            # Compute critic beta loss
            critic_beta_loss = \
                0.5 * sum(
                    [F.mse_loss(current_q_beta, target_q_beta_values) for current_q_beta in current_q_beta_values])
            critic_beta_losses.append(critic_beta_loss.item())

            # Optimize the critic beta
            self.beta_critic.optimizer.zero_grad()
            critic_beta_loss.backward()
            self.beta_critic.optimizer.step()

            # Compute actor loss
            # Alternative: actor_loss = th.mean(log_prob - qf1_pi - qf1_beta_pi)
            # Mean over all critic networks
            z_pi = th.cat(self.critic.forward(replay_data.observations,
                                              actions_pi),
                          dim=1)

            z_cdf = th.cumsum(z_pi, dim=-1)
            adjust_pdf = th.where(th.le(z_cdf, self.cvar_alpha), z_pi,
                                  th.zeros_like(z_pi))
            adjust_pdf = th.div(adjust_pdf,
                                th.sum(adjust_pdf, dim=-1, keepdim=True))
            q_pi = adjust_pdf @ self.supports
            cvars.append(th.mean(q_pi).item())
            qs.append(th.mean(z_pi @ self.supports).item())
            q_beta_values_pi = th.cat(self.beta_critic.forward(
                replay_data.observations, actions_pi),
                                      dim=1)
            max_qf_beta_pi, _ = th.max(q_beta_values_pi, dim=1, keepdim=True)
            # Penalize the actor with the differentiable beta-Q estimate for the current
            # policy action; the detached `next_q_beta_values` computed above carries no
            # gradient. `q_pi` is reshaped so every term is a (batch, 1) column.
            actor_loss = (ent_coef * log_prob - q_pi.reshape(-1, 1) +
                          self.cmv_beta * max_qf_beta_pi).mean()
            actor_losses.append(actor_loss.item())

            # Optimize the actor
            self.actor.optimizer.zero_grad()
            actor_loss.backward()
            self.actor.optimizer.step()

            # Update target networks
            if gradient_step % self.target_update_interval == 0:
                polyak_update(self.critic.parameters(),
                              self.critic_target.parameters(), self.tau)
                polyak_update(self.beta_critic.parameters(),
                              self.beta_critic_target.parameters(), self.tau)

        self._n_updates += gradient_steps
        fps = int(self.num_timesteps / (time.time() - self.start_time))
        remaining_steps = self._total_timesteps - self.num_timesteps

        eta = int(round(remaining_steps / max(fps, 1)))
        logger.record("time/eta",
                      timedelta(seconds=eta),
                      exclude="tensorboard")
        logger.record("train/CVaR Alpha", self.cvar_alpha)
        logger.record("train/CMV Beta", self.cmv_beta)
        logger.record("train/CVaR", np.mean(cvars))
        logger.record("train/Q-value", np.mean(qs))
        logger.record("train/n_updates",
                      self._n_updates,
                      exclude="tensorboard")
        logger.record("train/ent_coef", np.mean(ent_coefs))
        logger.record("train/actor_loss", np.mean(actor_losses))
        logger.record("train/critic_loss", np.mean(critic_losses))
        logger.record("train/reward error", np.mean(reward_losses))
        logger.record("train/s_t+1_error", np.mean(feature_pred_losses))
        logger.record("train/beta_Q_loss", np.mean(critic_beta_losses))
        if len(ent_coef_losses) > 0:
            logger.record("train/ent_coef_loss", np.mean(ent_coef_losses))

    def learn(
        self,
        total_timesteps: int,
        callback: MaybeCallback = None,
        log_interval: int = 4,
        eval_env: Optional[GymEnv] = None,
        eval_freq: int = -1,
        n_eval_episodes: int = 5,
        tb_log_name: str = "C51SAC",
        eval_log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
    ) -> OffPolicyAlgorithm:

        return super(CMVCVaRSAC, self).learn(
            total_timesteps=total_timesteps,
            callback=callback,
            log_interval=log_interval,
            eval_env=eval_env,
            eval_freq=eval_freq,
            n_eval_episodes=n_eval_episodes,
            tb_log_name=tb_log_name,
            eval_log_path=eval_log_path,
            reset_num_timesteps=reset_num_timesteps,
        )

    def _excluded_save_params(self) -> List[str]:
        return super(CMVCVaRSAC, self)._excluded_save_params() + [
            "actor", "critic", "critic_target"
        ]

    def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
        state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
        saved_pytorch_variables = ["log_ent_coef"]
        if self.ent_coef_optimizer is not None:
            state_dicts.append("ent_coef_optimizer")
        else:
            saved_pytorch_variables.append("ent_coef_tensor")
        return state_dicts, saved_pytorch_variables
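For reference, the CVaR term that enters the actor loss above keeps only the lower tail of the categorical value distribution: atoms are kept while the running CDF stays below ``cvar_alpha``, the truncated distribution is renormalized, and its expectation is taken. Below is a small standalone sketch of that computation; the atom values and probabilities are made up purely for illustration.

import torch as th

# Toy categorical value distribution over 5 atoms (illustrative numbers only).
supports = th.tensor([-2.0, -1.0, 0.0, 1.0, 2.0])  # atom values
z_pi = th.tensor([[0.1, 0.2, 0.4, 0.2, 0.1]])      # probabilities, shape (batch, atoms)
cvar_alpha = 0.3

# Same steps as in `train`: truncate at the alpha-quantile via the running CDF,
# renormalize, and take the expectation of what remains.
z_cdf = th.cumsum(z_pi, dim=-1)
adjust_pdf = th.where(th.le(z_cdf, cvar_alpha), z_pi, th.zeros_like(z_pi))
adjust_pdf = th.div(adjust_pdf, th.sum(adjust_pdf, dim=-1, keepdim=True))
cvar = adjust_pdf @ supports  # lower-tail mean: (0.1 * -2 + 0.2 * -1) / 0.3 = -1.33...
mean_q = z_pi @ supports      # ordinary expected value (0.0 here), for comparison
print(cvar, mean_q)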