def _log_histogram_and_mean(self, log_key, val):
    try:
        SummaryWriterContext.add_histogram(log_key, val)
        SummaryWriterContext.add_scalar(f"{log_key}/mean", val.mean())
    except ValueError:
        logger.warning(
            f"Cannot create histogram for key: {log_key}; "
            "this is likely because you have NULL value in your input; "
            f"value: {val}"
        )
        raise
def get_log_prob(self, state, squashed_action):
    """
    Action is expected to be squashed with tanh
    """
    loc, scale_log = self._get_loc_and_scale_log(state)
    # This is not getting exported; we can use it
    n = Normal(loc, scale_log.exp())
    raw_action = self._atanh(squashed_action)
    log_prob = n.log_prob(raw_action)
    squash_correction = self._squash_correction(squashed_action)
    if SummaryWriterContext._global_step % 1000 == 0:
        SummaryWriterContext.add_histogram(
            "actor/get_log_prob/loc", loc.detach().cpu()
        )
        SummaryWriterContext.add_histogram(
            "actor/get_log_prob/scale_log", scale_log.detach().cpu()
        )
        SummaryWriterContext.add_histogram(
            "actor/get_log_prob/log_prob", log_prob.detach().cpu()
        )
        SummaryWriterContext.add_histogram(
            "actor/get_log_prob/squash_correction", squash_correction.detach().cpu()
        )
    log_prob = torch.sum(log_prob - squash_correction, dim=1).reshape(-1, 1)
    return log_prob
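# NOTE: `_atanh` and `_squash_correction` are referenced above but not included in
# this excerpt. Below is a minimal sketch of what such helpers typically compute
# for a tanh-squashed Gaussian (the change-of-variables term log(1 - a^2)),
# assuming a small epsilon clamp for numerical stability; the actual module may
# differ. `EPS` is an assumed constant, not taken from the source.
import torch

EPS = 1e-6


def _atanh(x: torch.Tensor) -> torch.Tensor:
    # Inverse of tanh; clamp away from +/-1 so the log terms stay finite
    x = x.clamp(-1.0 + EPS, 1.0 - EPS)
    return 0.5 * (torch.log1p(x) - torch.log1p(-x))


def _squash_correction(squashed_action: torch.Tensor) -> torch.Tensor:
    # Per-dimension log|d tanh(u)/du| = log(1 - tanh(u)^2) = log(1 - a^2);
    # callers subtract this from the Gaussian log-prob before summing over dims
    return torch.log(1.0 - squashed_action ** 2 + EPS)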
def forward(self, input):
    loc, scale_log = self._get_loc_and_scale_log(input.state)
    r = torch.randn_like(scale_log, device=scale_log.device)
    action = torch.tanh(loc + r * scale_log.exp())
    if not self.training:
        # ONNX doesn't like reshape either..
        return rlt.ActorOutput(action=action)
    # Since each dim is independent, log-prob is simply the sum
    log_prob = self._log_prob(r, scale_log)
    squash_correction = self._squash_correction(action)
    if SummaryWriterContext._global_step % 1000 == 0:
        SummaryWriterContext.add_histogram("actor/forward/loc", loc.detach().cpu())
        SummaryWriterContext.add_histogram(
            "actor/forward/scale_log", scale_log.detach().cpu()
        )
        SummaryWriterContext.add_histogram(
            "actor/forward/log_prob", log_prob.detach().cpu()
        )
        SummaryWriterContext.add_histogram(
            "actor/forward/squash_correction", squash_correction.detach().cpu()
        )
    log_prob = torch.sum(log_prob - squash_correction, dim=1)
    return rlt.ActorOutput(
        action=action, log_prob=log_prob.reshape(-1, 1), action_mean=loc
    )
def get_log_prob(self, state: rlt.FeatureData, squashed_action: torch.Tensor):
    """
    Action is expected to be squashed with tanh
    """
    if self.use_l2_normalization:
        # TODO: calculate log_prob for l2 normalization
        # https://math.stackexchange.com/questions/3120506/on-the-distribution-of-a-normalized-gaussian-vector
        # http://proceedings.mlr.press/v100/mazoure20a/mazoure20a.pdf
        pass

    loc, scale_log = self._get_loc_and_scale_log(state)
    raw_action = torch.atanh(squashed_action)
    r = (raw_action - loc) / scale_log.exp()
    log_prob = self._normal_log_prob(r, scale_log)
    squash_correction = self._squash_correction(squashed_action)
    if SummaryWriterContext._global_step % 1000 == 0:
        SummaryWriterContext.add_histogram(
            "actor/get_log_prob/loc", loc.detach().cpu()
        )
        SummaryWriterContext.add_histogram(
            "actor/get_log_prob/scale_log", scale_log.detach().cpu()
        )
        SummaryWriterContext.add_histogram(
            "actor/get_log_prob/log_prob", log_prob.detach().cpu()
        )
        SummaryWriterContext.add_histogram(
            "actor/get_log_prob/squash_correction", squash_correction.detach().cpu()
        )
    return torch.sum(log_prob - squash_correction, dim=1).reshape(-1, 1)
def forward(self, state: rlt.FeatureData):
    loc, scale_log = self._get_loc_and_scale_log(state)
    r = torch.randn_like(scale_log, device=scale_log.device)
    raw_action = loc + r * scale_log.exp()
    squashed_action = self._squash_raw_action(raw_action)
    squashed_loc = self._squash_raw_action(loc)
    if SummaryWriterContext._global_step % 1000 == 0:
        SummaryWriterContext.add_histogram("actor/forward/loc", loc.detach().cpu())
        SummaryWriterContext.add_histogram(
            "actor/forward/scale_log", scale_log.detach().cpu()
        )
    return rlt.ActorOutput(
        action=squashed_action,
        log_prob=self.get_log_prob(state, squashed_action),
        squashed_mean=squashed_loc,
    )
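# NOTE: `_squash_raw_action` is not part of this excerpt. The sketch below is an
# assumption of one plausible implementation (tanh squashing plus the optional
# l2 normalization implied by the `use_l2_normalization` flag in get_log_prob
# above); it is not the actual implementation.
import torch
import torch.nn.functional as F


def _squash_raw_action(self, raw_action: torch.Tensor) -> torch.Tensor:
    # Squash into (-1, 1) with tanh
    squashed = torch.tanh(raw_action)
    if self.use_l2_normalization:
        # Project onto the unit sphere; note that the log-prob correction for
        # this step is still a TODO in get_log_prob above
        squashed = F.normalize(squashed, p=2.0, dim=1)
    return squashed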
def _sample_action(self, loc: torch.Tensor, scale_log: torch.Tensor):
    r = torch.randn_like(scale_log, device=scale_log.device)
    action = torch.tanh(loc + r * scale_log.exp())
    # Since each dim is independent, log-prob is simply the sum
    log_prob = self.actor_network._log_prob(r, scale_log)
    squash_correction = self.actor_network._squash_correction(action)
    if SummaryWriterContext._global_step % 1000 == 0:
        SummaryWriterContext.add_histogram("actor/forward/loc", loc.detach().cpu())
        SummaryWriterContext.add_histogram(
            "actor/forward/scale_log", scale_log.detach().cpu()
        )
        SummaryWriterContext.add_histogram(
            "actor/forward/log_prob", log_prob.detach().cpu()
        )
        SummaryWriterContext.add_histogram(
            "actor/forward/squash_correction", squash_correction.detach().cpu()
        )
    log_prob = torch.sum(log_prob - squash_correction, dim=1)
    return action, log_prob
def _log_prob(
    self, loc: torch.Tensor, scale_log: torch.Tensor, squashed_action: torch.Tensor
):
    # This is not getting exported; we can use it
    n = torch.distributions.Normal(loc, scale_log.exp())
    raw_action = self.actor_network._atanh(squashed_action)
    log_prob = n.log_prob(raw_action)
    squash_correction = self.actor_network._squash_correction(squashed_action)
    if SummaryWriterContext._global_step % 1000 == 0:
        SummaryWriterContext.add_histogram(
            "actor/get_log_prob/loc", loc.detach().cpu()
        )
        SummaryWriterContext.add_histogram(
            "actor/get_log_prob/scale_log", scale_log.detach().cpu()
        )
        SummaryWriterContext.add_histogram(
            "actor/get_log_prob/log_prob", log_prob.detach().cpu()
        )
        SummaryWriterContext.add_histogram(
            "actor/get_log_prob/squash_correction", squash_correction.detach().cpu()
        )
    log_prob = torch.sum(log_prob - squash_correction, dim=1)
    return log_prob
def train(self, training_batch: rlt.PolicyNetworkInput) -> None:
    """
    IMPORTANT: the input action here is assumed to be preprocessed to match the
    range of the output of the actor.
    """
    assert isinstance(training_batch, rlt.PolicyNetworkInput)

    self.minibatch += 1

    state = training_batch.state
    action = training_batch.action
    next_state = training_batch.next_state
    reward = training_batch.reward
    not_terminal = training_batch.not_terminal

    # Generate target = r + y * min (Q1(s',pi(s')), Q2(s',pi(s')))
    with torch.no_grad():
        next_actor = self.actor_network_target(next_state).action
        noise = torch.randn_like(next_actor) * self.noise_variance
        next_actor = (next_actor + noise.clamp(*self.noise_clip_range)).clamp(
            *CONTINUOUS_TRAINING_ACTION_RANGE
        )
        next_state_actor = (next_state, rlt.FeatureData(next_actor))
        next_q_value = self.q1_network_target(*next_state_actor)

        if self.q2_network is not None:
            next_q_value = torch.min(
                next_q_value, self.q2_network_target(*next_state_actor)
            )

        target_q_value = reward + self.gamma * next_q_value * not_terminal.float()

    # Optimize Q1 and Q2
    # NOTE: important to zero here (instead of using _maybe_update)
    # since q1 may have accumulated gradients from actor network update
    self.q1_network_optimizer.zero_grad()
    q1_value = self.q1_network(state, action)
    q1_loss = self.q_network_loss(q1_value, target_q_value)
    q1_loss.backward()
    self.q1_network_optimizer.step()

    if self.q2_network:
        self.q2_network_optimizer.zero_grad()
        q2_value = self.q2_network(state, action)
        q2_loss = self.q_network_loss(q2_value, target_q_value)
        q2_loss.backward()
        self.q2_network_optimizer.step()

    # Only update actor and target networks after a fixed number of Q updates
    if self.minibatch % self.delayed_policy_update == 0:
        self.actor_network_optimizer.zero_grad()
        actor_action = self.actor_network(state).action
        actor_q1_value = self.q1_network(state, rlt.FeatureData(actor_action))
        actor_loss = -(actor_q1_value.mean())
        actor_loss.backward()
        self.actor_network_optimizer.step()

        self._soft_update(self.q1_network, self.q1_network_target, self.tau)
        self._soft_update(self.q2_network, self.q2_network_target, self.tau)
        self._soft_update(self.actor_network, self.actor_network_target, self.tau)

    # Logging at the end to schedule all the cuda operations first
    if (
        self.tensorboard_logging_freq != 0
        and self.minibatch % self.tensorboard_logging_freq == 0
    ):
        logs = {
            "loss/q1_loss": q1_loss,
            "loss/actor_loss": actor_loss,
            "q_value/q1_value": q1_value,
            "q_value/next_q_value": next_q_value,
            "q_value/target_q_value": target_q_value,
            "q_value/actor_q1_value": actor_q1_value,
        }
        if self.q2_network:
            logs.update({"loss/q2_loss": q2_loss, "q_value/q2_value": q2_value})

        for k, v in logs.items():
            v = v.detach().cpu()
            if v.dim() == 0:
                # pyre-fixme[16]: `SummaryWriterContext` has no attribute
                #  `add_scalar`.
                SummaryWriterContext.add_scalar(k, v.item())
                continue
            elif v.dim() == 2:
                v = v.squeeze(1)
            assert v.dim() == 1
            SummaryWriterContext.add_histogram(k, v.numpy())
            SummaryWriterContext.add_scalar(f"{k}_mean", v.mean().item())

    self.loss_reporter.report(
        td_loss=float(q1_loss),
        reward_loss=None,
        logged_rewards=reward,
        model_values_on_logged_actions=q1_value,
    )
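# NOTE: `_soft_update` comes from the trainer base class and is not shown in this
# excerpt. Below is a minimal sketch of the usual Polyak-averaging update such a
# helper performs, assuming `tau` is the interpolation factor toward the online
# network; the actual base-class implementation may differ.
import torch


def _soft_update(
    network: torch.nn.Module, target_network: torch.nn.Module, tau: float
) -> None:
    # target <- tau * online + (1 - tau) * target
    with torch.no_grad():
        for param, target_param in zip(
            network.parameters(), target_network.parameters()
        ):
            target_param.data.mul_(1.0 - tau).add_(tau * param.data)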
def train(self, training_batch: rlt.PolicyNetworkInput) -> None:
    """
    IMPORTANT: the input action here is assumed to match the range of the output
    of the actor.
    """
    if isinstance(training_batch, TrainingDataPage):
        training_batch = training_batch.as_policy_network_training_batch()

    assert isinstance(training_batch, rlt.PolicyNetworkInput)

    self.minibatch += 1

    state = training_batch.state
    action = training_batch.action
    reward = training_batch.reward
    discount = torch.full_like(reward, self.gamma)
    not_done_mask = training_batch.not_terminal

    # We need to zero out grad here because gradient from actor update
    # should not be used in Q-network update
    self.actor_network_optimizer.zero_grad()
    self.q1_network_optimizer.zero_grad()
    if self.q2_network is not None:
        self.q2_network_optimizer.zero_grad()
    if self.value_network is not None:
        self.value_network_optimizer.zero_grad()

    with torch.enable_grad():
        #
        # First, optimize Q networks; minimizing MSE between
        # Q(s, a) & r + discount * V'(next_s)
        #

        q1_value = self.q1_network(state, action)
        if self.q2_network:
            q2_value = self.q2_network(state, action)
        actor_output = self.actor_network(state)

        # Optimize Alpha
        if self.alpha_optimizer is not None:
            alpha_loss = -(
                (
                    self.log_alpha
                    * (actor_output.log_prob + self.target_entropy).detach()
                ).mean()
            )
            self.alpha_optimizer.zero_grad()
            alpha_loss.backward()
            self.alpha_optimizer.step()
            self.entropy_temperature = self.log_alpha.exp()

        with torch.no_grad():
            if self.value_network is not None:
                next_state_value = self.value_network_target(
                    training_batch.next_state.float_features
                )
            else:
                next_state_actor_output = self.actor_network(
                    training_batch.next_state
                )
                next_state_actor_action = (
                    training_batch.next_state,
                    rlt.FeatureData(next_state_actor_output.action),
                )
                next_state_value = self.q1_network_target(*next_state_actor_action)

                if self.q2_network is not None:
                    target_q2_value = self.q2_network_target(
                        *next_state_actor_action
                    )
                    next_state_value = torch.min(next_state_value, target_q2_value)

                log_prob_a = self.actor_network.get_log_prob(
                    training_batch.next_state, next_state_actor_output.action
                )
                log_prob_a = log_prob_a.clamp(-20.0, 20.0)
                next_state_value -= self.entropy_temperature * log_prob_a

            if self.gamma > 0.0:
                target_q_value = (
                    reward + discount * next_state_value * not_done_mask.float()
                )
            else:
                # This is useful in debugging instability issues
                target_q_value = reward

        q1_loss = F.mse_loss(q1_value, target_q_value)
        q1_loss.backward()
        self._maybe_run_optimizer(
            self.q1_network_optimizer, self.minibatches_per_step
        )
        if self.q2_network:
            # pyre-fixme[18]: Global name `q2_value` is undefined.
            q2_loss = F.mse_loss(q2_value, target_q_value)
            q2_loss.backward()
            self._maybe_run_optimizer(
                self.q2_network_optimizer, self.minibatches_per_step
            )

        #
        # Second, optimize the actor; minimizing KL-divergence between
        # propensity & softmax of value. Due to reparameterization trick,
        # it ends up being log_prob(actor_action) - Q(s, actor_action)
        #

        state_actor_action = (state, rlt.FeatureData(actor_output.action))
        q1_actor_value = self.q1_network(*state_actor_action)
        min_q_actor_value = q1_actor_value
        if self.q2_network:
            q2_actor_value = self.q2_network(*state_actor_action)
            min_q_actor_value = torch.min(q1_actor_value, q2_actor_value)

        actor_loss = (
            self.entropy_temperature * actor_output.log_prob - min_q_actor_value
        )
        # Do this in 2 steps so we can log histogram of actor loss
        actor_loss_mean = actor_loss.mean()

        if self.add_kld_to_loss:
            if self.apply_kld_on_mean:
                action_batch_m = torch.mean(actor_output.action_mean, axis=0)
                action_batch_v = torch.var(actor_output.action_mean, axis=0)
            else:
                action_batch_m = torch.mean(actor_output.action, axis=0)
                action_batch_v = torch.var(actor_output.action, axis=0)
            kld = (
                0.5
                # pyre-fixme[16]: `int` has no attribute `sum`.
                * (
                    (action_batch_v + (action_batch_m - self.action_emb_mean) ** 2)
                    / self.action_emb_variance
                    - 1
                    + self.action_emb_variance.log()
                    - action_batch_v.log()
                ).sum()
            )
            actor_loss_mean += self.kld_weight * kld

        actor_loss_mean.backward()
        self._maybe_run_optimizer(
            self.actor_network_optimizer, self.minibatches_per_step
        )

        #
        # Lastly, if applicable, optimize value network; minimizing MSE between
        # V(s) & E_a~pi(s) [ Q(s,a) - log(pi(a|s)) ]
        #

        if self.value_network is not None:
            state_value = self.value_network(state.float_features)

            if self.logged_action_uniform_prior:
                log_prob_a = torch.zeros_like(min_q_actor_value)
                target_value = min_q_actor_value
            else:
                with torch.no_grad():
                    log_prob_a = actor_output.log_prob
                    log_prob_a = log_prob_a.clamp(-20.0, 20.0)
                    target_value = (
                        min_q_actor_value - self.entropy_temperature * log_prob_a
                    )

            value_loss = F.mse_loss(state_value, target_value.detach())
            value_loss.backward()
            self._maybe_run_optimizer(
                self.value_network_optimizer, self.minibatches_per_step
            )

    # Use the soft update rule to update the target networks
    if self.value_network is not None:
        self._maybe_soft_update(
            self.value_network,
            self.value_network_target,
            self.tau,
            self.minibatches_per_step,
        )
    else:
        self._maybe_soft_update(
            self.q1_network,
            self.q1_network_target,
            self.tau,
            self.minibatches_per_step,
        )
        if self.q2_network is not None:
            self._maybe_soft_update(
                self.q2_network,
                self.q2_network_target,
                self.tau,
                self.minibatches_per_step,
            )

    # Logging at the end to schedule all the cuda operations first
    if (
        self.tensorboard_logging_freq != 0
        and self.minibatch % self.tensorboard_logging_freq == 0
    ):
        SummaryWriterContext.add_histogram("q1/logged_state_value", q1_value)
        if self.q2_network:
            SummaryWriterContext.add_histogram("q2/logged_state_value", q2_value)

        # pyre-fixme[16]: `SummaryWriterContext` has no attribute `add_scalar`.
        SummaryWriterContext.add_scalar(
            "entropy_temperature", self.entropy_temperature
        )
        SummaryWriterContext.add_histogram("log_prob_a", log_prob_a)
        if self.value_network:
            SummaryWriterContext.add_histogram("value_network/target", target_value)
        SummaryWriterContext.add_histogram(
            "q_network/next_state_value", next_state_value
        )
        SummaryWriterContext.add_histogram(
            "q_network/target_q_value", target_q_value
        )
        SummaryWriterContext.add_histogram(
            "actor/min_q_actor_value", min_q_actor_value
        )
        SummaryWriterContext.add_histogram(
            "actor/action_log_prob", actor_output.log_prob
        )
        SummaryWriterContext.add_histogram("actor/loss", actor_loss)
        if self.add_kld_to_loss:
            SummaryWriterContext.add_histogram("kld/mean", action_batch_m)
            SummaryWriterContext.add_histogram("kld/var", action_batch_v)
            SummaryWriterContext.add_scalar("kld/kld", kld)

    self.loss_reporter.report(
        td_loss=float(q1_loss),
        reward_loss=None,
        logged_rewards=reward,
        model_values_on_logged_actions=q1_value,
        model_propensities=actor_output.log_prob.exp(),
        model_values=min_q_actor_value,
    )
def test_swallowing_histogram_value_error(self):
    with TemporaryDirectory() as tmp_dir:
        writer = SummaryWriter(tmp_dir)
        with summary_writer_context(writer):
            # Division by zero produces inf values, which makes add_histogram
            # raise a ValueError internally; the context is expected to swallow it
            SummaryWriterContext.add_histogram(
                "bad_histogram", torch.ones(100, 1) / 0
            )
def dist(self, input: rlt.PreprocessedState):
    state = input.state.float_features
    x = state
    for i, activation in enumerate(self.activations[:-1]):
        if self.use_batch_norm:
            x = self.batch_norm_ops[i](x)
        x = self.layers[i](x)
        if activation == "linear":
            continue
        elif activation == "tanh":
            activation_func = torch.tanh
        else:
            activation_func = getattr(F, activation)
        x = activation_func(x)

    value = self.value(x).unsqueeze(dim=1)
    raw_advantage = self.advantage(x).reshape(-1, self.num_actions, self.num_atoms)
    advantage = raw_advantage - raw_advantage.mean(dim=1, keepdim=True)

    q_value = value + advantage

    if SummaryWriterContext._global_step % 1000 == 0:
        SummaryWriterContext.add_histogram(
            "dueling_network/{}/value".format(self._name),
            value.detach().mean(dim=2).cpu(),
        )
        SummaryWriterContext.add_scalar(
            "dueling_network/{}/mean_value".format(self._name),
            value.detach().mean().cpu(),
        )
        SummaryWriterContext.add_histogram(
            "dueling_network/{}/q_value".format(self._name),
            q_value.detach().mean(dim=2).cpu(),
        )
        SummaryWriterContext.add_scalar(
            "dueling_network/{}/mean_q_value".format(self._name),
            q_value.detach().mean().cpu(),
        )
        SummaryWriterContext.add_histogram(
            "dueling_network/{}/raw_advantage".format(self._name),
            raw_advantage.detach().mean(dim=2).cpu(),
        )
        SummaryWriterContext.add_scalar(
            "dueling_network/{}/mean_raw_advantage".format(self._name),
            raw_advantage.detach().mean().cpu(),
        )
        for i in range(advantage.shape[1]):
            a = advantage.detach()[:, i, :].mean(dim=1)
            SummaryWriterContext.add_histogram(
                "dueling_network/{}/advantage/{}".format(self._name, i), a.cpu()
            )
            SummaryWriterContext.add_scalar(
                "dueling_network/{}/mean_advantage/{}".format(self._name, i),
                a.mean().cpu(),
            )

    return q_value
def forward(self, input) -> Union[NamedTuple, torch.FloatTensor]:  # type: ignore
    output_tensor = False
    if self.parametric_action:
        state = input.state.float_features
        action = input.action.float_features
    else:
        state = input.state.float_features
        action = None

    x = state
    for i, activation in enumerate(self.activations[:-1]):
        if self.use_batch_norm:
            x = self.batch_norm_ops[i](x)
        x = self.layers[i](x)
        if activation == "linear":
            continue
        elif activation == "tanh":
            activation_func = torch.tanh
        else:
            activation_func = getattr(F, activation)
        x = activation_func(x)

    value = self.value(x)
    if action is not None:
        x = torch.cat((x, action), dim=1)
    raw_advantage = self.advantage(x)
    if self.parametric_action:
        advantage = raw_advantage
    else:
        advantage = raw_advantage - raw_advantage.mean(dim=1, keepdim=True)

    q_value = value + advantage

    if SummaryWriterContext._global_step % 1000 == 0:
        SummaryWriterContext.add_histogram(
            "dueling_network/{}/value".format(self._name), value.detach().cpu()
        )
        SummaryWriterContext.add_scalar(
            "dueling_network/{}/mean_value".format(self._name),
            value.detach().mean().cpu(),
        )
        SummaryWriterContext.add_histogram(
            "dueling_network/{}/q_value".format(self._name), q_value.detach().cpu()
        )
        SummaryWriterContext.add_scalar(
            "dueling_network/{}/mean_q_value".format(self._name),
            q_value.detach().mean().cpu(),
        )
        SummaryWriterContext.add_histogram(
            "dueling_network/{}/raw_advantage".format(self._name),
            raw_advantage.detach().cpu(),
        )
        SummaryWriterContext.add_scalar(
            "dueling_network/{}/mean_raw_advantage".format(self._name),
            raw_advantage.detach().mean().cpu(),
        )
        if not self.parametric_action:
            advantage = advantage.detach()
            for i in range(advantage.shape[1]):
                a = advantage[:, i]
                SummaryWriterContext.add_histogram(
                    "dueling_network/{}/advantage/{}".format(self._name, i), a.cpu()
                )
                SummaryWriterContext.add_scalar(
                    "dueling_network/{}/mean_advantage/{}".format(self._name, i),
                    a.mean().cpu(),
                )

    if output_tensor:
        return q_value  # type: ignore
    elif self.parametric_action:
        return rlt.SingleQValue(q_value=q_value)  # type: ignore
    else:
        return rlt.AllActionQValues(q_values=q_value)  # type: ignore
def train(self, training_batch) -> None:
    """
    IMPORTANT: the input action here is assumed to be preprocessed to match the
    range of the output of the actor.
    """
    if hasattr(training_batch, "as_policy_network_training_batch"):
        training_batch = training_batch.as_policy_network_training_batch()

    learning_input = training_batch.training_input
    self.minibatch += 1

    state = learning_input.state
    action = learning_input.action
    next_state = learning_input.next_state
    reward = learning_input.reward
    not_done_mask = learning_input.not_terminal

    action = self._maybe_scale_action_in_train(action.float_features)

    max_action = (
        self.max_action_range_tensor_training
        if self.max_action_range_tensor_training
        else torch.ones(action.shape, device=self.device)
    )
    min_action = (
        self.min_action_range_tensor_serving
        if self.min_action_range_tensor_serving
        else -torch.ones(action.shape, device=self.device)
    )

    # Compute current value estimates
    current_state_action = rlt.PreprocessedStateAction(
        state=state, action=rlt.PreprocessedFeatureVector(float_features=action)
    )
    q1_value = self.q1_network(current_state_action).q_value
    if self.q2_network:
        q2_value = self.q2_network(current_state_action).q_value
    actor_action = self.actor_network(rlt.PreprocessedState(state=state)).action

    # Generate target = r + y * min (Q1(s',pi(s')), Q2(s',pi(s')))
    with torch.no_grad():
        next_actor = self.actor_network_target(
            rlt.PreprocessedState(state=next_state)
        ).action
        next_actor += (
            torch.randn_like(next_actor) * self.target_policy_smoothing
        ).clamp(-self.noise_clip, self.noise_clip)
        next_actor = torch.max(torch.min(next_actor, max_action), min_action)
        next_state_actor = rlt.PreprocessedStateAction(
            state=next_state,
            action=rlt.PreprocessedFeatureVector(float_features=next_actor),
        )
        next_state_value = self.q1_network_target(next_state_actor).q_value

        if self.q2_network is not None:
            next_state_value = torch.min(
                next_state_value, self.q2_network_target(next_state_actor).q_value
            )

        target_q_value = (
            reward + self.gamma * next_state_value * not_done_mask.float()
        )

    # Optimize Q1 and Q2
    q1_loss = F.mse_loss(q1_value, target_q_value)
    q1_loss.backward()
    self._maybe_run_optimizer(self.q1_network_optimizer, self.minibatches_per_step)
    if self.q2_network:
        q2_loss = F.mse_loss(q2_value, target_q_value)
        q2_loss.backward()
        self._maybe_run_optimizer(
            self.q2_network_optimizer, self.minibatches_per_step
        )

    # Only update actor and target networks after a fixed number of Q updates
    if self.minibatch % self.delayed_policy_update == 0:
        actor_loss = -self.q1_network(
            rlt.PreprocessedStateAction(
                state=state,
                action=rlt.PreprocessedFeatureVector(float_features=actor_action),
            )
        ).q_value.mean()

        actor_loss.backward()
        self._maybe_run_optimizer(
            self.actor_network_optimizer, self.minibatches_per_step
        )

        # Use the soft update rule to update the target networks
        self._maybe_soft_update(
            self.q1_network,
            self.q1_network_target,
            self.tau,
            self.minibatches_per_step,
        )
        self._maybe_soft_update(
            self.actor_network,
            self.actor_network_target,
            self.tau,
            self.minibatches_per_step,
        )
        if self.q2_network is not None:
            self._maybe_soft_update(
                self.q2_network,
                self.q2_network_target,
                self.tau,
                self.minibatches_per_step,
            )

    # Logging at the end to schedule all the cuda operations first
    if (
        self.tensorboard_logging_freq != 0
        and self.minibatch % self.tensorboard_logging_freq == 0
    ):
        SummaryWriterContext.add_histogram("q1/logged_state_value", q1_value)
        if self.q2_network:
            SummaryWriterContext.add_histogram("q2/logged_state_value", q2_value)
        SummaryWriterContext.add_histogram(
            "q_network/next_state_value", next_state_value
        )
        SummaryWriterContext.add_histogram(
            "q_network/target_q_value", target_q_value
        )
        SummaryWriterContext.add_histogram("actor/loss", actor_loss)

    self.loss_reporter.report(
        td_loss=float(q1_loss),
        reward_loss=None,
        logged_rewards=reward,
        model_values_on_logged_actions=q1_value,
    )
def _log_histogram_and_mean(name, key, x):
    SummaryWriterContext.add_histogram(
        f"dueling_network/{name}/{key}", x.detach().cpu()
    )
    SummaryWriterContext.add_scalar(
        f"dueling_network/{name}/mean_{key}", x.detach().mean().cpu()
    )
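# For illustration only: a hypothetical call site showing how this helper would
# collapse the repeated add_histogram/add_scalar pairs in the dueling-network
# logging above. The key names follow the ones used in `forward`; this snippet
# is not taken from the source.
if SummaryWriterContext._global_step % 1000 == 0:
    _log_histogram_and_mean(self._name, "value", value)
    _log_histogram_and_mean(self._name, "q_value", q_value)
    _log_histogram_and_mean(self._name, "raw_advantage", raw_advantage)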