def test_rescale_torch_tensor(self):
    rows, cols = 3, 5
    original_tensor = torch.randint(low=10, high=40, size=(rows, cols)).float()
    prev_max_tensor = torch.ones(1, 5) * 40.0
    prev_min_tensor = torch.ones(1, 5) * 10.0
    new_min_tensor = torch.ones(1, 5) * -1.0
    new_max_tensor = torch.ones(1, 5).float()
    print("Original tensor: ", original_tensor)
    rescaled_tensor = rescale_torch_tensor(
        original_tensor,
        new_min_tensor,
        new_max_tensor,
        prev_min_tensor,
        prev_max_tensor,
    )
    print("Rescaled tensor: ", rescaled_tensor)
    reconstructed_original_tensor = rescale_torch_tensor(
        rescaled_tensor,
        prev_min_tensor,
        prev_max_tensor,
        new_min_tensor,
        new_max_tensor,
    )
    print("Reconstructed Original tensor: ", reconstructed_original_tensor)
    comparison_tensor = torch.eq(original_tensor, reconstructed_original_tensor)
    # Every element should survive the round trip exactly.
    self.assertEqual(torch.sum(comparison_tensor).item(), rows * cols)
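rescale_torch_tensor itself is not shown in this section; the round-trip test above only makes sense if it performs an element-wise linear min-max rescaling. A minimal sketch of such a helper, assuming that argument order and broadcasting behavior, could look like this:

import torch

def rescale_torch_tensor(tensor, new_min, new_max, prev_min, prev_max):
    # Hypothetical sketch: map each column from [prev_min, prev_max] to
    # [new_min, new_max] with a linear transform; the (1, cols) min/max
    # tensors broadcast against the (rows, cols) input.
    prev_range = prev_max - prev_min
    new_range = new_max - new_min
    return ((tensor - prev_min) / prev_range) * new_range + new_min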
def internal_prediction(self, states, test=False):
    """ Returns list of actions output from actor network
    :param states states as list of states to produce actions for
    """
    self.actor_network.eval()
    with torch.no_grad():
        actions = self.actor_network(
            rlt.PreprocessedState.from_tensor(states)
        ).action

    if not test:
        if self.minibatch < self.initial_exploration_ts:
            actions = (
                torch.rand_like(actions)
                * (
                    self.max_action_range_tensor_training
                    - self.min_action_range_tensor_training
                )
                + self.min_action_range_tensor_training
            )
        else:
            actions += torch.randn_like(actions) * self.exploration_noise

    # clamp actions to make sure actions are in the range
    clamped_actions = torch.max(
        torch.min(actions, self.max_action_range_tensor_training),
        self.min_action_range_tensor_training,
    )
    rescaled_actions = rescale_torch_tensor(
        clamped_actions,
        new_min=self.min_action_range_tensor_serving,
        new_max=self.max_action_range_tensor_serving,
        prev_min=self.min_action_range_tensor_training,
        prev_max=self.max_action_range_tensor_training,
    )

    self.actor_network.train()
    return rescaled_actions
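The clamp in internal_prediction (and in the samplers below) is written as a max/min composition rather than torch.clamp, presumably because the bounds are per-dimension tensors rather than scalars. A tiny standalone illustration of that pattern:

import torch

# Illustrative only: element-wise clamp against tensor-valued action bounds.
# torch.max(torch.min(x, hi), lo) clamps each element of x into [lo, hi],
# where lo and hi broadcast against x.
x = torch.tensor([[-3.0, 0.5, 7.0]])
lo = torch.tensor([[-1.0, -1.0, -1.0]])
hi = torch.tensor([[1.0, 1.0, 1.0]])
clamped = torch.max(torch.min(x, hi), lo)  # tensor([[-1.0, 0.5, 1.0]])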
def _maybe_scale_action_in_train(self, action):
    if (
        self.min_action_range_tensor_training is not None
        and self.max_action_range_tensor_training is not None
        and self.min_action_range_tensor_serving is not None
        and self.max_action_range_tensor_serving is not None
    ):
        action = rescale_torch_tensor(
            action,
            new_min=self.min_action_range_tensor_training,
            new_max=self.max_action_range_tensor_training,
            prev_min=self.min_action_range_tensor_serving,
            prev_max=self.max_action_range_tensor_serving,
        )
    return action
def sample_action(self, scores: GaussianSamplerScore) -> rlt.ActorOutput:
    self.actor_network.eval()
    action, log_prob = self._sample_action(scores.loc, scores.scale_log)

    # clamp actions to make sure actions are in the range
    clamped_actions = torch.max(
        torch.min(action, self.max_training_action), self.min_training_action
    )
    rescaled_actions = rescale_torch_tensor(
        clamped_actions,
        new_min=self.min_serving_action,
        new_max=self.max_serving_action,
        prev_min=self.min_training_action,
        prev_max=self.max_training_action,
    )

    self.actor_network.train()
    action = rescaled_actions.squeeze(0)
    log_prob = torch.tensor(log_prob.item())
    return rlt.ActorOutput(action=action, log_prob=log_prob)
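The helper _sample_action is not shown in this section. A plausible sketch, assuming the standard soft actor-critic construction of a reparameterized Gaussian with tanh squashing (names and details here are hypothetical, not the library's actual implementation):

import torch

def _sample_action_sketch(loc, scale_log):
    # Hypothetical sketch: sample via the reparameterization trick, squash
    # with tanh, and correct the log-probability for the tanh transform.
    noise = torch.randn_like(scale_log)
    raw_action = loc + noise * scale_log.exp()   # reparameterized Gaussian sample
    squashed = torch.tanh(raw_action)            # squash into (-1, 1)
    # Gaussian log-density minus the log-det of the tanh transform,
    # summed over action dimensions.
    log_prob = torch.distributions.Normal(loc, scale_log.exp()).log_prob(raw_action)
    log_prob = log_prob - torch.log(1 - squashed.pow(2) + 1e-6)
    return squashed, log_prob.sum(dim=1, keepdim=True)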
def internal_prediction(self, states, test=False):
    """ Returns list of actions output from actor network
    :param states states as list of states to produce actions for
    """
    self.actor_network.eval()
    with torch.no_grad():
        actions = self.actor_network(rlt.PreprocessedState.from_tensor(states))

    # clamp actions to make sure actions are in the range
    clamped_actions = torch.max(
        torch.min(actions.action, self.max_action_range_tensor_training),
        self.min_action_range_tensor_training,
    )
    rescaled_actions = rescale_torch_tensor(
        clamped_actions,
        new_min=self.min_action_range_tensor_serving,
        new_max=self.max_action_range_tensor_serving,
        prev_min=self.min_action_range_tensor_training,
        prev_max=self.max_action_range_tensor_training,
    )

    self.actor_network.train()
    return rescaled_actions
def train(self, training_batch) -> None:
    """
    IMPORTANT: the input action here is assumed to be preprocessed to match the
    range of the output of the actor.
    """
    if hasattr(training_batch, "as_policy_network_training_batch"):
        training_batch = training_batch.as_policy_network_training_batch()

    learning_input = training_batch.training_input
    self.minibatch += 1

    state = learning_input.state
    action = learning_input.action
    reward = learning_input.reward
    discount = torch.full_like(reward, self.gamma)
    not_done_mask = learning_input.not_terminal

    if self._should_scale_action_in_train():
        action = action._replace(
            float_features=rescale_torch_tensor(
                action.float_features,
                new_min=self.min_action_range_tensor_training,
                new_max=self.max_action_range_tensor_training,
                prev_min=self.min_action_range_tensor_serving,
                prev_max=self.max_action_range_tensor_serving,
            )
        )

    with torch.enable_grad():
        #
        # First, optimize Q networks; minimizing MSE between
        # Q(s, a) & r + discount * V'(next_s)
        #

        current_state_action = rlt.PreprocessedStateAction(state=state, action=action)

        q1_value = self.q1_network(current_state_action).q_value
        if self.q2_network:
            q2_value = self.q2_network(current_state_action).q_value
        actor_output = self.actor_network(rlt.PreprocessedState(state=state))

        # Optimize Alpha
        if self.alpha_optimizer is not None:
            alpha_loss = -(
                self.log_alpha
                * (actor_output.log_prob + self.target_entropy).detach()
            ).mean()
            self.alpha_optimizer.zero_grad()
            alpha_loss.backward()
            self.alpha_optimizer.step()
            self.entropy_temperature = self.log_alpha.exp()

        with torch.no_grad():
            if self.value_network is not None:
                next_state_value = self.value_network_target(
                    learning_input.next_state.float_features
                )
            else:
                next_state_actor_output = self.actor_network(
                    rlt.PreprocessedState(state=learning_input.next_state)
                )
                next_state_actor_action = rlt.PreprocessedStateAction(
                    state=learning_input.next_state,
                    action=rlt.PreprocessedFeatureVector(
                        float_features=next_state_actor_output.action
                    ),
                )
                next_state_value = self.q1_network_target(
                    next_state_actor_action
                ).q_value

                if self.q2_network is not None:
                    target_q2_value = self.q2_network_target(
                        next_state_actor_action
                    ).q_value
                    next_state_value = torch.min(next_state_value, target_q2_value)

                log_prob_a = self.actor_network.get_log_prob(
                    learning_input.next_state, next_state_actor_output.action
                )
                log_prob_a = log_prob_a.clamp(-20.0, 20.0)
                next_state_value -= self.entropy_temperature * log_prob_a

            target_q_value = (
                reward + discount * next_state_value * not_done_mask.float()
            )

        q1_loss = F.mse_loss(q1_value, target_q_value)
        q1_loss.backward()
        self._maybe_run_optimizer(self.q1_network_optimizer, self.minibatches_per_step)
        if self.q2_network:
            q2_loss = F.mse_loss(q2_value, target_q_value)
            q2_loss.backward()
            self._maybe_run_optimizer(
                self.q2_network_optimizer, self.minibatches_per_step
            )

        #
        # Second, optimize the actor; minimizing KL-divergence between action
        # propensity & softmax of value. Due to reparameterization trick, it ends
        # up being log_prob(actor_action) - Q(s, actor_action)
        #

        state_actor_action = rlt.PreprocessedStateAction(
            state=state,
            action=rlt.PreprocessedFeatureVector(
                float_features=actor_output.action
            ),
        )
        q1_actor_value = self.q1_network(state_actor_action).q_value
        min_q_actor_value = q1_actor_value
        if self.q2_network:
            q2_actor_value = self.q2_network(state_actor_action).q_value
            min_q_actor_value = torch.min(q1_actor_value, q2_actor_value)

        actor_loss = (
            self.entropy_temperature * actor_output.log_prob - min_q_actor_value
        )
        # Do this in 2 steps so we can log histogram of actor loss
        actor_loss_mean = actor_loss.mean()
        actor_loss_mean.backward()
        self._maybe_run_optimizer(
            self.actor_network_optimizer, self.minibatches_per_step
        )

        #
        # Lastly, if applicable, optimize value network; minimizing MSE between
        # V(s) & E_a~pi(s) [ Q(s,a) - log(pi(a|s)) ]
        #

        if self.value_network is not None:
            state_value = self.value_network(state.float_features)

            if self.logged_action_uniform_prior:
                log_prob_a = torch.zeros_like(min_q_actor_value)
                target_value = min_q_actor_value
            else:
                with torch.no_grad():
                    log_prob_a = actor_output.log_prob
                    log_prob_a = log_prob_a.clamp(-20.0, 20.0)
                    target_value = (
                        min_q_actor_value - self.entropy_temperature * log_prob_a
                    )

            value_loss = F.mse_loss(state_value, target_value.detach())
            value_loss.backward()
            self._maybe_run_optimizer(
                self.value_network_optimizer, self.minibatches_per_step
            )

    # Use the soft update rule to update the target networks
    if self.value_network is not None:
        self._maybe_soft_update(
            self.value_network,
            self.value_network_target,
            self.tau,
            self.minibatches_per_step,
        )
    else:
        self._maybe_soft_update(
            self.q1_network,
            self.q1_network_target,
            self.tau,
            self.minibatches_per_step,
        )
        if self.q2_network is not None:
            self._maybe_soft_update(
                self.q2_network,
                self.q2_network_target,
                self.tau,
                self.minibatches_per_step,
            )

    # Logging at the end to schedule all the cuda operations first
    if (
        self.tensorboard_logging_freq is not None
        and self.minibatch % self.tensorboard_logging_freq == 0
    ):
        SummaryWriterContext.add_histogram("q1/logged_state_value", q1_value)
        if self.q2_network:
            SummaryWriterContext.add_histogram("q2/logged_state_value", q2_value)

        SummaryWriterContext.add_histogram("log_prob_a", log_prob_a)
        if self.value_network:
            SummaryWriterContext.add_histogram("value_network/target", target_value)

        SummaryWriterContext.add_histogram(
            "q_network/next_state_value", next_state_value
        )
        SummaryWriterContext.add_histogram(
            "q_network/target_q_value", target_q_value
        )
        SummaryWriterContext.add_histogram(
            "actor/min_q_actor_value", min_q_actor_value
        )
        SummaryWriterContext.add_histogram(
            "actor/action_log_prob", actor_output.log_prob
        )
        SummaryWriterContext.add_histogram("actor/loss", actor_loss)

    self.loss_reporter.report(
        td_loss=float(q1_loss),
        reward_loss=None,
        logged_rewards=reward,
        model_values_on_logged_actions=q1_value,
        model_propensities=actor_output.log_prob.exp(),
        model_values=min_q_actor_value,
    )
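_maybe_soft_update is not defined in this section. Judging from its call sites above (online network, target network, tau, minibatches_per_step), a plausible sketch is a Polyak-averaging update applied once every minibatches_per_step gradient steps; this is only an assumed reconstruction, not the library's actual code:

import torch

def _maybe_soft_update(self, network, target_network, tau, minibatches_per_step):
    # Hypothetical sketch: update the target only every `minibatches_per_step`
    # minibatches, blending parameters with the usual Polyak rule
    #   target <- tau * online + (1 - tau) * target
    if self.minibatch % minibatches_per_step != 0:
        return
    with torch.no_grad():
        for param, target_param in zip(
            network.parameters(), target_network.parameters()
        ):
            target_param.copy_(tau * param + (1 - tau) * target_param)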