def log_to_tensorboard(self, metric_name: str) -> None:
    self.check_estimates_exist()

    def none_to_zero(x: Optional[float]) -> float:
        if x is None or math.isnan(x):
            return 0.0
        return x

    for name, value in [
        (
            "CPE/{}/Direct_Method_Reward".format(metric_name),
            # pyre-fixme[16]: `Optional` has no attribute `normalized`.
            self.direct_method.normalized,
        ),
        (
            "CPE/{}/IPS_Reward".format(metric_name),
            self.inverse_propensity.normalized,
        ),
        (
            "CPE/{}/Doubly_Robust_Reward".format(metric_name),
            self.doubly_robust.normalized,
        ),
        (
            "CPE/{}/Sequential_Doubly_Robust".format(metric_name),
            self.sequential_doubly_robust.normalized,
        ),
        (
            "CPE/{}/Weighted_Sequential_Doubly_Robust".format(metric_name),
            self.weighted_doubly_robust.normalized,
        ),
        ("CPE/{}/MAGIC".format(metric_name), self.magic.normalized),
    ]:
        SummaryWriterContext.add_scalar(name, none_to_zero(value))
def test_swallowing_exception(self):
    with TemporaryDirectory() as tmp_dir:
        writer = SummaryWriter(tmp_dir)
        writer.add_scalar = MagicMock(side_effect=NotImplementedError("test"))
        writer.exceptions_to_ignore = (NotImplementedError, KeyError)
        with summary_writer_context(writer):
            SummaryWriterContext.add_scalar("test", torch.ones(1))
def test_writing(self):
    with TemporaryDirectory() as tmp_dir:
        writer = SummaryWriter(tmp_dir)
        writer.add_scalar = MagicMock()
        with summary_writer_context(writer):
            SummaryWriterContext.add_scalar("test", torch.ones(1))
        writer.add_scalar.assert_called_once_with(
            "test", torch.ones(1), global_step=0
        )
def test_not_swallowing_exception(self):
    with TemporaryDirectory() as tmp_dir:
        writer = SummaryWriter(tmp_dir)
        writer.add_scalar = MagicMock(side_effect=NotImplementedError("test"))
        with self.assertRaisesRegex(
            NotImplementedError, "test"
        ), summary_writer_context(writer):
            SummaryWriterContext.add_scalar("test", torch.ones(1))
def _log_histogram_and_mean(self, log_key, val):
    try:
        SummaryWriterContext.add_histogram(log_key, val)
        SummaryWriterContext.add_scalar(f"{log_key}/mean", val.mean())
    except ValueError:
        logger.warning(
            f"Cannot create histogram for key: {log_key}; "
            "this is likely because you have NULL value in your input; "
            f"value: {val}"
        )
        raise
def write_summary(self, actions: List[str]):
    if actions:
        for field, log_key in [
            ("logged_actions", "actions/logged"),
            ("model_action_idxs", "actions/model"),
        ]:
            val = getattr(self, field)
            if val is None:
                continue
            for i, action in enumerate(actions):
                # pyre-fixme[16]: `SummaryWriterContext` has no attribute
                #  `add_scalar`.
                SummaryWriterContext.add_scalar(
                    "{}/{}".format(log_key, action), (val == i).sum().item()
                )

    for field, log_key in [
        ("td_loss", "td_loss"),
        ("imitator_loss", "imitator_loss"),
        ("reward_loss", "reward_loss"),
        ("logged_propensities", "propensities/logged"),
        ("logged_rewards", "reward/logged"),
        ("logged_values", "value/logged"),
        ("model_values_on_logged_actions", "value/model_logged_action"),
    ]:
        val = getattr(self, field)
        if val is None:
            continue
        assert len(val.shape) == 1 or (
            len(val.shape) == 2 and val.shape[1] == 1
        ), "Unexpected shape for {}: {}".format(field, val.shape)
        self._log_histogram_and_mean(log_key, val)

    for field, log_key in [
        ("model_propensities", "propensities/model"),
        ("model_rewards", "reward/model"),
        ("model_values", "value/model"),
    ]:
        val = getattr(self, field)
        if val is None:
            continue
        if (
            len(val.shape) == 1 or (len(val.shape) == 2 and val.shape[1] == 1)
        ) and not actions:
            self._log_histogram_and_mean(log_key, val)
        elif len(val.shape) == 2 and val.shape[1] == len(actions):
            for i, action in enumerate(actions):
                self._log_histogram_and_mean(f"{log_key}/{action}", val[:, i])
        else:
            raise ValueError(
                "Unexpected shape for {}: {}; actions: {}".format(
                    field, val.shape, actions
                )
            )
def log_to_tensorboard(self, epoch: int) -> None:
    def none_to_zero(x: Optional[float]) -> float:
        if x is None or math.isnan(x):
            return 0.0
        return x

    for name, value in [
        ("Training/td_loss", self.get_recent_td_loss()),
        ("Training/reward_loss", self.get_recent_reward_loss()),
        ("Training/imitator_loss", self.get_recent_imitator_loss()),
    ]:
        SummaryWriterContext.add_scalar(name, none_to_zero(value), epoch)
def test_global_step(self):
    with TemporaryDirectory() as tmp_dir:
        writer = SummaryWriter(tmp_dir)
        writer.add_scalar = MagicMock()
        with summary_writer_context(writer):
            SummaryWriterContext.add_scalar("test", torch.ones(1))
            SummaryWriterContext.increase_global_step()
            SummaryWriterContext.add_scalar("test", torch.zeros(1))
        writer.add_scalar.assert_has_calls(
            [
                call("test", torch.ones(1), global_step=0),
                call("test", torch.zeros(1), global_step=1),
            ]
        )
        self.assertEqual(2, len(writer.add_scalar.mock_calls))
def test_writing_stack(self):
    with TemporaryDirectory() as tmp_dir1, TemporaryDirectory() as tmp_dir2:
        writer1 = SummaryWriter(tmp_dir1)
        writer1.add_scalar = MagicMock()
        writer2 = SummaryWriter(tmp_dir2)
        writer2.add_scalar = MagicMock()
        with summary_writer_context(writer1):
            with summary_writer_context(writer2):
                SummaryWriterContext.add_scalar("test2", torch.ones(1))
            SummaryWriterContext.add_scalar("test1", torch.zeros(1))
        writer1.add_scalar.assert_called_once_with(
            "test1", torch.zeros(1), global_step=0
        )
        writer2.add_scalar.assert_called_once_with(
            "test2", torch.ones(1), global_step=0
        )
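# Illustrative usage sketch (not part of the test suite above) of the behavior
# these tests exercise: summary_writer_context pushes a writer onto a stack,
# SummaryWriterContext.add_scalar forwards to the innermost active writer, and
# leaving the inner context restores the outer one. The log directories are
# hypothetical; SummaryWriter, summary_writer_context, and SummaryWriterContext
# are assumed to be imported as in the tests above.
main_writer = SummaryWriter("runs/train")
eval_writer = SummaryWriter("runs/eval")
with summary_writer_context(main_writer):
    SummaryWriterContext.add_scalar("loss", 0.5)  # written to runs/train
    with summary_writer_context(eval_writer):
        SummaryWriterContext.add_scalar("reward", 1.0)  # written to runs/eval
    SummaryWriterContext.add_scalar("loss", 0.4)  # back to runs/train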
def forward(self, input) -> Union[NamedTuple, torch.FloatTensor]:  # type: ignore
    output_tensor = False
    if self.parametric_action:
        state = input.state.float_features
        action = input.action.float_features
    else:
        state = input.state.float_features
        action = None

    x = state
    for i, activation in enumerate(self.activations[:-1]):
        if self.use_batch_norm:
            x = self.batch_norm_ops[i](x)
        x = self.layers[i](x)
        if activation == "linear":
            continue
        elif activation == "tanh":
            activation_func = torch.tanh
        else:
            activation_func = getattr(F, activation)
        x = activation_func(x)

    value = self.value(x)
    if action is not None:
        x = torch.cat((x, action), dim=1)
    raw_advantage = self.advantage(x)
    if self.parametric_action:
        advantage = raw_advantage
    else:
        advantage = raw_advantage - raw_advantage.mean(dim=1, keepdim=True)

    # Dueling aggregation: Q(s, a) = V(s) + A(s, a), where for discrete-action
    # (non-parametric) networks the advantage is mean-centered across actions.
    q_value = value + advantage

    if SummaryWriterContext._global_step % 1000 == 0:
        SummaryWriterContext.add_histogram(
            "dueling_network/{}/value".format(self._name), value.detach().cpu()
        )
        SummaryWriterContext.add_scalar(
            "dueling_network/{}/mean_value".format(self._name),
            value.detach().mean().cpu(),
        )
        SummaryWriterContext.add_histogram(
            "dueling_network/{}/q_value".format(self._name),
            q_value.detach().cpu(),
        )
        SummaryWriterContext.add_scalar(
            "dueling_network/{}/mean_q_value".format(self._name),
            q_value.detach().mean().cpu(),
        )
        SummaryWriterContext.add_histogram(
            "dueling_network/{}/raw_advantage".format(self._name),
            raw_advantage.detach().cpu(),
        )
        SummaryWriterContext.add_scalar(
            "dueling_network/{}/mean_raw_advantage".format(self._name),
            raw_advantage.detach().mean().cpu(),
        )
        if not self.parametric_action:
            advantage = advantage.detach()
            for i in range(advantage.shape[1]):
                a = advantage[:, i]
                SummaryWriterContext.add_histogram(
                    "dueling_network/{}/advantage/{}".format(self._name, i),
                    a.cpu(),
                )
                SummaryWriterContext.add_scalar(
                    "dueling_network/{}/mean_advantage/{}".format(self._name, i),
                    a.mean().cpu(),
                )

    if output_tensor:
        return q_value  # type: ignore
    elif self.parametric_action:
        return rlt.SingleQValue(q_value=q_value)  # type: ignore
    else:
        return rlt.AllActionQValues(q_values=q_value)  # type: ignore
def aggregate(self, values):
    for i, action in enumerate(self.actions):
        SummaryWriterContext.add_scalar(
            f"{self.log_key}/{action}", (values == i).sum().item()
        )
def update(self, key: str, value):
    # pyre-fixme[16]: `SummaryWriterContext` has no attribute `add_scalar`.
    SummaryWriterContext.add_scalar(self.logging_key, value)
def update(self, key: str, value):
    SummaryWriterContext.add_scalar(self.logging_key, value)
def test_with_none(self):
    with summary_writer_context(None):
        self.assertIsNone(SummaryWriterContext.add_scalar("test", torch.ones(1)))
def test_noop(self):
    self.assertIsNone(SummaryWriterContext.add_scalar("test", torch.ones(1)))
def _log_histogram_and_mean(name, key, x):
    SummaryWriterContext.add_histogram(
        f"dueling_network/{name}/{key}", x.detach().cpu()
    )
    SummaryWriterContext.add_scalar(
        f"dueling_network/{name}/mean_{key}", x.detach().mean().cpu()
    )
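# Hedged sketch (not from the source): a hypothetical wrapper showing how the
# helper above could replace the repeated add_histogram/add_scalar pairs in the
# dueling-network forward()/dist() methods elsewhere in this section. The
# wrapper name is invented; the tensors stand in for the locals used there.
def _log_dueling_tensors(name, value, q_value, raw_advantage):
    if SummaryWriterContext._global_step % 1000 != 0:
        return
    _log_histogram_and_mean(name, "value", value)
    _log_histogram_and_mean(name, "q_value", q_value)
    _log_histogram_and_mean(name, "raw_advantage", raw_advantage)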
def train(self, training_batch: rlt.PolicyNetworkInput) -> None:
    """
    IMPORTANT: the input action here is assumed to match the range
    of the output of the actor.
    """
    if isinstance(training_batch, TrainingDataPage):
        training_batch = training_batch.as_policy_network_training_batch()
    assert isinstance(training_batch, rlt.PolicyNetworkInput)

    self.minibatch += 1

    state = training_batch.state
    action = training_batch.action
    reward = training_batch.reward
    discount = torch.full_like(reward, self.gamma)
    not_done_mask = training_batch.not_terminal

    # We need to zero out grad here because gradient from actor update
    # should not be used in Q-network update
    self.actor_network_optimizer.zero_grad()
    self.q1_network_optimizer.zero_grad()
    if self.q2_network is not None:
        self.q2_network_optimizer.zero_grad()
    if self.value_network is not None:
        self.value_network_optimizer.zero_grad()

    with torch.enable_grad():
        #
        # First, optimize Q networks; minimizing MSE between
        # Q(s, a) & r + discount * V'(next_s)
        #
        q1_value = self.q1_network(state, action)
        if self.q2_network:
            q2_value = self.q2_network(state, action)
        actor_output = self.actor_network(state)

        # Optimize Alpha
        if self.alpha_optimizer is not None:
            alpha_loss = -(
                (
                    self.log_alpha
                    * (actor_output.log_prob + self.target_entropy).detach()
                ).mean()
            )
            self.alpha_optimizer.zero_grad()
            alpha_loss.backward()
            self.alpha_optimizer.step()
            self.entropy_temperature = self.log_alpha.exp()

        with torch.no_grad():
            if self.value_network is not None:
                next_state_value = self.value_network_target(
                    training_batch.next_state.float_features
                )
            else:
                next_state_actor_output = self.actor_network(
                    training_batch.next_state
                )
                next_state_actor_action = (
                    training_batch.next_state,
                    rlt.FeatureData(next_state_actor_output.action),
                )
                next_state_value = self.q1_network_target(*next_state_actor_action)

                if self.q2_network is not None:
                    target_q2_value = self.q2_network_target(
                        *next_state_actor_action
                    )
                    next_state_value = torch.min(next_state_value, target_q2_value)

                log_prob_a = self.actor_network.get_log_prob(
                    training_batch.next_state, next_state_actor_output.action
                )
                log_prob_a = log_prob_a.clamp(-20.0, 20.0)
                next_state_value -= self.entropy_temperature * log_prob_a

        if self.gamma > 0.0:
            target_q_value = (
                reward + discount * next_state_value * not_done_mask.float()
            )
        else:
            # This is useful in debugging instability issues
            target_q_value = reward

        q1_loss = F.mse_loss(q1_value, target_q_value)
        q1_loss.backward()
        self._maybe_run_optimizer(
            self.q1_network_optimizer, self.minibatches_per_step
        )
        if self.q2_network:
            # pyre-fixme[18]: Global name `q2_value` is undefined.
            q2_loss = F.mse_loss(q2_value, target_q_value)
            q2_loss.backward()
            self._maybe_run_optimizer(
                self.q2_network_optimizer, self.minibatches_per_step
            )

        #
        # Second, optimize the actor; minimizing KL-divergence between
        # propensity & softmax of value. Due to reparameterization trick,
        # it ends up being log_prob(actor_action) - Q(s, actor_action)
        #
        state_actor_action = (state, rlt.FeatureData(actor_output.action))
        q1_actor_value = self.q1_network(*state_actor_action)
        min_q_actor_value = q1_actor_value
        if self.q2_network:
            q2_actor_value = self.q2_network(*state_actor_action)
            min_q_actor_value = torch.min(q1_actor_value, q2_actor_value)

        actor_loss = (
            self.entropy_temperature * actor_output.log_prob - min_q_actor_value
        )
        # Do this in 2 steps so we can log histogram of actor loss
        actor_loss_mean = actor_loss.mean()

        if self.add_kld_to_loss:
            if self.apply_kld_on_mean:
                action_batch_m = torch.mean(actor_output.action_mean, axis=0)
                action_batch_v = torch.var(actor_output.action_mean, axis=0)
            else:
                action_batch_m = torch.mean(actor_output.action, axis=0)
                action_batch_v = torch.var(actor_output.action, axis=0)
            kld = (
                0.5
                # pyre-fixme[16]: `int` has no attribute `sum`.
                * (
                    (action_batch_v + (action_batch_m - self.action_emb_mean) ** 2)
                    / self.action_emb_variance
                    - 1
                    + self.action_emb_variance.log()
                    - action_batch_v.log()
                ).sum()
            )

            actor_loss_mean += self.kld_weight * kld

        actor_loss_mean.backward()
        self._maybe_run_optimizer(
            self.actor_network_optimizer, self.minibatches_per_step
        )

        #
        # Lastly, if applicable, optimize value network; minimizing MSE between
        # V(s) & E_a~pi(s) [ Q(s,a) - log(pi(a|s)) ]
        #
        if self.value_network is not None:
            state_value = self.value_network(state.float_features)

            if self.logged_action_uniform_prior:
                log_prob_a = torch.zeros_like(min_q_actor_value)
                target_value = min_q_actor_value
            else:
                with torch.no_grad():
                    log_prob_a = actor_output.log_prob
                    log_prob_a = log_prob_a.clamp(-20.0, 20.0)
                    target_value = (
                        min_q_actor_value - self.entropy_temperature * log_prob_a
                    )

            value_loss = F.mse_loss(state_value, target_value.detach())
            value_loss.backward()
            self._maybe_run_optimizer(
                self.value_network_optimizer, self.minibatches_per_step
            )

    # Use the soft update rule to update the target networks
    if self.value_network is not None:
        self._maybe_soft_update(
            self.value_network,
            self.value_network_target,
            self.tau,
            self.minibatches_per_step,
        )
    else:
        self._maybe_soft_update(
            self.q1_network,
            self.q1_network_target,
            self.tau,
            self.minibatches_per_step,
        )
        if self.q2_network is not None:
            self._maybe_soft_update(
                self.q2_network,
                self.q2_network_target,
                self.tau,
                self.minibatches_per_step,
            )

    # Logging at the end to schedule all the cuda operations first
    if (
        self.tensorboard_logging_freq != 0
        and self.minibatch % self.tensorboard_logging_freq == 0
    ):
        SummaryWriterContext.add_histogram("q1/logged_state_value", q1_value)
        if self.q2_network:
            SummaryWriterContext.add_histogram("q2/logged_state_value", q2_value)

        # pyre-fixme[16]: `SummaryWriterContext` has no attribute `add_scalar`.
        SummaryWriterContext.add_scalar(
            "entropy_temperature", self.entropy_temperature
        )
        SummaryWriterContext.add_histogram("log_prob_a", log_prob_a)
        if self.value_network:
            SummaryWriterContext.add_histogram("value_network/target", target_value)
        SummaryWriterContext.add_histogram(
            "q_network/next_state_value", next_state_value
        )
        SummaryWriterContext.add_histogram(
            "q_network/target_q_value", target_q_value
        )
        SummaryWriterContext.add_histogram(
            "actor/min_q_actor_value", min_q_actor_value
        )
        SummaryWriterContext.add_histogram(
            "actor/action_log_prob", actor_output.log_prob
        )
        SummaryWriterContext.add_histogram("actor/loss", actor_loss)
        if self.add_kld_to_loss:
            SummaryWriterContext.add_histogram("kld/mean", action_batch_m)
            SummaryWriterContext.add_histogram("kld/var", action_batch_v)
            SummaryWriterContext.add_scalar("kld/kld", kld)

    self.loss_reporter.report(
        td_loss=float(q1_loss),
        reward_loss=None,
        logged_rewards=reward,
        model_values_on_logged_actions=q1_value,
        model_propensities=actor_output.log_prob.exp(),
        model_values=min_q_actor_value,
    )
def train(self, training_batch: rlt.PolicyNetworkInput) -> None:
    """
    IMPORTANT: the input action here is assumed to be preprocessed to match
    the range of the output of the actor.
    """
    assert isinstance(training_batch, rlt.PolicyNetworkInput)

    self.minibatch += 1

    state = training_batch.state
    action = training_batch.action
    next_state = training_batch.next_state
    reward = training_batch.reward
    not_terminal = training_batch.not_terminal

    # Generate target = r + y * min (Q1(s',pi(s')), Q2(s',pi(s')))
    with torch.no_grad():
        next_actor = self.actor_network_target(next_state).action
        noise = torch.randn_like(next_actor) * self.noise_variance
        next_actor = (next_actor + noise.clamp(*self.noise_clip_range)).clamp(
            *CONTINUOUS_TRAINING_ACTION_RANGE
        )
        next_state_actor = (next_state, rlt.FeatureData(next_actor))
        next_q_value = self.q1_network_target(*next_state_actor)

        if self.q2_network is not None:
            next_q_value = torch.min(
                next_q_value, self.q2_network_target(*next_state_actor)
            )

        target_q_value = reward + self.gamma * next_q_value * not_terminal.float()

    # Optimize Q1 and Q2
    # NOTE: important to zero here (instead of using _maybe_update)
    # since q1 may have accumulated gradients from actor network update
    self.q1_network_optimizer.zero_grad()
    q1_value = self.q1_network(state, action)
    q1_loss = self.q_network_loss(q1_value, target_q_value)
    q1_loss.backward()
    self.q1_network_optimizer.step()

    if self.q2_network:
        self.q2_network_optimizer.zero_grad()
        q2_value = self.q2_network(state, action)
        q2_loss = self.q_network_loss(q2_value, target_q_value)
        q2_loss.backward()
        self.q2_network_optimizer.step()

    # Only update actor and target networks after a fixed number of Q updates
    if self.minibatch % self.delayed_policy_update == 0:
        self.actor_network_optimizer.zero_grad()
        actor_action = self.actor_network(state).action
        actor_q1_value = self.q1_network(state, rlt.FeatureData(actor_action))
        actor_loss = -(actor_q1_value.mean())
        actor_loss.backward()
        self.actor_network_optimizer.step()

        self._soft_update(self.q1_network, self.q1_network_target, self.tau)
        self._soft_update(self.q2_network, self.q2_network_target, self.tau)
        self._soft_update(self.actor_network, self.actor_network_target, self.tau)

    # Logging at the end to schedule all the cuda operations first
    if (
        self.tensorboard_logging_freq != 0
        and self.minibatch % self.tensorboard_logging_freq == 0
    ):
        logs = {
            "loss/q1_loss": q1_loss,
            "loss/actor_loss": actor_loss,
            "q_value/q1_value": q1_value,
            "q_value/next_q_value": next_q_value,
            "q_value/target_q_value": target_q_value,
            "q_value/actor_q1_value": actor_q1_value,
        }
        if self.q2_network:
            logs.update({"loss/q2_loss": q2_loss, "q_value/q2_value": q2_value})

        for k, v in logs.items():
            v = v.detach().cpu()
            if v.dim() == 0:
                # pyre-fixme[16]: `SummaryWriterContext` has no attribute
                #  `add_scalar`.
                SummaryWriterContext.add_scalar(k, v.item())
                continue
            elif v.dim() == 2:
                v = v.squeeze(1)
            assert v.dim() == 1
            SummaryWriterContext.add_histogram(k, v.numpy())
            SummaryWriterContext.add_scalar(f"{k}_mean", v.mean().item())

    self.loss_reporter.report(
        td_loss=float(q1_loss),
        reward_loss=None,
        logged_rewards=reward,
        model_values_on_logged_actions=q1_value,
    )
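# Illustrative sketch (not from the source) of the target-policy smoothing step
# used in train() above: clipped Gaussian noise is added to the target actor's
# action before the target critics are evaluated. The function name, default
# action range, and arguments are hypothetical stand-ins for the trainer's
# noise_variance, noise_clip_range, and CONTINUOUS_TRAINING_ACTION_RANGE.
import torch


def td3_smoothed_target_action(
    next_actor_action: torch.Tensor,
    noise_variance: float,
    noise_clip: float,
    action_low: float = -1.0,
    action_high: float = 1.0,
) -> torch.Tensor:
    noise = (torch.randn_like(next_actor_action) * noise_variance).clamp(
        -noise_clip, noise_clip
    )
    return (next_actor_action + noise).clamp(action_low, action_high)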
def dist(self, input: rlt.PreprocessedState):
    state = input.state.float_features
    x = state
    for i, activation in enumerate(self.activations[:-1]):
        if self.use_batch_norm:
            x = self.batch_norm_ops[i](x)
        x = self.layers[i](x)
        if activation == "linear":
            continue
        elif activation == "tanh":
            activation_func = torch.tanh
        else:
            activation_func = getattr(F, activation)
        x = activation_func(x)

    value = self.value(x).unsqueeze(dim=1)
    raw_advantage = self.advantage(x).reshape(-1, self.num_actions, self.num_atoms)
    advantage = raw_advantage - raw_advantage.mean(dim=1, keepdim=True)

    # Distributional dueling aggregation: per-atom Q = V + (A - mean_a A).
    q_value = value + advantage

    if SummaryWriterContext._global_step % 1000 == 0:
        SummaryWriterContext.add_histogram(
            "dueling_network/{}/value".format(self._name),
            value.detach().mean(dim=2).cpu(),
        )
        SummaryWriterContext.add_scalar(
            "dueling_network/{}/mean_value".format(self._name),
            value.detach().mean().cpu(),
        )
        SummaryWriterContext.add_histogram(
            "dueling_network/{}/q_value".format(self._name),
            q_value.detach().mean(dim=2).cpu(),
        )
        SummaryWriterContext.add_scalar(
            "dueling_network/{}/mean_q_value".format(self._name),
            q_value.detach().mean().cpu(),
        )
        SummaryWriterContext.add_histogram(
            "dueling_network/{}/raw_advantage".format(self._name),
            raw_advantage.detach().mean(dim=2).cpu(),
        )
        SummaryWriterContext.add_scalar(
            "dueling_network/{}/mean_raw_advantage".format(self._name),
            raw_advantage.detach().mean().cpu(),
        )
        for i in range(advantage.shape[1]):
            a = advantage.detach()[:, i, :].mean(dim=1)
            SummaryWriterContext.add_histogram(
                "dueling_network/{}/advantage/{}".format(self._name, i), a.cpu()
            )
            SummaryWriterContext.add_scalar(
                "dueling_network/{}/mean_advantage/{}".format(self._name, i),
                a.mean().cpu(),
            )

    return q_value