def _preprocess_advantages(self, advantages):
  if self._advantage_normalization:
    advantages = (
        (advantages - jnp.mean(advantages)) /
        (jnp.std(advantages) + self._advantage_normalization_epsilon)
    )
  return advantages
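# Hedged usage sketch (not part of the library): a standalone check of the
# normalization applied in _preprocess_advantages above, assuming only
# jax.numpy. The epsilon value 1e-5 is an illustrative choice, not the class
# default.
import jax.numpy as jnp


def _normalize_advantages_sketch(advantages, epsilon=1e-5):
  # Shift to zero mean and scale to roughly unit std; epsilon guards against
  # division by zero when all advantages are equal.
  return (advantages - jnp.mean(advantages)) / (jnp.std(advantages) + epsilon)


adv = jnp.array([1.0, 2.0, 3.0, 4.0])
print(_normalize_advantages_sketch(adv))  # approx. [-1.34, -0.45, 0.45, 1.34]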
def PPOObjective(dist_inputs, values, returns, dones, rewards,
                 actions, old_log_probs, log_prob_fun, epsilon,
                 normalize_advantages):
  """PPO Objective."""
  # dist_inputs of the shape float32[128,1,18]
  # values of the shape float32[128,1,1]
  # returns of the shape float32[128,1,1]
  # dones of the shape float32[128,1,1]
  # rewards of the shape int32[128,1,1]
  # actions of the shape int32[128,1]
  # and old_log_probs of the shape float32[128,1]
  returns = returns.squeeze(axis=2)
  values = values.squeeze(axis=2)
  dones = dones.squeeze(axis=2)
  rewards = rewards.squeeze(axis=2)
  assert rewards.shape == dones.shape, (
      f'rewards.shape was {rewards.shape} and dones.shape was {dones.shape}')
  assert dones.shape == values.shape, (
      f'dones.shape was {dones.shape} and values.shape was {values.shape}')
  assert returns.shape == values.shape, (
      f'returns.shape was {returns.shape} and values.shape was {values.shape}')
  assert returns.shape == old_log_probs.shape, (
      f'returns.shape was {returns.shape} and '
      f'old_log_probs.shape was {old_log_probs.shape}')

  probs_ratio = ProbsRatio(dist_inputs, actions, old_log_probs, log_prob_fun)
  assert probs_ratio.shape == old_log_probs.shape, (
      f'probs_ratio.shape was {probs_ratio.shape} and '
      f'old_log_probs.shape was {old_log_probs.shape}')

  # jaxified versions of
  # returns[dones] = rewards[dones]
  # values[dones] = 0
  returns = jnp.where(dones, rewards, returns)
  values = jnp.where(dones, jnp.zeros_like(values), values)

  advantages = returns - values
  if normalize_advantages:
    advantages = advantages - jnp.mean(advantages)
    advantages /= jnp.std(advantages) + 1e-8
  assert old_log_probs.shape == advantages.shape, (
      f'old_log_probs.shape was {old_log_probs.shape} and advantages.shape '
      f'was {advantages.shape}')

  unclipped_objective = UnclippedObjective(probs_ratio, advantages)
  assert unclipped_objective.shape == advantages.shape, (
      f'unclipped_objective.shape was {unclipped_objective.shape} and '
      f'advantages.shape was {advantages.shape}')

  clipped_objective = ClippedObjective(probs_ratio, advantages, epsilon)
  assert clipped_objective.shape == advantages.shape, (
      f'clipped_objective.shape was {clipped_objective.shape} and '
      f'advantages.shape was {advantages.shape}')

  ppo_objective = jnp.minimum(unclipped_objective, clipped_objective)
  assert ppo_objective.shape == advantages.shape, (
      f'ppo_objective.shape was {ppo_objective.shape} and '
      f'advantages.shape was {advantages.shape}')

  return ppo_objective
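# Hedged sketch (not the library's helpers): the core clipped-surrogate math
# that ProbsRatio / UnclippedObjective / ClippedObjective are expected to
# implement in PPOObjective above, written with plain jax.numpy so the
# pessimistic minimum can be checked in isolation. The function name and
# epsilon=0.2 are illustrative assumptions.
import jax.numpy as jnp


def ppo_surrogate_sketch(new_log_probs, old_log_probs, advantages, epsilon=0.2):
  # Probability ratio r = exp(log pi_new(a|s) - log pi_old(a|s)).
  probs_ratio = jnp.exp(new_log_probs - old_log_probs)
  unclipped = probs_ratio * advantages
  clipped = jnp.clip(probs_ratio, 1.0 - epsilon, 1.0 + epsilon) * advantages
  # Element-wise minimum of the two surrogates, as PPOObjective does above.
  return jnp.minimum(unclipped, clipped)


# Example: a ratio of ~1.65 is clipped to 1.2 when the advantage is positive.
print(ppo_surrogate_sketch(jnp.array([0.5]), jnp.array([0.0]), jnp.array([1.0])))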
def _calc_adv(self, new_vals, vals, valid_mask):
  adv = new_vals - vals
  valid_adv = adv[valid_mask]
  adv_mean = jnp.mean(valid_adv)
  adv_std = jnp.std(valid_adv)
  norm_adv = (adv - adv_mean) / (adv_std + self.ADV_EPS)
  return adv, norm_adv, adv_mean, adv_std
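# Hedged sketch (standalone, numpy-only): the masked statistics used by
# _calc_adv above. The mean/std come only from valid (unpadded) entries, while
# the normalization itself is applied to every entry; downstream code is
# expected to mask out the padded positions. ADV_EPS = 1e-8 is an assumption.
import numpy as np

ADV_EPS = 1e-8
new_vals = np.array([2.0, 3.0, 0.0])      # last entry is padding
vals = np.array([1.0, 1.0, 0.0])
valid_mask = np.array([True, True, False])

adv = new_vals - vals                      # [1., 2., 0.]
valid_adv = adv[valid_mask]                # [1., 2.] -> mean 1.5, std 0.5
norm_adv = (adv - valid_adv.mean()) / (valid_adv.std() + ADV_EPS)
print(norm_adv)                            # [-1., 1., -3.]; last entry is masked later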
def A2CObjective(dist_inputs, values, returns, actions, mask,
                 log_prob_fun, normalize_advantages):
  """Definition of the Advantage Actor Critic (A2C) loss."""
  returns = returns.squeeze()
  values = values.squeeze()
  new_log_probs = NewLogProbs(dist_inputs, actions, log_prob_fun)
  advantages = returns - values
  if normalize_advantages:
    advantages = advantages - jnp.mean(advantages)
    advantages /= jnp.std(advantages) + 1e-8
  return -jnp.sum(new_log_probs * advantages * mask) / jnp.sum(mask)
def A2CObjective(dist_inputs, values, returns, dones, rewards,
                 actions, mask, log_prob_fun, normalize_advantages):
  """Definition of the Advantage Actor Critic (A2C) loss."""
  # dist_inputs of the shape float32[128,1,18]
  # values of the shape float32[128,1,1]
  # returns of the shape float32[128,1,1]
  # dones of the shape int32[128,1,1]
  # actions of the shape int32[128,1]
  # and mask of the shape float32[128,1]
  # We have to squeeze values and returns, because we
  # are planning to compute (return - values) * new_log_probs * mask
  # and all of them should be of the same dimension
  values = values.squeeze(axis=2)
  returns = returns.squeeze(axis=2)
  dones = dones.squeeze(axis=2)
  rewards = rewards.squeeze(axis=2)
  assert rewards.shape == dones.shape, (
      f'rewards.shape was {rewards.shape} and dones.shape was {dones.shape}')
  assert dones.shape == values.shape, (
      f'dones.shape was {dones.shape} and values.shape was {values.shape}')
  assert returns.shape == values.shape, (
      f'returns.shape was {returns.shape} and values.shape was {values.shape}')
  assert values.shape == mask.shape, (
      f'values.shape was {values.shape} and mask.shape was {mask.shape}')
  assert returns.shape[0] == dist_inputs.shape[0], (
      f'returns.shape[0] was {returns.shape[0]} and dist_inputs.shape[0] was '
      f'{dist_inputs.shape[0]}')

  new_log_probs = NewLogProbs(dist_inputs, actions, log_prob_fun)
  assert new_log_probs.shape == mask.shape, (
      f'new_log_probs.shape was {new_log_probs.shape} and mask.shape was '
      f'{mask.shape}')

  # jaxified versions of
  # returns[dones] = rewards[dones]
  # values[dones] = 0
  returns = jnp.where(dones, rewards, returns)
  values = jnp.where(dones, jnp.zeros_like(values), values)

  advantages = returns - values
  if normalize_advantages:
    advantages = advantages - jnp.mean(advantages)
    advantages /= jnp.std(advantages) + 1e-8
  assert new_log_probs.shape == advantages.shape, (
      f'new_log_probs.shape was {new_log_probs.shape} and advantages.shape '
      f'was {advantages.shape}')

  # One of the motivations for the squeezes and assertions is to
  # avoid [128,1] * [128,1,1] * [128] multiplications in the definition
  # of the A2C objective - we insist on the same shapes.
  a2c_objective = -jnp.sum(new_log_probs * advantages * mask) / jnp.sum(mask)
  return a2c_objective
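# Hedged sketch (illustrative values only): how the jnp.where calls in
# PPOObjective and A2CObjective above emulate the in-place updates
# `returns[dones] = rewards[dones]` and `values[dones] = 0` without mutating
# arrays, which JAX does not allow. Nonzero entries of `dones` act as True.
import jax.numpy as jnp

dones = jnp.array([[0.0], [1.0]])       # second step ends an episode
returns = jnp.array([[5.0], [7.0]])
rewards = jnp.array([[1.0], [2.0]])
values = jnp.array([[4.0], [6.0]])

returns = jnp.where(dones, rewards, returns)                # [[5.], [2.]]
values = jnp.where(dones, jnp.zeros_like(values), values)   # [[4.], [0.]]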
def PPOObjective(dist_inputs, values, returns, actions, old_log_probs,
                 log_prob_fun, epsilon, normalize_advantages):
  """PPO Objective."""
  # Returns and values are arriving with two extra dimensions
  # TODO(henrykm): remove these dimensions at an earlier stage?
  returns = returns.squeeze()
  values = values.squeeze()
  probs_ratio = ProbsRatio(dist_inputs, actions, old_log_probs, log_prob_fun)
  advantages = returns - values
  if normalize_advantages:
    advantages = advantages - jnp.mean(advantages)
    advantages /= jnp.std(advantages) + 1e-8
  unclipped_objective = UnclippedObjective(probs_ratio, advantages)
  clipped_objective = ClippedObjective(probs_ratio, advantages, epsilon)
  ppo_objective = jnp.minimum(unclipped_objective, clipped_objective)
  return ppo_objective
def policy_inputs(self, trajectory, values):
  """Create inputs to policy model from a TrajectoryNp and values."""
  # How much TD to use is determined by the added policy slice length,
  # as the policy batches need to be this much longer to calculate TD.
  advantages = self._advantage_estimator(
      trajectory.rewards,
      trajectory.returns,
      values,
      gamma=self._task.gamma,
      n_extra_steps=self._added_policy_slice_length,
  )
  if self._advantage_normalization:
    advantages = (
        (advantages - jnp.mean(advantages)) /
        (jnp.std(advantages) + self._advantage_normalization_epsilon))
  # Observations should be the same length as advantages - so if we are
  # using n_extra_steps, we need to trim the length to match.
  obs = trajectory.observations[:, :advantages.shape[1]]
  act = trajectory.actions[:, :advantages.shape[1]]
  old_logps = trajectory.log_probs[:, :advantages.shape[1]]
  mask = trajectory.mask[:, :advantages.shape[1]]  # Mask to zero-out padding.
  # Shape checks to help debugging.
  if len(advantages.shape) != 2:
    raise ValueError('Advantages are expected to have shape '
                     '[batch_size, length], got: %s' % str(advantages.shape))
  if act.shape[0:2] != advantages.shape:
    raise ValueError('First 2 dimensions of actions should be the same as in '
                     'advantages, %s != %s' % (act.shape[0:2],
                                               advantages.shape))
  if obs.shape[0:2] != advantages.shape:
    raise ValueError('First 2 dimensions of observations should be the same '
                     'as in advantages, %s != %s' % (obs.shape[0:2],
                                                     advantages.shape))
  if old_logps.shape != advantages.shape:
    raise ValueError('Old log-probs and advantages shapes should be the same'
                     ', %s != %s' % (old_logps.shape, advantages.shape))
  if mask.shape != advantages.shape:
    raise ValueError('Mask and advantages shapes should be the same'
                     ', %s != %s' % (mask.shape, advantages.shape))
  return (obs, act, advantages, old_logps, mask)
def policy_metrics(self):
  metrics = {
      'policy_loss': self.policy_loss,
      'advantage_mean': tl.Serial(
          self._policy_inputs_to_advantages(False),
          tl.Fn('Mean', lambda x: jnp.mean(x))  # pylint: disable=unnecessary-lambda
      ),
      'advantage_std': tl.Serial(
          self._policy_inputs_to_advantages(False),
          tl.Fn('Std', lambda x: jnp.std(x))  # pylint: disable=unnecessary-lambda
      )
  }
  metrics.update(awr_metrics(
      self._beta,
      preprocess_layer=self._policy_inputs_to_advantages(True)))
  return metrics
def advantage_std(self):
  return tl.Serial([
      # (dist_inputs, advantages, old_dist_inputs, mask)
      tl.Select([1]),  # Select just the advantages.
      tl.Fn('AdvantageStd', lambda x: jnp.std(x)),  # pylint: disable=unnecessary-lambda
  ])
def normalize(adv):
  return (
      (adv - jnp.mean(adv)) /
      (jnp.std(adv) + self._advantage_normalization_epsilon)
  )
def train_epoch(self, evaluate=True):
  epoch_start_time = time.time()

  # Evaluate the policy.
  policy_eval_start_time = time.time()
  if evaluate and (self.epoch + 1) % self._eval_every_n == 0:
    self.evaluate()
  policy_eval_time = policy_based_utils.get_time(policy_eval_start_time)

  def write_metric(key, value):
    self._train_sw.scalar(key, value, step=self.epoch)
    self._history.append('train', key, self.epoch, value)

  # Get fresh trajectories every time.
  self._should_reset_train_env = True
  trajectory_collection_start_time = time.time()
  logging.vlog(1, 'AWR epoch [% 6d]: collecting trajectories.', self._epoch)
  trajs, _, timing_info, self._model_state = self.collect_trajectories(
      train=True, temperature=1.0, raw_trajectory=True)
  del timing_info
  trajectory_collection_time = policy_based_utils.get_time(
      trajectory_collection_start_time)

  logging.vlog(1, 'AWR epoch [% 6d]: n_trajectories [%s].',
               self._epoch, len(trajs))

  # Convert these into numpy now.
  def extract_obs_act_rew_dones(traj_np):
    return traj_np[0], traj_np[1], traj_np[2], traj_np[4]

  trajs_np = [extract_obs_act_rew_dones(traj.as_numpy) for traj in trajs]

  # Number of new actions.
  new_sample_count = sum(traj[1].shape[0] for traj in trajs_np)
  self._n_observations_seen += new_sample_count
  logging.vlog(1, 'AWR epoch [% 6d]: new_sample_count [%d].',
               self._epoch, new_sample_count)

  if self._should_write_summaries:
    write_metric('trajs/batch', len(trajs))
    write_metric('trajs/new_sample_count', new_sample_count)

  # The number of trajectories, i.e. `B`, can keep changing from iteration to
  # iteration, since we are capped on the number of observations requested.
  # So let's operate on each trajectory on its own?
  # TODO(afrozm): So should our batches look like (B, T+1, *OBS) or B
  #   different examples of (T+1, *OBS) each, since B can keep changing?

  # Add these to the replay buffer.
  for traj in trajs:
    _ = self._replay_buffer.store(traj)

  rewards = jnp.array([jnp.sum(traj[2]) for traj in trajs_np])
  avg_reward = jnp.mean(rewards)
  std_reward = jnp.std(rewards)
  max_reward = jnp.max(rewards)
  min_reward = jnp.min(rewards)
  self._log('train', 'train/reward_mean_truncated', avg_reward)

  if evaluate and not self._separate_eval and self._should_write_summaries:
    metrics = {'raw': {1.0: {'mean': avg_reward, 'std': std_reward}}}
    policy_based_utils.write_eval_reward_summaries(
        metrics, self._log, self.epoch)

  logging.vlog(
      1, 'AWR epoch [% 6d]: Rewards avg=[%0.2f], max=[%0.2f], '
      'min=[%0.2f].', self.epoch, avg_reward, max_reward, min_reward)

  if self._should_write_summaries:
    write_metric('reward/avg', avg_reward)
    write_metric('reward/std', std_reward)
    write_metric('reward/max', max_reward)
    write_metric('reward/min', min_reward)

  # Wrap these observations/rewards inside ReplayBuffer.
  idx, valid_mask, valid_idx = self._replay_buffer.get_valid_indices()

  # pylint: disable=g-complex-comprehension
  observations = [
      self._replay_buffer.get(
          replay_buffer.ReplayBuffer.OBSERVATIONS_KEY,
          idx[start_idx:end_plus_1_idx])
      for (start_idx, end_plus_1_idx) in
      self._replay_buffer.iterate_over_paths(idx)
  ]
  rewards = [
      self._replay_buffer.get(
          replay_buffer.ReplayBuffer.REWARDS_KEY,
          idx[start_idx:end_plus_1_idx][:-1])
      for (start_idx, end_plus_1_idx) in
      self._replay_buffer.iterate_over_paths(idx)
  ]
  # pylint: enable=g-complex-comprehension

  t_final = awr_utils.padding_length(rewards, boundary=self._boundary)
  logging.vlog(1, 'AWR epoch [% 6d]: t_final [%s].', self._epoch, t_final)

  if self._should_write_summaries:
    write_metric('trajs/t_final', t_final)

  # These padded observations are over *all* the non-final observations in
  # the entire replay buffer.
  # Shapes:
  # padded_observations      = (B, T + 1, *OBS)
  # padded_observations_mask = (B, T + 1)
  padded_observations, padded_observations_mask = (
      awr_utils.pad_array_to_length(observations, t_final + 1))

  batch = len(observations)
  self._check_shapes('padded_observations', '(batch, t_final + 1)',
                     padded_observations, (batch, t_final + 1),
                     array_prefix=2)
  self._check_shapes('padded_observations_mask', '(batch, t_final + 1)',
                     padded_observations_mask, (batch, t_final + 1))

  # Shapes:
  # padded_rewards      = (B, T)
  # padded_rewards_mask = (B, T)
  padded_rewards, padded_rewards_mask = awr_utils.pad_array_to_length(
      rewards, t_final)
  self._check_shapes('padded_rewards', '(batch, t_final)',
                     padded_rewards, (batch, t_final))
  self._check_shapes('padded_rewards_mask', '(batch, t_final)',
                     padded_rewards_mask, (batch, t_final))

  # Shapes:
  # lengths = (B,)
  lengths = jnp.sum(padded_rewards_mask, axis=1, dtype=jnp.int32)
  self._check_shapes('lengths', '(batch,)', lengths, (batch,))

  # TODO(pkozakowski): Pass the actual actions here, to enable autoregressive
  #   action sampling.
  dummy_actions = jnp.zeros(
      (batch, t_final + 1) + self._action_shape,
      self._action_dtype,
  )

  # Shapes:
  # log_probabs_traj       = (B, T + 1, #controls, #actions)
  # value_predictions_traj = (B, T + 1)
  log_probabs_traj, value_predictions_traj, self._model_state, unused_rng = (
      self._policy_fun_all_timesteps(padded_observations, lengths,
                                     self._model_state, self._get_rng()))
  self._check_shapes(
      'log_probabs_traj', '(batch, t_final + 1, n_controls, n_actions)',
      log_probabs_traj,
      (batch, t_final + 1, self._n_controls, self._n_actions))
  self._check_shapes('value_predictions_traj', '(batch, t_final + 1)',
                     value_predictions_traj, (batch, t_final + 1))

  # Zero out the padding's value predictions, since the net may give some
  # prediction to the padding observations.
  value_predictions_traj *= padded_observations_mask

  # Compute td-lambda returns, and reshape to match value_predictions_traj.
  list_td_lambda_returns = awr_utils.batched_compute_td_lambda_return(
      padded_rewards, padded_rewards_mask, value_predictions_traj,
      padded_observations_mask, self._gamma, self._td_lambda)

  if logging.vlog_is_on(1) and list_td_lambda_returns:
    l = len(list_td_lambda_returns)
    logging.vlog(1, f'Len of list_td_lambda_returns: {l}.')
    self._log_shape('td_lambda_returns[0]', list_td_lambda_returns[0])

  # Pad an extra 0 for each to match lengths of value predictions.
  list_target_values = [
      np.pad(l, (0, 1), 'constant') for l in list_td_lambda_returns
  ]
  if batch != len(list_target_values):
    raise ValueError(f'batch != len(list_target_values) : '
                     f'{batch} vs {len(list_target_values)}')

  # Shape: (len(idx),)
  target_values = np.concatenate(list_target_values)
  self._check_shapes('target_values', '(len(idx),)',
                     target_values, (len(idx),))

  # Shape: (len(idx),)
  vals = self.flatten_vals(value_predictions_traj, padded_observations_mask)
  self._check_shapes('vals', '(len(idx),)', vals, (len(idx),))

  # Calculate advantages.
  adv, norm_adv, adv_mean, adv_std = self._calc_adv(
      target_values, vals, valid_mask)
  self._check_shapes('norm_adv', '(len(idx),)', norm_adv, (len(idx),))

  adv_weights, adv_weights_mean, adv_weights_min, adv_weights_max = (
      self._calc_adv_weights(norm_adv, valid_mask))
  self._check_shapes('adv_weights', '(len(idx),)', adv_weights, (len(idx),))

  del adv, adv_mean, adv_std
  del adv_weights_min, adv_weights_max, adv_weights_mean

  combined_steps = int(
      jnp.ceil(self._optimization_steps * new_sample_count /
               self._num_samples_to_collect))
  optimization_start_time = time.time()
  combined_losses = self._update_combined(combined_steps, valid_idx,
                                          target_values, adv_weights)
  optimization_time = policy_based_utils.get_time(optimization_start_time)

  self._epoch += 1
  if self._should_write_summaries:
    write_metric('combined/optimization_steps', combined_steps)

  epoch_time = policy_based_utils.get_time(epoch_start_time)
  timing_dict = {
      'epoch': epoch_time,
      'trajectory_collection': trajectory_collection_time,
      'optimization': optimization_time,
      'policy_eval': policy_eval_time,
  }

  if self._should_write_summaries:
    for k, v in timing_dict.items():
      write_metric('timing/{}'.format(k), v)

    # Only dump the average post losses.
    if combined_losses:
      for k, v in combined_losses.items():
        if 'post_entropy' in k:
          write_metric(k.replace('post_entropy', 'entropy'), v)
        if 'post_loss' in k:
          write_metric(k.replace('post_loss', 'loss'), v)

  self.flush_summaries()
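# Hedged sketch (an assumption about _calc_adv_weights, which is not shown
# here): AWR-style training, as in "Advantage-Weighted Regression" (Peng et
# al., 2019), turns normalized advantages into exponentiated, clipped weights.
# Both beta and the weight cap below are illustrative values, not the
# trainer's configured defaults.
import numpy as np


def calc_adv_weights_sketch(norm_adv, beta=1.0, max_weight=20.0):
  # Larger advantages get exponentially larger weights; the cap keeps a few
  # outlier transitions from dominating the regression.
  weights = np.exp(norm_adv / beta)
  return np.minimum(weights, max_weight)


print(calc_adv_weights_sketch(np.array([-1.0, 0.0, 1.0, 5.0])))
# approx. [0.37, 1.0, 2.72, 20.0] - the last weight is clipped.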
def train_epoch(self, evaluate=True):
  def write_metric(key, value):
    self._train_sw.scalar(key, value, step=self.epoch)
    self._history.append('train', key, self.epoch, value)

  # Get fresh trajectories every time.
  self._should_reset_train_env = True
  trajectory_collection_start_time = time.time()
  logging.vlog(1, 'AWR epoch [% 6d]: collecting trajectories.', self._epoch)
  trajs, _, timing_info, self._model_state = self.collect_trajectories(
      train=True, temperature=1.0, raw_trajectory=True)
  del timing_info
  trajectory_collection_time = ppo.get_time(trajectory_collection_start_time)

  # Convert these into numpy now.
  def extract_obs_act_rew_dones(traj_np):
    return traj_np[0], traj_np[1], traj_np[2], traj_np[4]

  trajs_np = [extract_obs_act_rew_dones(traj.as_numpy) for traj in trajs]

  # Number of new actions.
  new_sample_count = sum(traj[1].shape[0] for traj in trajs_np)

  if self._should_write_summaries:
    write_metric('trajs/batch', len(trajs))
    write_metric('trajs/new_sample_count', new_sample_count)

  # The number of trajectories, i.e. `B`, can keep changing from iteration to
  # iteration, since we are capped on the number of observations requested.
  # So let's operate on each trajectory on its own?
  # TODO(afrozm): So should our batches look like (B, T+1, *OBS) or B
  #   different examples of (T+1, *OBS) each, since B can keep changing?

  # Add these to the replay buffer.
  for traj in trajs:
    _ = self._replay_buffer.store(traj)

  if self._should_write_summaries:
    rewards = np.array([np.sum(traj[2]) for traj in trajs_np])
    avg_reward = np.mean(rewards)
    std_reward = np.std(rewards)
    max_reward = np.max(rewards)
    min_reward = np.min(rewards)
    write_metric('reward/avg', avg_reward)
    write_metric('reward/std', std_reward)
    write_metric('reward/max', max_reward)
    write_metric('reward/min', min_reward)

  # Wrap these observations/rewards inside ReplayBuffer.
  idx, valid_mask, valid_idx = self._replay_buffer.get_valid_indices()

  # pylint: disable=g-complex-comprehension
  observations = [
      self._replay_buffer.get(replay_buffer.ReplayBuffer.OBSERVATIONS_KEY,
                              idx[start_idx:end_plus_1_idx])
      for (start_idx, end_plus_1_idx) in
      self._replay_buffer.iterate_over_paths(idx)
  ]
  rewards = [
      self._replay_buffer.get(replay_buffer.ReplayBuffer.REWARDS_KEY,
                              idx[start_idx:end_plus_1_idx][:-1])
      for (start_idx, end_plus_1_idx) in
      self._replay_buffer.iterate_over_paths(idx)
  ]
  # pylint: enable=g-complex-comprehension

  t_final = awr_utils.padding_length(rewards, boundary=self._boundary)

  if self._should_write_summaries:
    write_metric('trajs/t_final', t_final)

  # These padded observations are over *all* the non-final observations in
  # the entire replay buffer.
  # Shapes:
  # padded_observations      = (B, T + 1, *OBS)
  # padded_observations_mask = (B, T + 1)
  padded_observations, padded_observations_mask = (
      awr_utils.pad_array_to_length(observations, t_final + 1)
  )

  batch = len(observations)
  if ((batch, t_final + 1) != padded_observations.shape[:2] or
      (batch, t_final + 1) != padded_observations_mask.shape):
    raise ValueError(
        f'Shapes mismatch, batch {batch}, t_final {t_final}, '
        f'padded_observations.shape {padded_observations.shape}, '
        f'padded_observations_mask.shape {padded_observations_mask.shape}')

  # Shapes:
  # padded_rewards      = (B, T)
  # padded_rewards_mask = (B, T)
  padded_rewards, padded_rewards_mask = awr_utils.pad_array_to_length(
      rewards, t_final)
  if ((padded_rewards.shape != (batch, t_final)) or
      (padded_rewards_mask.shape != (batch, t_final))):
    raise ValueError(
        f'Shapes mismatch, batch {batch}, t_final {t_final}, '
        f'padded_rewards.shape {padded_rewards.shape}, '
        f'padded_rewards_mask.shape {padded_rewards_mask.shape}')

  # Shapes:
  # log_probabs_traj       = (B, T + 1, #actions)
  # value_predictions_traj = (B, T + 1)
  (log_probabs_traj, value_predictions_traj) = (
      self._policy_and_value_net_apply(
          padded_observations,
          weights=self._policy_and_value_net_weights,
          state=self._model_state,
          rng=self._get_rng(),
      ))
  if ((batch, t_final + 1) != log_probabs_traj.shape[:2] or
      (batch, t_final + 1) != value_predictions_traj.shape):
    raise ValueError(
        f'Shapes mismatch, batch {batch}, t_final {t_final}, '
        f'log_probabs_traj.shape {log_probabs_traj.shape}, '
        f'value_predictions_traj.shape {value_predictions_traj.shape}')

  # Zero out the padding's value predictions, since the net may give some
  # prediction to the padding observations.
  value_predictions_traj *= padded_observations_mask

  # Compute td-lambda returns, and reshape to match value_predictions_traj.
  list_td_lambda_returns = awr_utils.batched_compute_td_lambda_return(
      padded_rewards, padded_rewards_mask, value_predictions_traj,
      padded_observations_mask, self._gamma, self._td_lambda)

  # Pad an extra 0 for each to match lengths of value predictions.
  list_target_values = [
      onp.pad(l, (0, 1), 'constant') for l in list_td_lambda_returns
  ]
  if batch != len(list_target_values):
    raise ValueError(f'batch != len(list_target_values) : '
                     f'{batch} vs {len(list_target_values)}')

  # Shape: (len(idx),)
  target_values = onp.concatenate(list_target_values)
  if target_values.shape != (len(idx),):
    raise ValueError(f'target_values.shape != (len(idx),) = '
                     f'{target_values.shape} != ({len(idx)},)')

  # Shape: (len(idx),)
  vals = self.flatten_vals(value_predictions_traj, padded_observations_mask)
  if vals.shape != target_values.shape:
    raise ValueError(f'vals.shape != target_values.shape : '
                     f'{vals.shape} vs {target_values.shape}')

  # Calculate advantages.
  adv, norm_adv, adv_mean, adv_std = self._calc_adv(
      target_values, vals, valid_mask)

  adv_weights, adv_weights_mean, adv_weights_min, adv_weights_max = (
      self._calc_adv_weights(norm_adv, valid_mask)
  )

  del adv, adv_mean, adv_std
  del adv_weights_min, adv_weights_max, adv_weights_mean

  combined_steps = int(
      np.ceil(self._optimization_steps * new_sample_count /
              self._num_samples_to_collect))
  combined_losses = self._update_combined(combined_steps, valid_idx,
                                          target_values, adv_weights)
  if self._should_write_summaries:
    write_metric('combined/optimization_steps', combined_steps)

  timing_dict = {
      'trajectory_collection': trajectory_collection_time,
      # 'epoch': epoch_time,
      # 'policy_eval': policy_eval_time,
      # 'preprocessing': preprocessing_time,
      # 'log_prob_recompute': log_prob_recompute_time,
      # 'loss_compute': loss_compute_time,
      # 'optimization': optimization_time,
      # 'policy_save': policy_save_time,
  }

  if self._should_write_summaries:
    for k, v in timing_dict.items():
      write_metric('timing/{}'.format(k), v)

    # Only dump the average post losses.
    if combined_losses:
      for k, v in combined_losses.items():
        if 'post_entropy' in k:
          write_metric(k.replace('post_entropy', 'entropy'), v)
        if 'post_loss' in k:
          write_metric(k.replace('post_loss', 'loss'), v)

  self._epoch += 1
  self.flush_summaries()
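# Hedged sketch (single unpadded trajectory, standard TD(lambda) recursion):
# an approximation of what awr_utils.batched_compute_td_lambda_return computes
# per trajectory in both train_epoch variants above, written with plain numpy.
# Argument order, batching, and padding handling in the real helper may differ.
import numpy as np


def td_lambda_returns_sketch(rewards, values, gamma, td_lambda):
  """rewards: (T,); values: (T + 1,) predictions including the final state."""
  t_max = len(rewards)
  returns = np.zeros(t_max)
  next_return = values[-1]  # Bootstrap from the last value prediction.
  for t in reversed(range(t_max)):
    # G_t = r_t + gamma * ((1 - lambda) * V(s_{t+1}) + lambda * G_{t+1}).
    next_return = rewards[t] + gamma * (
        (1 - td_lambda) * values[t + 1] + td_lambda * next_return)
    returns[t] = next_return
  return returns


# With lambda = 0 this reduces to one-step TD targets r_t + gamma * V(s_{t+1});
# with lambda = 1 it reduces to discounted Monte Carlo returns bootstrapped at
# the final value prediction.
print(td_lambda_returns_sketch(
    np.array([1.0, 1.0]), np.array([0.5, 0.5, 0.5]), gamma=0.99, td_lambda=0.95))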