def after_run(self, run_context, run_values):
    results, step = run_values.results
    self._iter_count = step

    if not self._triggered and step != self._last_step - 1:
        return
    self._timer.update_last_triggered_step(self._iter_count - 1)

    if not self._model.on_horovod or self._model.hvd.rank() == 0:
        deco_print("Running evaluation on a validation set:")

    results_per_batch, total_loss = get_results_for_epoch(
        self._model, run_context.session, mode="eval", compute_loss=True,
        detailed_inference_outputs=False,
    )

    if not self._model.on_horovod or self._model.hvd.rank() == 0:
        if self._print_ppl:
            deco_print(
                "Validation loss: {:.4f} | ppl = {:.4f} | bpc = {:.4f}".format(
                    total_loss, math.exp(total_loss),
                    total_loss / math.log(2)),
                offset=4)
        else:
            deco_print("Validation loss: {:.4f}".format(total_loss), offset=4)

        dict_to_log = self._model.finalize_evaluation(results_per_batch, step)
        dict_to_log['eval_loss'] = total_loss

        if self._print_ppl:
            # add ppl and bpc metrics to tensorboard; total_loss is an average
            # cross-entropy in nats, so ppl = exp(loss) and bpc = loss / ln(2)
            dict_to_log['ppl'] = math.exp(total_loss)
            dict_to_log['bpc'] = total_loss / math.log(2)

        # saving the best validation model
        if self._model.params['save_checkpoint_steps'] and \
           total_loss < self._best_eval_loss:
            self._best_eval_loss = total_loss
            self._eval_saver.save(
                run_context.session,
                os.path.join(self._model.params['logdir'], 'best_models',
                             'val_loss={:.4f}-step'.format(total_loss)),
                global_step=step + 1,
            )

        # optionally logging to tensorboard any values
        # returned from finalize_evaluation
        if self._model.params['save_summaries_steps']:
            log_summaries_from_dict(
                dict_to_log,
                self._model.params['logdir'],
                step,
            )
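# The ppl/bpc conversions above assume total_loss is an average cross-entropy
# measured in nats. A minimal, self-contained sketch of that arithmetic (the
# helper name ppl_and_bpc is illustrative, not part of this codebase):
import math

def ppl_and_bpc(loss_nats):
    """Perplexity is exp(loss); bits-per-character rescales the loss to base 2."""
    return math.exp(loss_nats), loss_nats / math.log(2)

# e.g. a loss of ln(2) nats corresponds to ppl = 2 and bpc = 1
ppl, bpc = ppl_and_bpc(math.log(2))
assert abs(ppl - 2.0) < 1e-12 and abs(bpc - 1.0) < 1e-12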
def after_run(self, run_context, run_values):
    results, step = run_values.results
    self._iter_count = step

    if not results:
        return
    self._timer.update_last_triggered_step(self._iter_count - 1)

    input_values, output_values = results
    dict_to_log = self._model.maybe_print_logs(input_values, output_values)
    # optionally logging to tensorboard any values
    # returned from maybe_print_logs
    if dict_to_log:
        log_summaries_from_dict(
            dict_to_log,
            self._model.params['logdir'],
            step,
        )
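# log_summaries_from_dict is defined elsewhere in this codebase; for readers
# following the hooks here, this is a hedged sketch of what such a helper
# might do, assuming scalar float values and TensorFlow 1.x summary writers
# (the _sketch suffix marks it as illustrative, not the repo's implementation):
import tensorflow as tf

def log_summaries_from_dict_sketch(dict_to_log, logdir, step):
    # reuse a cached FileWriter so all events for this logdir land in one file
    writer = tf.summary.FileWriterCache.get(logdir)
    for tag, value in dict_to_log.items():
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
        writer.add_summary(summary, global_step=step)
    writer.flush()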
def after_run(self, run_context, run_values):
    results, step = run_values.results
    self._iter_count = step

    if not self._triggered and step != self._last_step - 1:
        return
    self._timer.update_last_triggered_step(self._iter_count - 1)

    if not self._model.on_horovod or self._model.hvd.rank() == 0:
        deco_print("Running evaluation on a validation set:")

    results_per_batch, total_loss = get_results_for_epoch(
        self._model, run_context.session, mode="eval", compute_loss=True,
    )

    if not self._model.on_horovod or self._model.hvd.rank() == 0:
        deco_print("Validation loss: {:.4f}".format(total_loss), offset=4)

        dict_to_log = self._model.finalize_evaluation(results_per_batch)
        dict_to_log['eval_loss'] = total_loss

        # saving the best validation model
        if total_loss < self._best_eval_loss:
            self._best_eval_loss = total_loss
            self._eval_saver.save(
                run_context.session,
                os.path.join(self._model.params['logdir'], 'best_models',
                             'val_loss={:.4f}-step'.format(total_loss)),
                global_step=step + 1,
            )

        # optionally logging to tensorboard any values
        # returned from finalize_evaluation
        if dict_to_log:
            log_summaries_from_dict(
                dict_to_log,
                self._model.params['logdir'],
                step,
            )
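# A hedged usage sketch: SessionRunHook subclasses like the ones above only
# have their after_run methods invoked when attached to a monitored session.
# This minimal TensorFlow 1.x example (trivial graph, no real model; the
# setup is hypothetical) shows the wiring that drives such hooks:
import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
train_op = tf.assign_add(global_step, 1)  # stand-in for a real training op

hooks = [tf.train.StopAtStepHook(last_step=5)]  # custom hooks would be appended here
with tf.train.MonitoredTrainingSession(hooks=hooks) as sess:
    while not sess.should_stop():
        sess.run(train_op)  # each run triggers before_run/after_run on every hook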