def _initial_performance(self, session):
    """Evaluate the untrained model on the validation set.

    Runs one validation pass, prints a summary plus the wall-clock time it
    took, and snapshots the model weights so the best state can be restored
    later.

    Args:
        session: training session object exposing ``test_model(loader)``
            and ``model`` (a module with ``state_dict()``).

    Returns:
        tuple: ``(target_performance, initial_state)`` where
        ``target_performance`` is the mean ``'distance'`` metric over all
        validation subsets and ``initial_state`` is a deep copy of the
        model's state dict.
    """
    val_start = dt.now()
    validation_performance = session.test_model(self.val_loader)
    # The scalar tracked during training: mean 'distance' over all subsets.
    target_performance = Evaluator.means_over_subsets(
        validation_performance)['distance']
    val_time = dt.now() - val_start
    Evaluator.print_result_summary_flat(validation_performance, '\t')
    # BUG FIX: timedelta.microseconds is only the sub-second component
    # (0..999999), so any run >= 1 s was reported wrongly. total_seconds()
    # covers the full duration.
    print('\t\tThat took {} milliseconds.'.format(
        int(val_time.total_seconds() * 1000)))
    return target_performance, copy.deepcopy(session.model.state_dict())
def _logging(self, log, loss, train_performance, val_performance, i):
    """Append the current iteration's metrics to *log*; print if verbose.

    Args:
        log: dict with ``'train'`` and ``'val'`` sub-dicts mapping metric
            names to history lists (``'val'`` is further keyed by subset).
        loss: scalar training loss of the current iteration.
        train_performance: training metrics keyed by subset; only the
            ``'DEFAULT'`` subset is recorded.
        val_performance: validation metrics keyed by subset.
        i: current iteration index within the epoch.
    """
    for key, value in train_performance['DEFAULT'].items():
        log['train'][key].append(value)
    for subset_key, subset_performance in val_performance.items():
        for key, value in subset_performance.items():
            log['val'][subset_key][key].append(value)
    if self.config['verbose']:
        # FIX: a stray third argument (`loss`) was passed to this
        # two-placeholder format string and silently ignored; the loss is
        # printed on its own line below.
        print('Iteration {}/{}:'.format(i, self.iters_per_epoch - 1))
        print('Loss: {:.2e}'.format(loss))
        Evaluator.print_result_summary_flat(train_performance, '\tTraining : ')
        Evaluator.print_result_summary_flat(val_performance, '\tValidation: ')
        print()