from collections import defaultdict


def summary(self, extended=False):
    subsection('Training parameters')
    settings = {"idx": self.idx, "model size": self.model_size}
    if extended:
        settings.update({
            "training size": self.training_size,
            "validation size": self.validation_size,
            "batch size": self.batch_size
        })
    display_settings(settings)

    subsection("Hyper-parameters")
    # Group hyper-parameters by their group name.
    data_by_group = defaultdict(dict)
    for data in self.hyper_parameters.values():
        data_by_group[data['group']][data['name']] = data['value']

    # Generate the table, colorizing every other row for readability.
    rows = [['Group', 'Hyper-parameter', 'Value']]
    idx = 0
    for grp in sorted(data_by_group.keys()):
        for param, value in data_by_group[grp].items():
            row = [grp, param, value]
            if idx % 2:
                row = colorize_row(row, 'cyan')
            rows.append(row)
            idx += 1
    display_table(rows)
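# The display helpers used throughout (subsection, display_settings,
# display_setting, display_table, colorize, colorize_row, highlight) are
# defined elsewhere. Below is a minimal sketch of plausible stand-ins --
# assumptions for illustration, not the actual implementations -- built on
# `tabulate` and `termcolor`.
from tabulate import tabulate
from termcolor import colored


def subsection(text):
    # Print a section header with an underline.
    print("\n%s\n%s" % (text, "-" * len(text)))


def highlight(text):
    # Print an emphasized status line.
    print(colored(text, "magenta"))


def colorize(text, color):
    # Wrap a single cell value in ANSI color codes.
    return colored(str(text), color)


def colorize_row(row, color):
    # Colorize every cell of a table row.
    return [colorize(cell, color) for cell in row]


def display_table(rows):
    # The first row is treated as the header.
    print(tabulate(rows, headers="firstrow", tablefmt="grid"))


def display_settings(settings):
    # Print one "name: value" line per setting.
    for name, value in settings.items():
        print("%s: %s" % (name, value))


def display_setting(text):
    # Print a single pre-formatted setting line.
    print(text)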
def config_summary(self):
    subsection("Hyper-parameters search space")
    display_setting("distribution type: %s" % self.name)

    # Compute the size of the hyper-parameter space as the product of the
    # search-space size of each hyper-parameter.
    total_size = 1
    data_by_group = defaultdict(dict)
    group_size = defaultdict(lambda: 1)
    for data in self._hyperparameters_config.values():
        data_by_group[data['group']][data['name']] = data['space_size']
        group_size[data['group']] *= data['space_size']
        total_size *= data['space_size']

    # Generate the table: a total row per group, then one row per
    # hyper-parameter in that group.
    rows = [['Hyper-parameter', 'Search space']]
    for grp in sorted(data_by_group.keys()):
        row = ["%s total" % grp, group_size[grp]]
        row = colorize_row(row, 'cyan')
        rows.append(row)
        for param, size in data_by_group[grp].items():
            rows.append(["|-%s" % param, size])
    rows.append([colorize('total', 'green'), colorize(total_size, 'green')])
    display_table(rows)
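# Standalone trace of the space-size computation above, using a hypothetical
# _hyperparameters_config (parameter names and sizes are made up): the total
# is simply the product of every hyper-parameter's space_size.
from collections import defaultdict

hyperparameters_config = {
    "default:learning_rate": {
        "group": "default", "name": "learning_rate", "space_size": 3},
    "default:batch_size": {
        "group": "default", "name": "batch_size", "space_size": 4},
    "dense:units": {
        "group": "dense", "name": "units", "space_size": 5},
}

total_size = 1
group_size = defaultdict(lambda: 1)
for data in hyperparameters_config.values():
    group_size[data["group"]] *= data["space_size"]
    total_size *= data["space_size"]

print(dict(group_size))  # {'default': 12, 'dense': 5}
print(total_size)        # 3 * 4 * 5 = 60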
def on_train_end(self, logs=None):
    # Training summary: on the last execution, compare the current model's
    # metrics against the best instance recorded so far.
    if self.num_executions + 1 == self.max_executions:
        curr = self.instance_state.agg_metrics.to_config()
        best = self.tuner_state.best_instance_config['aggregate_metrics']
        rows = [['Name', 'Best model', 'Current model']]
        for idx, metric in enumerate(best):
            best_value = round(metric['best_value'], 4)
            curr_value = round(curr[idx]['best_value'], 4)
            row = [metric['name'], best_value, curr_value]
            if metric['is_objective']:
                # Green when the current model matches the best objective
                # value, red when it falls short.
                if best_value == curr_value:
                    row = colorize_row(row, 'green')
                else:
                    row = colorize_row(row, 'red')
            rows.append(row)
        display_table(rows)

    # Tuning budget exhausted.
    if self.tuner_state.remaining_budget < 1:
        highlight("Hypertuning complete - results in %s" %
                  self.tuner_state.host.results_dir)
        # FIXME: final summary
    else:
        highlight("%d/%d epochs tuning budget left" %
                  (self.tuner_state.remaining_budget,
                   self.tuner_state.epoch_budget))
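# Quick trace of the objective-coloring rule above with hypothetical metric
# records (names and values are illustrative, not real results).
best = [
    {"name": "loss", "best_value": 0.2134, "is_objective": False},
    {"name": "val_accuracy", "best_value": 0.9321, "is_objective": True},
]
curr = [
    {"name": "loss", "best_value": 0.2519, "is_objective": False},
    {"name": "val_accuracy", "best_value": 0.9321, "is_objective": True},
]
for idx, metric in enumerate(best):
    best_value = round(metric["best_value"], 4)
    curr_value = round(curr[idx]["best_value"], 4)
    if metric["is_objective"]:
        # Ties with the best objective value render green, otherwise red.
        color = "green" if best_value == curr_value else "red"
        print(metric["name"], best_value, curr_value, "->", color)
# Prints: val_accuracy 0.9321 0.9321 -> green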
def summary(self, extended=False):
    """Display a table containing the name and best/last value for each
    metric."""
    rows = [['name', 'best', 'last']]
    for m in self.to_list():
        rows.append([m.name, m.get_best_value(), m.get_last_value()])
    display_table(rows)
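# summary() only assumes each object returned by to_list() exposes .name,
# .get_best_value() and .get_last_value(). A hypothetical stand-in to make
# that interface concrete:
class FakeMetric:
    def __init__(self, name, history):
        self.name = name
        self.history = history

    def get_best_value(self):
        # Assuming lower-is-better, as for a loss.
        return min(self.history)

    def get_last_value(self):
        return self.history[-1]


m = FakeMetric("loss", [0.9, 0.4, 0.5])
print(m.name, m.get_best_value(), m.get_last_value())  # loss 0.4 0.5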
def display_hparams(hyper_parameters, hyper_parameters_group, num_models,
                    use_colors):
    "Display hyper-parameter values for the top models."
    rows = []
    headers = ['hyper-parameter'] + ["model %s" % x for x in range(num_models)]
    rows.append(headers)

    for grp, hparams in hyper_parameters_group.items():
        # Group header row.
        row = [grp] + [''] * num_models
        if use_colors:
            row = colorize_row(row, 'yellow')
        rows.append(row)

        # One row per hyper-parameter, colorizing every other row.
        hparams = sorted(hparams)
        for idx, p in enumerate(hparams):
            row = ["|-" + hyper_parameters[p][0]['name']]
            row.extend([v['value'] for v in hyper_parameters[p]])
            if use_colors and idx % 2:
                row = colorize_row(row, 'cyan')
            rows.append(row)
    display_table(rows)
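# Illustrative call showing the input shapes display_hparams expects
# (hypothetical names and values; assumes the display_table stand-in
# sketched earlier is in scope). Each key of hyper_parameters maps to one
# {'name': ..., 'value': ...} record per model.
hyper_parameters = {
    "default:learning_rate": [
        {"name": "learning_rate", "value": 0.01},   # model 0
        {"name": "learning_rate", "value": 0.001},  # model 1
    ],
    "dense:units": [
        {"name": "units", "value": 64},
        {"name": "units", "value": 128},
    ],
}
hyper_parameters_group = {
    "default": ["default:learning_rate"],
    "dense": ["dense:units"],
}
display_hparams(hyper_parameters, hyper_parameters_group,
                num_models=2, use_colors=False)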
def display_metrics(main_metric, main_metric_values, other_metrics,
                    other_metrics_values, num_models, use_colors):
    rows = []
    headers = ['metric'] + ["model %s" % x for x in range(num_models)]
    rows.append(headers)

    # Main metric first, highlighted in green.
    row = [main_metric]
    row.extend(main_metric_values)
    if use_colors:
        row = colorize_row(row, 'green')
    rows.append(row)

    # Other metrics, colorizing every other row.
    for idx, metric in enumerate(other_metrics):
        row = [metric]
        row.extend(other_metrics_values[metric])
        if use_colors and idx % 2:
            row = colorize_row(row, 'cyan')
        rows.append(row)
    display_table(rows)
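# Illustrative call for two models (metric names and values are
# hypothetical; assumes the display_table stand-in sketched earlier).
display_metrics(
    main_metric="val_accuracy",
    main_metric_values=[0.93, 0.91],
    other_metrics=["loss", "val_loss"],
    other_metrics_values={
        "loss": [0.21, 0.25],
        "val_loss": [0.24, 0.29],
    },
    num_models=2,
    use_colors=False,
)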