import numpy as np


def multi_run_main(config):
    """Run the model once per configuration produced by grid() and report the mean/std test score."""
    print_config(config)
    set_random_seed(config['random_seed'])

    # Config entries given as lists are treated as per-run hyperparameters.
    hyperparams = []
    for k, v in config.items():
        if isinstance(v, list):
            hyperparams.append(k)

    scores = []
    configs = grid(config)
    for cnf in configs:
        print('\n')
        # Make the output directory unique for this hyperparameter combination.
        for k in hyperparams:
            cnf['out_dir'] += '_{}_{}'.format(k, cnf[k])
        print(cnf['out_dir'])

        model = ModelHandler(cnf)
        dev_metrics = model.train()
        test_metrics = model.test()
        scores.append(test_metrics[model.model.metric_name])

    print('Average score: {}'.format(np.mean(scores)))
    print('Std score: {}'.format(np.std(scores)))
def grid_search_main(config):
    """Grid-search over list-valued config entries and keep the configuration with the best dev score."""
    print_config(config)
    set_random_seed(config['random_seed'])

    grid_search_hyperparams = []
    for k, v in config.items():
        if isinstance(v, list):
            grid_search_hyperparams.append(k)

    best_config = None
    best_metric = None
    best_score = -1
    configs = grid(config)
    for cnf in configs:
        print('\n')
        # When no output directory is given, a pretrained model path is used instead.
        pretrained = cnf['out_dir'] is None
        for k in grid_search_hyperparams:
            if pretrained:
                cnf['pretrained'] += '_{}_{}'.format(k, cnf[k])
            else:
                cnf['out_dir'] += '_{}_{}'.format(k, cnf[k])
        print(cnf['pretrained'] if pretrained else cnf['out_dir'])

        model = ModelHandler(cnf)
        dev_metrics = model.train()
        # Note: 'eary_stop_metric' is kept as-is to match the key name used in the config files.
        if best_score < dev_metrics[cnf['eary_stop_metric']]:
            best_score = dev_metrics[cnf['eary_stop_metric']]
            best_config = cnf
            best_metric = dev_metrics
            print('Found a better configuration: {}'.format(best_score))

    print('\nBest configuration:')
    for k in grid_search_hyperparams:
        print('{}: {}'.format(k, best_config[k]))
    print('Best score: {}'.format(best_score))
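# The grid() helper used above is defined elsewhere in the repository. A minimal sketch of the
# behaviour assumed here (expanding every list-valued config entry into the Cartesian product of
# settings), under a hypothetical name so it does not shadow the real implementation:
import copy
import itertools


def _grid_product_sketch(config):
    """Yield one config dict per combination of the list-valued entries (illustrative only)."""
    list_keys = [k for k, v in config.items() if isinstance(v, list)]
    for values in itertools.product(*(config[k] for k in list_keys)):
        cnf = copy.deepcopy(config)
        cnf.update(dict(zip(list_keys, values)))
        yield cnf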
def main(config):
    """Single run: train and then test with the given configuration."""
    print_config(config)
    set_random_seed(config['random_seed'])

    model = ModelHandler(config)
    model.train()
    model.test()
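# How these entry points are invoked is not shown in this file. A minimal sketch, assuming a YAML
# config file and command-line flags; the argument names, flags, and dispatch rule below are
# assumptions for illustration, not the repository's actual CLI.
if __name__ == '__main__':
    import argparse

    import yaml

    parser = argparse.ArgumentParser()
    parser.add_argument('--config', required=True, help='path to the YAML config file')
    parser.add_argument('--multi_run', action='store_true', help='average scores over the grid of configs')
    parser.add_argument('--grid_search', action='store_true', help='pick the best config by dev score')
    args = parser.parse_args()

    with open(args.config, 'r') as f:
        cfg = yaml.safe_load(f)  # plain dict; list-valued entries mark hyperparameter grids

    if args.grid_search:
        grid_search_main(cfg)
    elif args.multi_run:
        multi_run_main(cfg)
    else:
        main(cfg)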