from datetime import datetime
from typing import Tuple

import torch

# Project-specific names (ExtraArgs, GridCellMeasurements, PerformanceMetrics,
# ESNModelSNLI, SNLIDataset, count_parameters, args) are assumed to be provided
# by the surrounding codebase.


def evaluate(hp: dict, extra: ExtraArgs, trial_id=0) -> Tuple[float, GridCellMeasurements]:
    """
    :return: a tuple composed of
        - a float loss minimized by the random search (the reciprocal of the
          validation accuracy)
        - a GridCellMeasurements object
    """
    if trial_id == 0:
        print(hp)

    train = extra.ds['train']
    val = extra.ds['validation']
    test = None
    if extra.is_final_trials:
        # train = SNLIDataset.merge_folds([extra.ds['train'], val])
        val = None
        test = extra.ds['test']

    model = ESNModelSNLI(hp, logname=args.logname)
    if trial_id == 0:
        print(f"# parameters: {count_parameters(model)}")

    model.fit(train, val)
    train_perf, val_perf, test_perf = model.performance(train, val, test)

    if extra.is_final_trials:
        # Save the trained model under a timestamped, test-accuracy-tagged name.
        datet = datetime.now().strftime('%b%d_%H-%M-%S')
        filename = f'SNLI_leaky-esn-attn_{datet}_{trial_id}_{round(test_perf * 100, 1)}.pt'
        torch.save(model.state_dict(), filename)

    metric_type = PerformanceMetrics.accuracy
    measurements = GridCellMeasurements(
        train_perf=train_perf,
        val_perf=val_perf,
        test_perf=test_perf,
        metric=metric_type.name,
        training_time=model.training_time,
        extra={
            metric_type.name: {
                'train': train_perf,
                'val': val_perf,
                'test': test_perf,
            }
        },
        actual_epochs=model.actual_epochs,
    )

    # Lower is better for the search driver; val_perf may be None in the
    # final trials, in which case the loss is irrelevant.
    loss = 1 / val_perf if val_perf else float('inf')
    return loss, measurements
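# --- Usage sketch (illustrative, not from the original script): how a random
# search driver might call `evaluate`, keeping the configuration with the
# lowest returned loss. `HP_SPACE`, `sample_hyperparameters`, and
# `random_search` are hypothetical names; `ExtraArgs` is assumed to carry the
# `ds` dict and `is_final_trials` flag used above.
import random

HP_SPACE = {  # hypothetical search space
    'reservoir_size': [500, 1000, 2000],
    'leaky_rate': [0.1, 0.5, 1.0],
}


def sample_hyperparameters() -> dict:
    """Draw one configuration uniformly at random from HP_SPACE."""
    return {name: random.choice(values) for name, values in HP_SPACE.items()}


def random_search(extra: ExtraArgs, n_trials: int = 50):
    """Run n_trials random trials and return the lowest-loss configuration."""
    best_loss, best_hp = float('inf'), None
    for trial_id in range(n_trials):
        hp = sample_hyperparameters()
        loss, _ = evaluate(hp, extra, trial_id)
        if loss < best_loss:
            best_loss, best_hp = loss, hp
    return best_loss, best_hp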
def evaluate(hp: dict, extra: ExtraArgs, trial_id=0) -> Tuple[float, GridCellMeasurements]:
    """
    :return: a tuple composed of
        - a float loss minimized by the random search (the reciprocal of the
          validation accuracy)
        - a GridCellMeasurements object
    """
    if trial_id == 0:
        print(hp)

    train = extra.ds['train']
    val = extra.ds['validation']
    test = None
    if extra.is_final_trials:
        train = SMSSpamDataset.merge_folds([extra.ds['train'], val])
        val = None
        test = extra.ds['test']

    model = ESNModelSMSEnsemble(
        n_models=hp['n_ensemble'],
        input_size=300,
        reservoir_size=hp['reservoir_size'],
        alpha=hp['r_alpha'],
        rescaling_method='specrad',
        hp=hp,
    )

    # Find the best value for the regularization parameter.
    # FIXME
    # if not extra.is_final_trials:
    #     best_alpha = model.find_best_alpha(train, val, hp['n_batch'])
    #     hp['r_alpha'] = best_alpha

    if trial_id == 0:
        # Closed-form count of the ensemble's trainable parameters.
        print(f"# parameters: {6 * hp['reservoir_size'] * hp['n_ensemble']}")

    model.fit(train)
    train_perf, val_perf, test_perf = model.performance(train, val, test)
    train_perf_f1, val_perf_f1, test_perf_f1 = model.performance_f1(train, val, test)
    train_perf_mcc, val_perf_mcc, test_perf_mcc = model.performance_mcc(train, val, test)

    if extra.is_final_trials:
        # Save the trained ensemble under a timestamped, test-accuracy-tagged name.
        datet = datetime.now().strftime('%b%d_%H-%M-%S')
        filename = f'SMS_leaky-esn-ensemble_{datet}_{trial_id}_{round(test_perf * 100, 1)}.pt'
        torch.save(model.state_dict(), filename)

    metric_type = PerformanceMetrics.accuracy
    measurements = GridCellMeasurements(
        train_perf=train_perf,
        val_perf=val_perf,
        test_perf=test_perf,
        metric=metric_type.name,
        training_time=model.training_time,
        extra={
            metric_type.name: {
                'train': train_perf,
                'val': val_perf,
                'test': test_perf,
            },
            PerformanceMetrics.macro_f1.name: {
                'train': train_perf_f1,
                'val': val_perf_f1,
                'test': test_perf_f1,
            },
            PerformanceMetrics.matthews_corrcoef.name: {
                'train': train_perf_mcc,
                'val': val_perf_mcc,
                'test': test_perf_mcc,
            },
        },
    )

    # Lower is better for the search driver; val_perf may be None in the
    # final trials, in which case the loss is irrelevant.
    loss = 1 / val_perf if val_perf else float('inf')
    return loss, measurements
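# --- Metric sketch (assumption): the model's performance(), performance_f1(),
# and performance_mcc() methods are assumed to wrap the standard scikit-learn
# metrics below; `classification_metrics` is a hypothetical stand-in, not the
# project's actual implementation.
from sklearn.metrics import accuracy_score, f1_score, matthews_corrcoef


def classification_metrics(y_true, y_pred) -> dict:
    """Accuracy, macro-averaged F1, and Matthews correlation for one split."""
    return {
        'accuracy': accuracy_score(y_true, y_pred),
        'macro_f1': f1_score(y_true, y_pred, average='macro'),
        'matthews_corrcoef': matthews_corrcoef(y_true, y_pred),
    }


# On a heavily imbalanced split (as in SMS spam), a constant majority-class
# prediction still scores high accuracy while macro F1 and MCC expose the
# failure, which is why all three metrics are recorded per trial:
print(classification_metrics([0, 0, 0, 0, 1], [0, 0, 0, 0, 0]))
# accuracy 0.8, macro_f1 ~0.44, matthews_corrcoef 0.0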