def test_parameters_kfold_success(self):
    """Test that an experiment runs successfully with a k-fold split."""
    result = run_experiment(
        MinMaxScaler(copy=False, feature_range=(-1, 1)),
        GaussianNB(),
        {'split_method': 'kfolds', 'n_splits': 3},
        'data_0inches.npy',
        'labels_0inches.npy',
        {'plots_number': 3},
    )
    self.assertIsNotNone(result)
def test_parameters_no_split_method_fail(self):
    """Test that an experiment fails (returns 1) when no split method is given."""
    result = run_experiment(
        MinMaxScaler(copy=False, feature_range=(-1, 1)),
        GaussianNB(),
        {},  # no 'split_method' key -> the experiment is expected to fail
        'data_0inches.npy',
        'labels_0inches.npy',
        {'plots_number': 3},
    )
    self.assertEqual(result, 1)
def _parse_arguments() -> Tuple[Namespace, str]:
    """Parse CLI arguments, validate them, and dry-run the experiment setup.

    Exits the process (status 1) on invalid search ranges, an existing output
    file, or a failed dry run.

    Returns:
        The parsed ``Namespace`` and the experiment name (falls back to the
        positional data file name when ``--experiment`` is not given).
    """
    parser = ArgumentParser()
    parser.add_argument('data_file_name')
    parser.add_argument('method', default='nelder-mead', choices=['nelder-mead', 'evolutionary'])
    parser.add_argument('-e', '--experiment', required=False)
    parser.add_argument('-o', '--output_file', default='tmp_hyperparameter_search.csv')
    parser.add_argument('-r', '--results_dir', default='tmp_results_hyperparameter_search')
    parser.add_argument('-f', '--seed_from', default=1, type=int)
    parser.add_argument('-t', '--seed_to', default=5, type=int)
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--latent_dim_loc', default=7, type=int)
    parser.add_argument('--latent_dim_scale', default=3, type=int)
    parser.add_argument('--hidden_layer_size_loc', default=50, type=int)
    parser.add_argument('--hidden_layer_size_scale', default=20, type=int)
    parser.add_argument('--latent_dim_min', default=1, type=int)
    parser.add_argument('--latent_dim_max', default=100, type=int)
    parser.add_argument('--hidden_layer_size_min', default=1, type=int)
    parser.add_argument('--hidden_layer_size_max', default=200, type=int)
    # Parameters for the evolutionary search only.
    parser.add_argument('--population_size', default=100, type=int)
    args = parser.parse_args()

    data_file_name = args.data_file_name
    experiment = args.experiment
    if not experiment:
        print(f'HyperSearch: Experiment name not explicitly set, using data file name <{data_file_name}>.')
        experiment = data_file_name
    output_file = args.output_file
    results_dir = args.results_dir

    # Sanity-check the search ranges before doing any expensive work.
    if args.latent_dim_min > args.latent_dim_max:
        # Bug fix: the original message wrongly referred to "hidden layer size max".
        print('Latent dimension min is greater than latent dimension max!', file=sys.stderr)
        sys.exit(1)
    if args.hidden_layer_size_min > args.hidden_layer_size_max:
        print('Hidden layer size min is greater than hidden layer size max!', file=sys.stderr)
        sys.exit(1)
    if os.path.exists(output_file):
        # Refuse to clobber the results of a previous search.
        print(f'HyperSearch: Output file <{output_file}> exists. Aborting.', file=sys.stderr)
        sys.exit(1)

    # Verify the experiment configuration actually loads before committing to
    # the (long-running) hyperparameter search.
    dry_run_successful = run_experiment(data_file_name, ['with', experiment],
                                        results_dir=results_dir, dry_run=True)
    if not dry_run_successful:
        print('HyperSearch: Dry run of experiment setup failed. Aborting.', file=sys.stderr)
        sys.exit(1)
    print('HyperSearch: Dry run of experiment setup successful. Continuing to hyperparameter search.')
    return args, experiment
def _evaluate_parameters_single_seed(seed: int) -> Optional[Tuple[str, float]]:
    """Run one experiment for the current hyperparameter candidate and seed.

    Relies on enclosing-scope names ``candidate``, ``args`` and ``experiment``.

    Args:
        seed: Random seed passed to the experiment run.

    Returns:
        A ``(run_dir, fitness)`` tuple where ``run_dir`` is the run's
        ``FileStorageObserver`` directory and ``fitness`` is the mean
        per-rollout RMSE against the observations, or ``None`` if the
        run raised an exception.
    """
    config_updates = {
        'seed': seed,
        'latent_dim': candidate.latent_dim,
        'observation_model': [
            f'Linear(in_features, {candidate.hidden_layer_size})',
            'Tanh()',
            f'Linear({candidate.hidden_layer_size}, out_features)',
        ],
    }
    try:
        run = run_experiment(args.data_file_name, ['with', experiment],
                             results_dir=args.results_dir,
                             config_updates=config_updates, debug=True)
    except Exception as e:
        # Best-effort: report the failure and score this seed as "no result".
        print(f'HyperSearch: A run failed with an exception: {e}', file=sys.stderr)
        return None
    config = ExperimentConfig.from_dict(run.config)
    result = ExperimentResult.from_dict(config, run.config, run.experiment_info, run.result)
    _, (obs_rollouts, _), _ = compute_rollout(config, result, config.N)
    # Fitness: RMSE of each rollout vs. the observations, averaged over rollouts.
    # (Was a manual accumulator loop with an unused enumerate index.)
    fitness = sum(np.sqrt(((obs_rollout - result.observations) ** 2).mean())
                  for obs_rollout in obs_rollouts)
    # Indexing [0] keeps the original IndexError if no FileStorageObserver exists.
    run_dir = [obs for obs in run.observers if isinstance(obs, FileStorageObserver)][0].dir
    return run_dir, fitness / len(obs_rollouts)
from src.experiment import run_experiment

# Each entry is [(experiment_name, *hyperparameters), data_overrides].
# An empty dict means the default data configuration is used.
simple_fc_run = [('simple_fc', 1e-3, 1), {}]            # simple_fc(LR, EPOCHS)
simple_fc2_run = [('simple_fc2', 1e-4, 1e-5, 1), {}]    # simple_fc2(LR, WD, EPOCHS)
params = [simple_fc_run, simple_fc2_run]

reports_fname = run_experiment(params)
print(reports_fname)