def run_experiment(model_name, config, experiment_root=None,
                   skip_features=False, skip_training=False):
    """Run a full cross-validation experiment for one model.

    Parameters
    ----------
    model_name : str
        Name of the NN model configuration [in models.py].

    config : str
        Path to the experiment configuration file (loaded via C.Config.load).

    experiment_root : str or None
        Optional prefix for the experiment name; when given, the experiment
        is named "<experiment_root>_<model_name>", otherwise just model_name.

    skip_features : bool
        If True, the driver skips feature extraction.

    skip_training : bool
        If True, the driver skips training; cleaning is skipped as well
        (skip_cleaning is tied to skip_training below).

    Returns
    -------
    result
        Whatever driver.fit_and_predict_cross_validation() returns.
    """
    logger.info("run_experiment(model_name='{}')".format(model_name))
    config = C.Config.load(config)

    experiment_name = "{}{}".format(
        "{}_".format(experiment_root) if experiment_root else "",
        model_name)
    logger.info("Running Experiment: {}".format(
        utils.colored(experiment_name, 'magenta')))

    driver = hcnn.driver.Driver(config,
                                model_name=model_name,
                                experiment_name=experiment_name,
                                load_features=True,
                                skip_features=skip_features,
                                skip_training=skip_training,
                                # Deliberately reuse skip_training: when no
                                # training happens, nothing needs cleaning.
                                skip_cleaning=skip_training)
    result = driver.fit_and_predict_cross_validation()
    return result
def predict(config, experiment_name, test_set, model_name, select_epoch=None):
    """Predict results on all datasets and report results.

    Parameters
    ----------
    config : str
        Path to the experiment configuration file.

    experiment_name : str
        Name of the experiment. Files are saved in a folder of this name.

    test_set : str
        NOTE(review): accepted for signature parity with train(), but not
        used here — the Driver is built without it. Confirm whether it
        should be forwarded.

    model_name : str
        Name of the model to use for training. Must match the training
        configuration.

    select_epoch : str or None
        Which model params to select. Use the epoch number for this, for
        instance "1830" would use the model file "params1830.npz".
        If None, uses "final.npz"

    Returns
    -------
    results
        The per-file prediction results from driver.predict().
    """
    print(utils.colored("Evaluating"))
    config = C.Config.load(config)

    driver = hcnn.driver.Driver(config,
                                model_name=model_name,
                                experiment_name=experiment_name,
                                load_features=True)
    results = driver.predict(select_epoch)
    logger.info("Generated results for {} files.".format(len(results)))
    # Return the results so the dispatcher in handle_arguments gets a
    # meaningful value, consistent with train()/run_experiment().
    return results
def train(config, experiment_name, test_set, model_name):
    """Run the training loop for a single experiment split.

    Parameters
    ----------
    config : str
        Full path

    experiment_name : str
        Name of the experiment. Files are saved in a folder of this name.

    test_set : str
        String in ["rwc", "uiowa", "philharmonia"] specifying which
        dataset to use as the test set.

    model_name : str
        Name of the model to use for training.

    Returns
    -------
    The value of driver.train_model().
    """
    banner = utils.colored("Training experiment: {}".format(experiment_name))
    print(banner)
    logger.info(
        "Training model '{}' with test_set '{}'".format(model_name, test_set))

    exp_driver = hcnn.driver.Driver(
        config, test_set,
        model_name=model_name,
        experiment_name=experiment_name,
        load_features=True)
    return exp_driver.train_model()
def run_tests(mode):
    """Run the requested test suite(s).

    Parameters
    ----------
    mode : str
        One of 'all', 'unit', 'data', or 'model'.

    Returns
    -------
    bool
        True if every collected test result passed (vacuously True when
        nothing was collected, e.g. mode == 'unit').
    """
    logger.info("run_tests(mode='{}')".format(mode))
    config = INT_CONFIG_PATH

    outcomes = []
    if mode in ('all', 'unit'):
        # NOTE: the unit-test return value is not collected; this matches
        # the prior behavior of the function.
        run_unit_tests()
    if mode == 'data':
        # Data tests run only when explicitly requested, not under 'all'.
        outcomes.append(test_data(config))
    if mode in ('all', 'model'):
        outcomes.append(integration_test(config))
    return all(outcomes)
def clean(config_path, force=False):
    """Clean dataframes and extracted audio/features.

    Parameters
    ----------
    config_path : str
        Path to the config file; its 'paths/feature_dir' entry names the
        directory to delete.

    force : bool
        If True, delete without prompting.

    Returns
    -------
    bool
        True when the directory was removed (or the user chose to skip);
        the process exits with status 1 if the user declines.
    """
    config = C.Config.load(config_path)
    data_path = os.path.expanduser(config['paths/feature_dir'])

    # Clean data
    if not force:
        answer = input("Are you sure you want to delete {} (y|s to skip): "
                       .format(data_path))
        if answer in ['y', 'Y']:
            pass
        elif answer in ['s', 'S']:
            return True
        else:
            print("Exiting")
            sys.exit(1)

    # Guard against a missing directory so a repeated clean is a no-op
    # instead of raising an unhandled OSError from rmtree.
    if os.path.exists(data_path):
        shutil.rmtree(data_path)
    logger.info("clean done.")
    return True
def handle_arguments(arguments):
    """Dispatch a parsed docopt argument dict to the matching command.

    Parameters
    ----------
    arguments : dict
        docopt-style mapping of flags ('--skip_training'), commands
        ('run', 'experiment', 'test', ...) and positionals ('<model>').

    Returns
    -------
    result
        The return value of the selected command, or None if no known
        mode flag was set (previously this raised NameError).
    """
    config = CONFIG_PATH
    logger.debug(arguments)

    # Default so the final `return result` is always defined, even if
    # docopt hands us an argument dict with no recognized mode.
    result = None

    # Run modes
    if arguments['run']:
        model = arguments['<model>']
        skip_training = arguments['--skip_training']
        skip_features = arguments['--skip_features']
        logger.info("Run Mode; model={}".format(model))
        if model:
            result = run_experiment(model, config,
                                    skip_features=skip_features,
                                    skip_training=skip_training)
        else:
            result = run_all_experiments(config,
                                         skip_features=skip_features,
                                         skip_training=skip_training)
    elif arguments['extract_features']:
        logger.info('Extracting features.')
        result = extract_features(config)

    # Basic Experiment modes
    elif arguments['experiment']:
        if arguments['fit_and_predict']:
            mode = 'fit_and_predict'
        elif arguments['train']:
            mode = 'train'
        elif arguments['predict']:
            mode = 'predict'
        elif arguments['analyze']:
            mode = 'analyze'
        else:
            # docopt should not allow us to get here.
            raise ValueError("No valid experiment mode set.")

        experiment_name = arguments['<experiment_name>']
        test_set = arguments['<test_set>']
        model = arguments['<model>']
        logger.info("Running experiment '{}' with test_set '{}' "
                    "using model '{}'".format(
                        experiment_name, test_set, model))

        # Use the 'mode' to select the function to call.
        result = globals().get(mode)(config, experiment_name,
                                     test_set, model)

    # Test modes
    elif arguments['test']:
        test_type = 'all'
        if arguments['data']:
            test_type = 'data'
        elif arguments['model']:
            test_type = 'model'
        elif arguments['unit']:
            test_type = 'unit'
        logger.info('Running {} tests'.format(test_type))
        result = run_tests(test_type)

    elif arguments['collect_results']:
        experiment_name = arguments.get('<experiment_name>', None)
        destination = arguments['<results_destination>']
        integration_mode = arguments['--integration']
        result = collect_results(config, destination, experiment_name,
                                 use_integration=integration_mode)

    return result