def process_jobs(config): common_config = config.common; for job in config.jobs: log.info('Processing job {} with base {}'.format(job.name, job.base)); job_config = merge_params(common_config, config[job.base]); log.debug('job overrides: {}'.format(job.overrides)); job_config = merge_params(job_config, job.overrides); job_config.experiment_root = os.path.join( config.output_root, job_config.type, job.name ); log.debug('experiment root: {}'.format(job_config.experiment_root)); print job_config; # try: if job_config.type == 'cnn': train_convnet(job_config); elif job_config.type == 'fftcnn': train_convnet(job_config); elif job_config.type == 'sda': train_mlp(job_config); else: log.error('unsupported job type {}'.format(job_config.type));
def process_jobs(config): common_config = config.common for job in config.jobs: log.info('Processing job {} with base {}'.format(job.name, job.base)) job_config = merge_params(common_config, config[job.base]) log.debug('job overrides: {}'.format(job.overrides)) job_config = merge_params(job_config, job.overrides) job_config.experiment_root = os.path.join(config.output_root, job_config.type, job.name) log.debug('experiment root: {}'.format(job_config.experiment_root)) print job_config # try: if job_config.type == 'cnn': train_convnet(job_config) elif job_config.type == 'fftcnn': train_convnet(job_config) elif job_config.type == 'sda': train_mlp(job_config) else: log.error('unsupported job type {}'.format(job_config.type))
def load_datasets_for_subjects(dataset_params, subjects, suffix=''):
    """Load the datasets selected in ``dataset_names`` for the given subjects.

    Parameters:
        dataset_params: mapping of dataset key -> parameter dict.
        subjects: subject selection injected into each dataset's params.
        suffix: appended to both the dataset name and the result key.

    Returns:
        dict mapping ``key + suffix`` to the loaded dataset object.

    NOTE(review): relies on module-level ``config`` and ``dataset_names``
    (not visible here) — confirm they are defined in this module.
    """
    datasets = {}
    for key, params in dataset_params.items():
        if key not in dataset_names:
            continue
        # Work on a copy: the original mutated the caller's dict in place,
        # so calling this twice kept re-appending the suffix to 'name'.
        params = dict(params)
        params['subjects'] = subjects
        params['name'] = params['name'] + suffix
        dataset_config = merge_params(config, params)
        dataset, dataset_yaml = load_yaml_file(
            os.path.join(os.path.dirname(__file__), 'run', 'dataset_template.yaml'),
            params=dataset_config,
        )
        datasets[key + suffix] = dataset
        # Drop local references immediately; datasets can be large.
        del dataset, dataset_yaml
    return datasets
def run_experiment(config, hyper_params, random_seeds): experiment_root = hyper_params['experiment_root']; best_acc = -1; best_results = [np.NAN, np.NAN, np.NAN]; for seed in random_seeds: hyper_params['random_seed'] = seed; hyper_params['experiment_root'] = experiment_root + '.' + str(seed); params = merge_params(config, hyper_params); if os.path.exists(os.path.join(params.experiment_root, 'mlp.pkl')): print 'found existing mlp.pkl: {}'.format(params.experiment_root); else: print 'no mlp.pkl found at: {}'.format(params.experiment_root); if not config.get('only_extract_results', False): train_convnet(params); try: values = extract_results(params.experiment_root, mode='misclass'); results = np.multiply(100, [ # 1 - values['test_y_misclass'], # 1 - values['test_wseq_misclass_rate'], # 1 - values['test_wtrial_misclass_rate']]); 1 - values['frame_misclass'], 1 - values['sequence_misclass'], 1 - values['trial_misclass']]); # save the best results if np.max(results[2]) > best_acc: best_results = results; best_acc = np.max(results[2]); except: print traceback.format_exc(); results = [np.NAN, np.NAN, np.NAN]; print 'results for seed {}: {}'.format(seed, results); if params.save_output: output = extract_output(params, values['best_epoch']); save(os.path.join(params.experiment_root, 'best_output.pklz'), output); print 'best results: {}'.format(best_results); return best_results;
def run_experiment(config, hyper_params, random_seeds): if config.global_sda == False: hyper_params = fix_local_sda_config(hyper_params) experiment_root = hyper_params['experiment_root'] best_acc = -1 best_results = [np.NAN, np.NAN, np.NAN] for seed in random_seeds: hyper_params['random_seed'] = seed hyper_params['experiment_root'] = experiment_root + '.' + str(seed) params = merge_params(config, hyper_params) if os.path.exists(os.path.join(params.experiment_root, 'mlp.pkl')): print 'found existing mlp.pkl: {}'.format(params.experiment_root) else: print 'no mlp.pkl found at: {}'.format(params.experiment_root) if not config.get('only_extract_results', False): train_mlp(params) try: values = extract_results(params.experiment_root, mode='misclass') results = np.multiply( 100, [ # 1 - values['test_y_misclass'], # 1 - values['test_wseq_misclass_rate'], # 1 - values['test_wtrial_misclass_rate']]); 1 - values['frame_misclass'], 1 - values['sequence_misclass'], 1 - values['trial_misclass'] ]) # save the best results if np.max(results[2]) > best_acc: best_results = results best_acc = np.max(results[2]) except: print traceback.format_exc() results = [np.NAN, np.NAN, np.NAN] print 'results for seed {}: {}'.format(seed, results) print 'best results: {}'.format(best_results) return best_results
def load_datasets_for_subjects(dataset_params, subjects, suffix=''):
    """Build a dict of loaded datasets, one per selected dataset key.

    Only keys present in the module-level ``dataset_names`` are loaded;
    each entry's params are updated with the subject list and suffixed
    name, merged onto the module-level ``config``, and instantiated via
    the dataset template YAML. Result keys are ``key + suffix``.
    """
    loaded = {}
    template_path = os.path.join(
        os.path.dirname(__file__), 'run', 'dataset_template.yaml')
    for key, params in dataset_params.items():
        if key not in dataset_names:
            continue
        params['subjects'] = subjects
        params['name'] = params['name'] + suffix
        merged = merge_params(config, params)
        dataset, dataset_yaml = load_yaml_file(template_path, params=merged)
        loaded[key + suffix] = dataset
        # Release local references right away; datasets can be large.
        del dataset, dataset_yaml
    return loaded
# NOTE(review): truncated fragment — begins mid-function (the `with` belongs to
# an unseen enclosing def) and ends mid-expression; duplicates the __main__
# block elsewhere in this file. Left byte-identical; do not edit in isolation.
with log_timing(log, 'training MLP'): train.main_loop(); log.info('done'); def get_default_config_path(): return os.path.join(os.path.dirname(__file__),'train_sda_mlp.cfg'); if __name__ == '__main__': # config = load_config(default_config='../../train_sda.cfg', reset_logging=False); config = load_config(default_config=get_default_config_path(), reset_logging=False); hyper_params = { }; params = merge_params(config, hyper_params); if not config.get('only_extract_results', False): train_mlp(params); scan_for_best_performance(params.experiment_root, 'valid_y_misclass'); scan_for_best_performance(params.experiment_root, 'valid_ptrial_misclass_rate') values = extract_results(config.experiment_root, mode='misclass'); print np.multiply(100, [ # 1 - values['test_y_misclass'], # 1 - values['test_wseq_misclass_rate'], # 1 - values['test_wtrial_misclass_rate']]); 1 - values['frame_misclass'],
# NOTE(review): fragment — starts mid-loop-body of process_jobs (its def and
# for-header are missing here) followed by the script entry point; duplicates
# L1/L2 content. Left byte-identical; do not edit in isolation.
log.debug('job overrides: {}'.format(job.overrides)) job_config = merge_params(job_config, job.overrides) job_config.experiment_root = os.path.join(config.output_root, job_config.type, job.name) log.debug('experiment root: {}'.format(job_config.experiment_root)) print job_config # try: if job_config.type == 'cnn': train_convnet(job_config) elif job_config.type == 'fftcnn': train_convnet(job_config) elif job_config.type == 'sda': train_mlp(job_config) else: log.error('unsupported job type {}'.format(job_config.type)) # except: # log.fatal("Unexpected error:", sys.exc_info()); if __name__ == '__main__': default_config = os.path.join(os.path.dirname(__file__), 'batch.cfg') config = load_config(default_config=default_config, reset_logging=False) config = merge_params(Config(file(default_config)), config) process_jobs(config)
# NOTE(review): truncated fragment — tail of an unseen function plus the script
# entry point, cut off mid np.multiply(...) call; duplicates the fragment at
# the top of this file. Left byte-identical; do not edit in isolation.
log.info('done') def get_default_config_path(): return os.path.join(os.path.dirname(__file__), 'train_sda_mlp.cfg') if __name__ == '__main__': # config = load_config(default_config='../../train_sda.cfg', reset_logging=False); config = load_config(default_config=get_default_config_path(), reset_logging=False) hyper_params = {} params = merge_params(config, hyper_params) if not config.get('only_extract_results', False): train_mlp(params) scan_for_best_performance(params.experiment_root, 'valid_y_misclass') scan_for_best_performance(params.experiment_root, 'valid_ptrial_misclass_rate') values = extract_results(config.experiment_root, mode='misclass') print np.multiply( 100, [ # 1 - values['test_y_misclass'], # 1 - values['test_wseq_misclass_rate'],
# NOTE(review): fragment — starts mid-loop-body of process_jobs (its def and
# for-header are missing here) followed by the script entry point; duplicates
# L1/L2/L8 content. Left byte-identical; do not edit in isolation.
job_config.experiment_root = os.path.join( config.output_root, job_config.type, job.name ); log.debug('experiment root: {}'.format(job_config.experiment_root)); print job_config; # try: if job_config.type == 'cnn': train_convnet(job_config); elif job_config.type == 'fftcnn': train_convnet(job_config); elif job_config.type == 'sda': train_mlp(job_config); else: log.error('unsupported job type {}'.format(job_config.type)); # except: # log.fatal("Unexpected error:", sys.exc_info()); if __name__ == '__main__': default_config = os.path.join(os.path.dirname(__file__), 'batch.cfg'); config = load_config(default_config=default_config, reset_logging=False); config = merge_params(Config(file(default_config)), config); process_jobs(config);
def pair_cross_trial_test(config, pairs=None): if pairs is None: pairs = [ [18, 19], [20, 21], [22, 23], [0, 1], [15, 9], [16, 17], [11, 5], [12, 6], [2, 3], [10, 4], [13, 7], [14, 8], ] # config.experiment_root = os.path.join(config.experiment_root, 'cross-trial') ; accuracy = np.zeros(len(pairs)) results = np.zeros([len(pairs), 3]) for i in xrange(len(pairs)): test_stimulus_ids = pairs[i] train_stimulus_ids = set() for j in xrange(48): if not j in test_stimulus_ids: train_stimulus_ids.add(j) train_stimulus_ids = list(train_stimulus_ids) log.info('training stimuli: {} \t test stimuli: {}'.format( train_stimulus_ids, test_stimulus_ids)) hyper_params = { 'experiment_root': os.path.join(config.experiment_root, 'pair' + str(pairs[i])), 'remove_train_stimulus_ids': test_stimulus_ids, 'remove_test_stimulus_ids': train_stimulus_ids, } params = merge_params(config, hyper_params) # dummy values for testing # results[i] = [i, i*10, i*100.]; # accuracy[i] = 0; # continue; if os.path.exists(os.path.join(params.experiment_root, 'mlp.pkl')): print 'found existing mlp.pkl: {}'.format(params.experiment_root) else: print 'no mlp.pkl found at: {}'.format(params.experiment_root) if not config.get('only_extract_results', False): train_mlp(params) values = extract_results(params.experiment_root, mode='misclass') results[i] = np.multiply( 100, [ # 1 - values['test_y_misclass'], # 1 - values['test_wseq_misclass_rate'], # 1 - values['test_wtrial_misclass_rate']]); 1 - values['frame_misclass'], 1 - values['sequence_misclass'], 1 - values['trial_misclass'] ]) accuracy[i] = 100 * (1 - extract_best_result( params.experiment_root, mode='misclass', check_dataset='test')[0]) print results print results.mean(axis=0) print results.max(axis=1) print accuracy print accuracy.mean() return results, accuracy
# NOTE(review): duplicate of the pair_cross_trial_test above, collapsed onto
# one line. The nesting of the `if not config.get('only_extract_results', ...)`
# check relative to the preceding else-branch is ambiguous here, so the text is
# left byte-identical rather than reformatted; prefer the version above.
def pair_cross_trial_test(config, pairs=None): if pairs is None: pairs = [ [18, 19], [20, 21], [22, 23], [ 0, 1], [15, 9], [16, 17], [11, 5], [12, 6], [ 2, 3], [10, 4], [13, 7], [14, 8], ]; # config.experiment_root = os.path.join(config.experiment_root, 'cross-trial') ; accuracy = np.zeros(len(pairs)) results = np.zeros([len(pairs),3]); for i in xrange(len(pairs)): test_stimulus_ids = pairs[i]; train_stimulus_ids = set(); for j in xrange(48): if not j in test_stimulus_ids: train_stimulus_ids.add(j); train_stimulus_ids = list(train_stimulus_ids); log.info('training stimuli: {} \t test stimuli: {}'.format(train_stimulus_ids, test_stimulus_ids)); hyper_params = { 'experiment_root' : os.path.join(config.experiment_root, 'pair'+str(pairs[i])), 'remove_train_stimulus_ids' : test_stimulus_ids, 'remove_test_stimulus_ids' : train_stimulus_ids, }; params = merge_params(config, hyper_params); # dummy values for testing # results[i] = [i, i*10, i*100.]; # accuracy[i] = 0; # continue; if os.path.exists(os.path.join(params.experiment_root, 'mlp.pkl')): print 'found existing mlp.pkl: {}'.format(params.experiment_root); else: print 'no mlp.pkl found at: {}'.format(params.experiment_root); if not config.get('only_extract_results', False): train_mlp(params); values = extract_results(params.experiment_root, mode='misclass'); results[i] = np.multiply(100, [ # 1 - values['test_y_misclass'], # 1 - values['test_wseq_misclass_rate'], # 1 - values['test_wtrial_misclass_rate']]); 1 - values['frame_misclass'], 1 - values['sequence_misclass'], 1 - values['trial_misclass']]); accuracy[i] = 100 * (1 - extract_best_result(params.experiment_root, mode='misclass', check_dataset='test')[0]); print results; print results.mean(axis=0); print results.max(axis=1); print accuracy; print accuracy.mean(); return results, accuracy;