def test_prepare_emodel_dirs_multi_process():
    """prepare_combos.prepare_emodel_dirs_multi_process: test prepare_emodel_dirs based on test example 'simple1' using multiprocessing. """
    # directory that will hold this test's output
    out_dir = os.path.join(TMP_DIR, 'test_prepare_emodel_dirs')
    tools.makedirs(out_dir)

    # helper to build one entry of the final e-model map
    def _final_entry(branch, cm, morph):
        return {'main_path': '.', 'seed': 2, 'rank': 0, 'notes': '',
                'branch': branch, 'params': {'cm': cm},
                'fitness': {'Step1.SpikeCount': 20.0},
                'score': 104.72906197480131, 'morph_path': morph}

    # input parameters: two e-models with their e-type mapping
    final_dict = {
        'emodel1': _final_entry('emodel1', 1.0, 'morphologies/morph1.asc'),
        'emodel2': _final_entry('emodel2', 0.5, 'morphologies/morph2.asc'),
    }
    emodel_etype_map = {
        'emodel1': {'mm_recipe': 'emodel1', 'etype': 'etype1',
                    'layer': ['1', 'str1']},
        'emodel2': {'mm_recipe': 'emodel2', 'etype': 'etype2',
                    'layer': ['1', '2']},
    }
    emodels_dir = os.path.join(out_dir, 'tmp/emodels/')
    opt_dir = os.path.join(TEST_DATA_DIR, 'data/emodels_dir/subdir/')
    emodels_hoc_dir = os.path.join(out_dir, './output/emodels_hoc/')
    hoc_template = os.path.abspath(
        os.path.join(TEMPLATE_DIR, "cell_template_neurodamus.jinja2"))
    emodels_in_repo = False
    continu = False

    # run function under test
    with tools.cd(TEST_DATA_DIR):
        ret = prepare_emodel_dirs.prepare_emodel_dirs(
            final_dict, emodel_etype_map, emodels_dir, opt_dir,
            emodels_hoc_dir, emodels_in_repo, hoc_template, continu,
            n_processes=None)

    # verify returned map and created directories
    expected_ret = {em: os.path.join(emodels_dir, em) for em in final_dict}
    nt.assert_dict_equal(ret, expected_ret)
    nt.assert_true(os.path.isdir(emodels_dir))
    nt.assert_true(os.path.isdir(emodels_hoc_dir))
def _prepare_config_json(original_filename, test_dir):
    """Helper function to prepare new configuration file."""
    conf = tools.load_json(original_filename)
    # redirect all temporary and output locations into the test directory
    out_dir = os.path.join(test_dir, 'output')
    conf['tmp_dir'] = os.path.join(test_dir, 'tmp')
    conf['output_dir'] = out_dir
    conf['scores_db'] = os.path.join(out_dir, 'scores.sqlite')
    conf['emodels_hoc_dir'] = os.path.join(out_dir, 'emodels_hoc')
    tools.makedirs(test_dir)
    return tools.write_json(test_dir, 'config.json', conf)
def test_json(): """bluepymm.tools: test load_json and write_json""" output_dir = TMP_DIR output_name = 'tmp.json' config = {'test': ['1', 'two']} tools.makedirs(output_dir) ret_path = tools.write_json(output_dir, output_name, config) nt.assert_equal(os.path.join(output_dir, output_name), ret_path) ret = tools.load_json(ret_path) nt.assert_dict_equal(config, ret)
def test_save_megate_results_compliant():
    """bluepymm.select_combos: test save_megate_results neuron compliant."""
    rows = [
        ('morph1', 1, 'mtype1', 'etype1', 'emodel1',
         'emodel1_mtype1_1_morph1', '', ''),
        ('morph2', 1, 'mtype2', 'etype1', 'emodel1',
         'emodel1_mtype2_1_morph2', '', ''),
        ('morph1', 1, 'mtype1', 'etype2', 'emodel2',
         'emodel2_mtype1_1_morph1', '', ''),
    ]
    out_dir = os.path.join(TMP_DIR, 'test_save_megate_results_compliant')
    tools.makedirs(out_dir)
    # no sort key, NEURON-compliant names enabled
    _test_save_megate_results(rows, None, out_dir, True)
def test_save_megate_results_sort():
    """bluepymm.select_combos: test save_megate_results sorted."""
    rows = [
        ('morph1', 1, 'mtype1', 'etype1', 'emodel1',
         'emodel1_mtype1_1_morph1', '', ''),
        ('morph1', 1, 'mtype1', 'etype2', 'emodel2',
         'emodel2_mtype1_1_morph1', '', ''),
        ('morph2', 1, 'mtype2', 'etype1', 'emodel1',
         'emodel1_mtype2_1_morph2', '', ''),
    ]
    out_dir = os.path.join(TMP_DIR, 'test_save_megate_results_sort')
    tools.makedirs(out_dir)
    # sort on 'combo_name', NEURON-compliant names disabled
    _test_save_megate_results(rows, 'combo_name', out_dir, False)
def pdf_file(pdf_filename): """Create and return a PDF file. Args: pdf_filename: path to PDF file Returns: A multi-page PDF file. """ tools.makedirs(os.path.dirname(pdf_filename)) return PdfPages(pdf_filename)
def prepare_emodel_dirs(
        final_dict,
        emodel_etype_map,
        emodels_dir,
        opt_dir,
        emodels_hoc_dir,
        emodels_in_repo,
        continu=False,
        n_processes=None):
    """Prepare the directories for the emodels.

    Args:
        final_dict: final e-model map
        emodel_etype_map: e-model e-type map
        emodels_dir: absolute path to the directory with all e-models. This
            directory is created by this function if it does not exist yet.
        opt_dir: directory with all opt e-models (TODO: clarify)
        emodels_hoc_dir: absolute path to the directory to which the .hoc
            files will be written out. Created by this function if it does
            not exist yet.
        emodels_in_repo: True if the input e-models are organized in
            separate branches of a git repository, false if the e-models are
            organized into separate subdirectories.
        continu: True if this BluePyMM run builds on a previous run, False
            otherwise. Default is False.
        n_processes: number of worker processes to use; None lets
            multiprocessing pick os.cpu_count(). Default is None.

    Return:
        A dict mapping e-models to prepared e-model directories.
    """
    tools.makedirs(emodels_dir)
    tools.makedirs(emodels_hoc_dir)

    # one argument tuple per e-model, consumed by prepare_emodel_dir
    arg_list = []
    for original_emodel in emodel_etype_map:
        emodel = emodel_etype_map[original_emodel]['mm_recipe']
        emodel_dict = final_dict[original_emodel]
        arg_list.append(
            (original_emodel, emodel, emodel_dict, emodels_dir, opt_dir,
             emodels_hoc_dir, emodels_in_repo, continu))

    print('Parallelising preparation of e-model directories')
    emodel_dirs = {}
    # maxtasksperchild=1 gives each task a fresh worker process
    pool = multiprocessing.Pool(processes=n_processes, maxtasksperchild=1)
    try:
        for emodel_dir_dict in pool.map(prepare_emodel_dir, arg_list,
                                        chunksize=1):
            emodel_dirs.update(emodel_dir_dict)
    finally:
        # bug fix: the pool was never closed/joined, leaking worker
        # processes on every call
        pool.close()
        pool.join()

    return emodel_dirs
def test_prepare_emodel_dir():
    """prepare_combos.prepare_emodel_dirs: test prepare_emodel_dir based on test example 'simple1'. """
    # directory that will hold this test's output
    test_dir = os.path.join(TMP_DIR, 'test_prepare_emodel_dir')
    tools.makedirs(test_dir)

    # input parameters
    original_emodel = 'emodel1'
    emodel = 'emodel1'
    emodel_dict = {'main_path': '.',
                   'seed': 2,
                   'rank': 0,
                   'notes': '',
                   'branch': 'emodel1',
                   'params': {'cm': 1.0},
                   'fitness': {'Step1.SpikeCount': 20.0},
                   'score': 104.72906197480131,
                   'morph_path': 'morphologies/morph1.asc'}
    emodels_dir = os.path.join(test_dir, 'tmp/emodels/')
    opt_dir = os.path.join(TEST_DATA_DIR, 'data/emodels_dir/subdir/')
    hoc_dir = os.path.join(test_dir, 'output/emodels_hoc/')
    emodels_in_repo = False
    continu = False

    with tools.cd(TEST_DATA_DIR):
        # create output directories and run function
        tools.makedirs(emodels_dir)
        tools.makedirs(hoc_dir)
        args = (original_emodel, emodel, emodel_dict, emodels_dir, opt_dir,
                os.path.abspath(hoc_dir), emodels_in_repo, continu)
        ret = prepare_emodel_dirs.prepare_emodel_dir(args)

    # side effects: e-model directory and .hoc file were created
    nt.assert_true(os.path.isdir(os.path.join(emodels_dir, emodel)))
    nt.assert_true(
        os.path.isfile(os.path.join(hoc_dir, '{}.hoc'.format(emodel))))

    # returned dict maps both e-model names to the prepared directory
    emodel_dir = os.path.join(emodels_dir, emodel)
    nt.assert_dict_equal(
        ret, {emodel: emodel_dir, original_emodel: emodel_dir})
def test_makedirs(): """bluepymm.tools: test makedirs""" make_dir = os.path.join(TMP_DIR, 'make_dir') tools.makedirs(make_dir) nt.assert_true(os.path.isdir(make_dir)) # try again -> no error make_dir = os.path.join(TMP_DIR, 'make_dir') tools.makedirs(make_dir) nt.assert_true(os.path.isdir(make_dir)) # causes error that is not caught nt.assert_raises(OSError, tools.makedirs, '')
def prepare_combos(conf_filename, continu, n_processes=None):
    """Prepare combos"""
    print('Reading configuration at %s' % conf_filename)
    config = tools.load_json(conf_filename)

    final_dict, emodel_dirs = prepare_emodels(
        config, continu, os.path.abspath(config['scores_db']), n_processes)

    # Save output
    # TODO: gather all output business here?
    out_dir = config['output_dir']
    tools.makedirs(out_dir)
    for fname, payload in (('final.json', final_dict),
                           ('emodel_dirs.json', emodel_dirs)):
        tools.write_json(out_dir, fname, payload)
def test_read_and_process_sqlite_score_tables_error():
    """select_combos.sqlite_io: test read_and_process_sqlite_score_tables excep
    """
    # build a database whose 'scores' and 'score_values' tables have
    # incompatible numbers of rows (two vs. one)
    scores = pandas.DataFrame({'test': [1, 3]})
    score_values = pandas.DataFrame({'value': 2}, index=[0])
    db_dir = os.path.join(
        TMP_DIR, 'test_read_and_process_sqlite_score_tables_error')
    tools.makedirs(db_dir)
    db_path = _create_database(db_dir, 'test_db_error.sql', scores,
                               score_values)

    # reading the database with mismatched row counts -> exception
    nt.assert_raises(Exception,
                     sqlite_io.read_and_process_sqlite_score_tables,
                     db_path)
def _test_create_and_write_hoc_file(test_dir, emodel, emodel_dir, hoc_dir,
                                    emodel_parameters, template, morph_path,
                                    model_name):
    """Test create_and_write_hoc_files"""
    with tools.cd(test_dir):
        tools.makedirs(hoc_dir)
        prepare_emodel_dirs.create_and_write_hoc_file(
            emodel, emodel_dir, hoc_dir, emodel_parameters, template,
            morph_path=morph_path, model_name=model_name)

        # TODO: test hoc file contents
        # the file is named after model_name when given, emodel otherwise
        expected_filename = '{}.hoc'.format(model_name or emodel)
        nt.assert_true(
            os.path.isfile(os.path.join(hoc_dir, expected_filename)))
def main(arg_list): """Main""" # parse and process arguments args = get_parser().parse_args(arg_list) config = tools.load_json(args.conf_filename) config_dir = os.path.abspath(os.path.dirname(args.conf_filename)) config = add_full_paths(config, config_dir) # process configuration mecombo_emodel_filename = config['mecombo_emodel_filename'] combinations_dict = load_combinations_dict(mecombo_emodel_filename) final_dict = tools.load_json(config['final_json_path']) emodels_dir = config['emodels_tmp_dir'] # create output directory for .hoc files tools.makedirs(config['hoc_output_dir']) # create hoc files create_hoc_files(combinations_dict, emodels_dir, final_dict, config['template'], config['hoc_output_dir'])
def test_read_and_process_sqlite_score_tables():
    """select_combos.sqlite_io: test read_and_process_sqlite_score_tables"""
    # build a database with one row in 'scores' and one in 'score_values'
    scores = pandas.DataFrame({'test': 1}, index=[0])
    score_values = pandas.DataFrame({'value': 2}, index=[0])
    db_dir = os.path.join(
        TMP_DIR, 'test_read_and_process_sqlite_score_tables')
    tools.makedirs(db_dir)
    db_path = _create_database(db_dir, 'test_db.sql', scores, score_values)

    # read database
    ret_scores, ret_score_values = \
        sqlite_io.read_and_process_sqlite_score_tables(db_path)

    # verify: 'index' column dropped, tables round-trip unchanged
    nt.assert_false('index' in ret_score_values.columns.values)
    pandas.util.testing.assert_frame_equal(ret_scores, scores)
    pandas.util.testing.assert_frame_equal(ret_score_values, score_values)
def save_megate_results(extneurondb, output_dir,
                        extneurondb_filename='extneurondb.dat',
                        mecombo_emodel_filename='mecombo_emodel.tsv',
                        sort_key=None,
                        make_names_neuron_compliant=False,
                        extra_value_errors=True):
    """Write results of megating to two files.

    Args:
        extneurondb: pandas.DataFrame with result of me-gating
        output_dir: path to output directory
        extneurondb_filename: filename of extended neuron database. The
            columns of this file are ordered as 'morph_name', 'layer',
            'fullmtype', 'etype', 'combo_name'. Values are separated by a
            space. Default filename is 'extneurondb.dat'.
        mecombo_emodel_filename: filename of 'mecombo_emodel' file. Values
            are separated with a tab. Default filename is
            'mecombo_emodel.tsv'.
        sort_key: key to sort database in ascending order before writing
            out to file. Default is None.
        make_names_neuron_compliant: boolean indicating whether the combo
            name should be made NEURON-compliant. Default is False. If set
            to True, a log file with the conversion info is written out to
            <output_dir>/log_neuron_compliance.csv
        extra_value_errors: boolean indicating whether rows with missing
            'holding_current' or 'threshold_current' values should be
            reported. Default is True.

    Returns:
        Tuple of (path to the written extneurondb file, path to the written
        mecombo_emodel file).
    """
    tools.makedirs(output_dir)

    if make_names_neuron_compliant:
        log_filename = 'log_neuron_compliance.csv'
        log_path = os.path.join(output_dir, log_filename)
        table_processing.process_combo_name(extneurondb, log_path)

    if sort_key is not None:
        extneurondb = extneurondb.sort_values(sort_key).reset_index(drop=True)

    extneurondb_path = os.path.join(output_dir, extneurondb_filename)
    _write_extneurondbdat(extneurondb, extneurondb_path)
    print(
        'Wrote extneurondb.dat to {}'.format(
            os.path.abspath(extneurondb_path)))

    mecombo_emodel_path = os.path.join(output_dir, mecombo_emodel_filename)

    if extra_value_errors:
        for extra_values_key in ['holding_current', 'threshold_current']:
            null_rows = extneurondb[extra_values_key].isnull()
            if null_rows.sum() > 0:
                # TODO reenable this for release !
                # raise ValueError(
                #     "There are rows with None for "
                #     "holding current: %s" % str(
                #         extneurondb[null_rows]))
                # bug fix: the message always said 'holding current' even
                # when threshold_current was the key with missing values;
                # report the key actually being checked
                print("WARNING ! There are rows with None for "
                      "%s: %s" % (extra_values_key,
                                  str(extneurondb[null_rows])))

    extneurondb.to_csv(mecombo_emodel_path, sep='\t', index=False)
    print(
        'Wrote mecombo_emodel tsv to {}'.format(
            os.path.abspath(mecombo_emodel_path)))

    return extneurondb_path, mecombo_emodel_path