def test_Stage0_parameters(self):
    # Mouse spiny
    cell_id_spiny = self.mouse_spiny_id

    # Create the parameter bounds for the optimization
    model_params_handler_spiny = AllActive_Model_Parameters(cell_id_spiny)
    param_bounds_file = 'param_bounds_stage0.json'
    param_bounds_path = utility.locate_template_file(
        os.path.join('parameters', param_bounds_file))
    model_params_spiny, model_params_release = model_params_handler_spiny.get_opt_params(
        param_bounds_path)
    model_mechs_spiny, model_mechs_release = model_params_handler_spiny.get_opt_mechanism(
        model_params_spiny, model_params_release, param_bounds_path)
    model_params_dict_spiny = convert_model_params_to_dict(model_params_spiny)

    mouse_spiny_stage0_params = os.path.join(self.mouse_spiny_path, 'Stage0_parameters.json')
    mouse_spiny_stage0_mechs = os.path.join(self.mouse_spiny_path, 'Stage0_mechanism.json')
    model_params_spiny_true = utility.load_json(mouse_spiny_stage0_params)
    model_mechs_spiny_true = utility.load_json(mouse_spiny_stage0_mechs)
    model_params_spiny_true_dict = convert_model_params_to_dict(model_params_spiny_true)

    self.assertEqual(model_params_spiny_true_dict, model_params_dict_spiny)
    self.assertEqual(model_mechs_spiny_true, model_mechs_spiny)

    # Mouse aspiny
    cell_id_aspiny = self.mouse_aspiny_id

    # Create the parameter bounds for the optimization
    model_params_handler_aspiny = AllActive_Model_Parameters(
        cell_id_aspiny, swc_search_pattern='reconstruction.swc')
    param_bounds_file = 'param_bounds_stage0.json'
    param_bounds_path = utility.locate_template_file(
        os.path.join('parameters', param_bounds_file))
    model_params_aspiny, model_params_release = model_params_handler_aspiny.get_opt_params(
        param_bounds_path)
    model_mechs_aspiny, model_mechs_release = model_params_handler_aspiny.get_opt_mechanism(
        model_params_aspiny, model_params_release, param_bounds_path)
    model_params_dict_aspiny = convert_model_params_to_dict(model_params_aspiny)

    mouse_aspiny_stage0_params = os.path.join(self.mouse_aspiny_path, 'Stage0_parameters.json')
    mouse_aspiny_stage0_mechs = os.path.join(self.mouse_aspiny_path, 'Stage0_mechanism.json')
    model_params_aspiny_true = utility.load_json(mouse_aspiny_stage0_params)
    model_mechs_aspiny_true = utility.load_json(mouse_aspiny_stage0_mechs)
    model_params_aspiny_true_dict = convert_model_params_to_dict(model_params_aspiny_true)

    self.assertEqual(model_params_aspiny_true_dict, model_params_dict_aspiny)
    self.assertEqual(model_mechs_aspiny_true, model_mechs_aspiny)
def __init__(self, optim_param_path, sens_param_path, param_mod_range, config_file):
    # Guard each load on its own argument (the original guarded config_file on sens_param_path)
    self.config = utility.load_json(config_file) if config_file else None
    self.sens_parameters = utility.load_json(sens_param_path) if sens_param_path else None
    self.optim_param = utility.load_json(optim_param_path) if optim_param_path else None
    self.param_range = param_mod_range if param_mod_range else 0
def script_generator(self, chain_job='chain_job.sh', **kwargs):
    # Force change of certain config properties
    job_config = utility.load_json(self.job_config_path)
    stage_jobconfig = job_config['stage_jobconfig']
    highlevel_job_props = job_config['highlevel_jobconfig']
    analysis_config = stage_jobconfig['analysis_config']
    optim_config = stage_jobconfig['optim_config']

    # `update` and `dryrun_config` are assumed to be defined elsewhere in this module
    stage_jobconfig = update(stage_jobconfig, dryrun_config)
    stage_jobconfig['optim_config']['ipyparallel'] = False
    stage_jobconfig['analysis_config']['ipyparallel'] = False
    stage_jobconfig['seed'] = [1]
    utility.save_json(self.job_config_path, job_config)

    testjob_string = '#!/bin/bash\n'
    testjob_string += 'set -ex\n'
    testjob_string += 'source activate %s\n' % highlevel_job_props['conda_env']
    testjob_string += 'python %s --input_json %s\n' % (optim_config['main_script'],
                                                       self.job_config_path)
    testjob_string += 'python %s --input_json %s\n' % (analysis_config['main_script'],
                                                       self.job_config_path)
    if kwargs.get('next_stage_job_config'):
        testjob_string += 'bash %s\n' % chain_job

    with open(self.script_name, "w") as shell_script:
        shell_script.write(testjob_string)
def load_params_prev_stage(self, section_map):
    model_prev_stage = utility.load_json(self.prev_stage_model_path)
    model_params_prev = list()
    for key, values in model_prev_stage.items():
        if key == 'genome':
            for j in range(len(values)):
                # if no apical dendrite in morphology
                if self.no_apical and model_prev_stage[key][j]['section'] == 'apic':
                    continue
                iter_dict = {'param_name': model_prev_stage[key][j]['name']}
                iter_dict['dist_type'] = 'uniform'
                iter_dict['sectionlist'] = section_map[model_prev_stage[key][j]['section']]
                iter_dict['value'] = float(model_prev_stage[key][j]['value'])
                iter_dict['type'] = 'section'
                if model_prev_stage[key][j]['mechanism'] != '':
                    iter_dict['mech'] = model_prev_stage[key][j]['mechanism']
                    iter_dict['type'] = 'range'
                model_params_prev.append(iter_dict)
    return model_params_prev
def get_opt_mechanism(self, model_params, model_params_release, param_bounds_path):
    params_dict = utility.load_json(param_bounds_path)
    active_params, Ih_params, _, _, _, _ = self.group_params(params_dict)

    model_mechs = defaultdict(list)
    model_mechs['all'].append('pas')
    for param_dict in model_params:
        if param_dict['param_name'] in active_params + Ih_params:
            if param_dict['mech'] not in model_mechs[param_dict['sectionlist']]:
                model_mechs[param_dict['sectionlist']].append(param_dict['mech'])

    if model_params_release:
        model_mechs_release = {
            'somatic': ['pas'],
            'axonal': ['pas'],
            'apical': ['pas'],
            'basal': ['pas']
        }
        for param_dict_release in model_params_release:
            if 'mech' in param_dict_release.keys():
                if param_dict_release['mech'] not in \
                        model_mechs_release[param_dict_release['sectionlist']]:
                    model_mechs_release[param_dict_release['sectionlist']].append(
                        param_dict_release['mech'])
    else:
        model_mechs_release = None

    return model_mechs, model_mechs_release
def create_sa_bound_peri(self, bpopt_param_bounds_path, sens_param_bounds_path,
                         max_bound=.5):
    # For parameter sensitivity create a new set of bounds because the
    # permutations may fall outside the original bounds
    max_bound = max(max_bound, self.param_range + .1)
    param_bounds = utility.load_json(bpopt_param_bounds_path)
    optim_param_bpopt_format = {}
    param_sens_list = list()
    for i, param_dict in enumerate(param_bounds):
        if 'sectionlist' in param_dict.keys():
            name_loc = param_dict['param_name'] + '.' + param_dict['sectionlist']
            if param_dict['param_name'] not in ['ena', 'ek']:
                optim_param_bpopt_format[name_loc] = param_dict['value']
                lb = param_dict['value'] - max_bound * abs(param_dict['value'])
                ub = param_dict['value'] + max_bound * abs(param_dict['value'])
                param_dict['bounds'] = [lb, ub]
                del param_dict['value']
                param_sens_list.append(param_dict)
    utility.save_json(sens_param_bounds_path, param_sens_list)
    return optim_param_bpopt_format
def write_opt_config_file(self, param_write_path, mech_write_path,
                          mech_release_write_path, train_features_write_path,
                          test_features_write_path, protocols_write_path,
                          release_params, release_param_write_path,
                          opt_config_filename='config_file.json', **kwargs):
    if not os.path.exists(opt_config_filename):
        path_dict = dict()
    else:
        path_dict = utility.load_json(opt_config_filename)

    path_dict['parameters'] = param_write_path
    path_dict['mechanism'] = mech_write_path
    path_dict['released_aa_mechanism'] = mech_release_write_path
    path_dict['train_features'] = train_features_write_path
    path_dict['test_features'] = test_features_write_path
    path_dict['train_protocols'] = protocols_write_path
    path_dict['released_aa_model_dict'] = release_params
    path_dict['released_aa_model'] = release_param_write_path
    path_dict['released_peri_model'] = kwargs.get('released_peri_model')
    path_dict['released_peri_mechanism'] = kwargs.get('released_peri_mechanism')
    # for config_key, path in kwargs.items():
    #     path_dict[config_key] = path

    utility.save_json(opt_config_filename, path_dict)
def script_generator(self):
    all_config = utility.load_json(self.job_config_path)
    stage_jobconfig = all_config['stage_jobconfig']
    highlevel_job_props = all_config['highlevel_jobconfig']
    machine = highlevel_job_props['machine']

    if any(substring in machine for substring in ['aws', 'hpc-login']):
        self.submit_cmd = 'qsub'
    elif any(substring in machine for substring in ['cori', 'bbp']):
        self.submit_cmd = 'sbatch'
    else:
        self.submit_cmd = 'bash'

    conda_env = highlevel_job_props.get('conda_env', 'ateam_opt')

    with open(self.script_template, 'r') as job_template:
        subjob_string = job_template.read()

    subjob_string = subjob_string.replace('conda_env', conda_env)
    if stage_jobconfig.get('stage_name'):
        stage_path = os.path.join(highlevel_job_props['job_dir'],
                                  stage_jobconfig['stage_name'])
        subjob_string = subjob_string.replace('stage_jobdir', stage_path)
        subjob_string = subjob_string.replace(' Stage ',
                                              ' %s ' % stage_jobconfig['stage_name'])
    subjob_string = subjob_string.replace('submit_cmd', self.submit_cmd)

    if highlevel_job_props.get('modfiles_dir') and \
            not highlevel_job_props.get('compiled_modfiles_dir'):
        subjob_string = subjob_string.replace(
            'modfiles_dir_abs', os.path.basename(highlevel_job_props['modfiles_dir']))
        subjob_string = subjob_string.replace('modfiles_dir',
                                              highlevel_job_props['modfiles_dir'])
        subjob_string = re.sub(r'# Copy compiled[\S\s]*fi', '', subjob_string)
    elif highlevel_job_props.get('compiled_modfiles_dir'):
        subjob_string = re.sub(r'# Copy modfiles[\S\s]*modfiles_dir_abs; fi', '',
                               subjob_string)
        subjob_string = subjob_string.replace(
            'compiled_modfiles_dir', highlevel_job_props['compiled_modfiles_dir'])
    else:
        subjob_string = re.sub(r'# Copy modfiles[\S\s]*fi', '', subjob_string)

    with open(self.script_name, "w") as chainsubjob_script:
        chainsubjob_script.write(subjob_string)

    if bool(stage_jobconfig) and self.submit_cmd == 'bash':
        self.adjust_template('RES=$(%s batch_job.sh)' % self.submit_cmd,
                             '%s batch_job.sh' % self.submit_cmd)
        self.adjust_template('echo ${RES##* }', '', partial_match=True)
def get_data_fields(data_path):
    if isinstance(data_path, pd.DataFrame):
        return list(data_path)
    else:
        if data_path.endswith('.json'):
            json_data = utility.load_json(data_path)
            return list(json_data.keys())
        elif data_path.endswith('.csv'):
            csv_data = pd.read_csv(data_path, index_col=None)
            return list(csv_data)
    print('Not in .json, .csv or pandas dataframe format')
    return None
def get_release_params(self, section_map, rev_potential):
    if self.release_param_path:
        model_params_release = list()
        data_release = utility.load_json(self.release_param_path)
        for key, values in data_release.items():
            if key == 'genome':
                for j in range(len(values)):
                    iter_dict_release = {'param_name': data_release[key][j]['name']}
                    iter_dict_release['sectionlist'] = section_map[
                        data_release[key][j]['section']]
                    iter_dict_release['type'] = 'section'
                    iter_dict_release['value'] = float(data_release[key][j]['value'])
                    iter_dict_release['dist_type'] = 'uniform'
                    if data_release[key][j]['mechanism'] != '':
                        iter_dict_release['mech'] = data_release[key][j]['mechanism']
                        iter_dict_release['type'] = 'range'
                    model_params_release.append(iter_dict_release)

        for sect in list(set(section_map.values()) - set(['all'])):
            for rev in rev_potential:
                iter_dict_release = {'param_name': rev, 'sectionlist': sect,
                                     'dist_type': 'uniform', 'type': 'section'}
                if rev == 'ena':
                    iter_dict_release['value'] = rev_potential[rev]
                elif rev == 'ek':
                    iter_dict_release['value'] = rev_potential[rev]
                model_params_release.append(iter_dict_release)

        model_params_release.append({"param_name": "celsius", "type": "global",
                                     "value": self.temperature})
        model_params_release.append({"param_name": "v_init", "type": "global",
                                     "value": -90})
    else:
        model_params_release = None

    return model_params_release
def create_sa_bound(self, bpopt_param_bounds_path, sens_param_bounds_path, max_bound=.5):
    # For parameter sensitivity create a new set of bounds because the
    # permutations may fall outside the original bounds
    max_bound = max(max_bound, self.param_range + .1)
    bpopt_section_map = utility.bpopt_section_map
    param_bounds = utility.load_json(bpopt_param_bounds_path)
    optim_param = self.optim_param
    optim_param_bpopt_format = {}

    if 'genome' in optim_param.keys():
        print('The parameter file is in AIBS format')
        for aibs_param_dict in optim_param['genome']:
            param_name, param_sect = aibs_param_dict['name'], aibs_param_dict['section']
            if param_name in ['e_pas', 'g_pas', 'Ra']:
                param_sect = 'all'
            param_sect = bpopt_section_map[param_sect]
            optim_param_bpopt_format[param_name + '.' + param_sect] = \
                float(aibs_param_dict['value'])
    else:
        print('The parameter file is in bluepyopt format')
        for key, val in optim_param.items():
            key_param, key_sect = key.split('.')
            try:
                key_sect = bpopt_section_map[key_sect]
            except:
                print('Already in bluepyopt format')
            optim_param_bpopt_format[key_param + '.' + key_sect] = val

    param_sens_list = list()
    for i, param_dict in enumerate(param_bounds):
        bound = param_dict.get('bounds')
        if bound:
            name_loc = param_dict['param_name'] + '.' + param_dict['sectionlist']
            lb = min(param_dict['bounds'][0],
                     optim_param_bpopt_format[name_loc] -
                     max_bound * abs(optim_param_bpopt_format[name_loc]))
            ub = max(param_dict['bounds'][1],
                     optim_param_bpopt_format[name_loc] +
                     max_bound * abs(optim_param_bpopt_format[name_loc]))
            param_dict['bounds'] = [lb, ub]
            param_sens_list.append(param_dict)
    utility.save_json(sens_param_bounds_path, param_sens_list)
    return optim_param_bpopt_format
def __init__(self, protocol_path, feature_path, morph_path, param_path,
             mech_path, ephys_dir='preprocessed',
             skip_features=['peak_time'], **props):
    """
    do_replace_axon : bluepyopt axon replace code, diameter taken from swc file
    """
    self.morph_path = morph_path if morph_path else None
    self.protocol_path = protocol_path if protocol_path else None
    self.feature_path = feature_path if feature_path else None
    self.param_path = param_path if param_path else None
    self.mech_path = mech_path if mech_path else None
    self.ephys_dir = ephys_dir
    self.AIS_check = False
    self.skip_features = skip_features

    if self.feature_path:
        feature_definitions = utility.load_json(self.feature_path)
        feature_set = []
        for feat_key, feat_val in feature_definitions.items():
            feature_set.extend(feat_val['soma'].keys())
        self.AIS_check = 'check_AISInitiation' in set(feature_set)

    if any(timed_prop in props for timed_prop in ['timeout', 'learn_eval_trend']):
        self.timed_evaluation = True
    else:
        self.timed_evaluation = False

    self.axon_type = 'stub_axon'
    if props.pop('do_replace_axon', None):
        self.axon_type = 'bpopt_replaced_axon'
    self.eval_props = props
from allensdk.core.cell_types_cache import CellTypesCache
import pandas as pd
import os
# `lims` (LIMS reader) and `utility` (json/pickle helpers) are assumed to be
# imported elsewhere in this script.


def get_morph_path(cell_id):
    lr = lims.LimsReader()
    morph_path = lr.get_swc_path_from_lims(int(cell_id))
    return morph_path


data_path = os.path.join(os.getcwd(), os.pardir, os.pardir, 'assets', 'aggregated_data')
cre_color_tasic16_filename = os.path.join(data_path, 'cre_color_tasic16.pkl')
cre_color_dict = utility.load_pickle(cre_color_tasic16_filename)
cre_color_dict['Other'] = (0, 0, 0)

# Average layerwise depths for mouse
depth_data_filename = os.path.join(data_path, 'mouse_me_and_met_avg_layer_depths.json')
depth_data = utility.load_json(depth_data_filename)
total_depth = depth_data['wm']

# Cells are chosen to sample from diverse types within each layer
cell_id_dict = {
    # '1': ['574734127', '564349611', '475585413', '555341581', '536951541'],
    '1': ['574734127', '475585413', '536951541'],
    # '2/3': ['485184849', '475515168', '485468180', '476087653', '571306690'],
    '2/3': ['485184849', '475515168', '485468180'],
    # '4': ['483101699', '602822298', '490205998', '569723367', '324257146'],
    '4': ['483101699', '602822298', '569723367'],
    '5': ['479225052', '607124114', '515249852'],
    '6a': ['490259231', '473564515', '561985849'],
    # '6b': ['589128331', '574993444', '510136749', '509881736', '590558808']
    '6b': ['589128331']
}
    drop=True)

original_model_path = "/allen/aibs/mat/ateam_shared/Mouse_Model_Fit_Metrics"
model_refit_path = "/allen/programs/celltypes/workgroups/humancolumn_ephysmodeling/" \
    "anin/Optimizations_HPC/Mouse_Benchmark"

# %% Load Models
model_refit_list = []
original_model_list = []
for cell_id in cell_list:
    try:
        refit_path = os.path.join(
            model_refit_path, cell_id, "benchmark_final",
            f"Stage2/fitted_params/optim_param_{cell_id}_bpopt.json")
        model_refit_dict = utility.load_json(refit_path)
    except FileNotFoundError:
        refit_path = os.path.join(
            model_refit_path, cell_id, "benchmark_new",
            f"Stage2/fitted_params/optim_param_{cell_id}_bpopt.json")
        model_refit_dict = utility.load_json(refit_path)
    model_refit_dict.update({"Cell_id": cell_id})
    model_refit_list.append(model_refit_dict)

    original_path = os.path.join(
        original_model_path,
        f"{cell_id}/fitted_params/optim_param_{cell_id}_bpopt.json")
    original_model_dict = utility.load_json(original_path)
    original_model_dict.update({"Cell_id": cell_id})
    original_model_list.append(original_model_dict)
def main():
    # Read sensitivity analysis config file
    sens_config_file = sys.argv[-1]
    sens_config_dict = utility.load_json(sens_config_file)

    cell_id = sens_config_dict['Cell_id']
    cpu_count = sens_config_dict['cpu_count'] if 'cpu_count' in sens_config_dict.keys() \
        else mp.cpu_count()
    perisomatic_sa = sens_config_dict.get('run_peri_analysis', False)

    # Parameters to vary (All-active)
    select_aa_param_path = sens_config_dict['select_aa_param_path']  # knobs
    # Parameters to vary (Perisomatic)
    if perisomatic_sa:
        select_peri_param_path = sens_config_dict['select_peri_param_path']  # knobs
    select_feature_path = sens_config_dict['select_feature_path']  # knobs
    param_mod_range = sens_config_dict.get('param_mod_range', .1)  # knobs
    mechanism_path = sens_config_dict['mechanism']

    # config files with all the paths for Bluepyopt sim
    lr = lims.LimsReader()
    morph_path = lr.get_swc_path_from_lims(int(cell_id))

    model_base_path = '/allen/aibs/mat/ateam_shared/' \
        'Mouse_Model_Fit_Metrics/{}'.format(cell_id)
    opt_config_file = os.path.join(model_base_path, 'config_file.json')
    if not os.path.exists(opt_config_file):
        opt_config = {
            "morphology": "",
            "parameters": "config/{}/parameters.json".format(cell_id),
            "mechanism": "config/{}/mechanism.json".format(cell_id),
            "protocols": "config/{}/protocols.json".format(cell_id),
            "all_protocols": "config/{}/all_protocols.json".format(cell_id),
            "features": "config/{}/features.json".format(cell_id),
            "peri_parameters": "config/{}/peri_parameters.json".format(cell_id),
            "peri_mechanism": "config/{}/peri_mechanism.json".format(cell_id)
        }
        opt_config_file = os.path.join(os.getcwd(), 'config_file.json')
        utility.save_json(opt_config_file, opt_config)

    # optimized parameters around which select parameters are varied
    optim_param_path_aa = '/allen/aibs/mat/ateam_shared/Mouse_Model_Fit_Metrics/' \
        '{cell_id}/fitted_params/optim_param_unformatted_{cell_id}.json'.format(cell_id=cell_id)
    if not os.path.exists(optim_param_path_aa):
        optim_param_path_aa = '/allen/aibs/mat/ateam_shared/Mouse_Model_Fit_Metrics/' \
            '{cell_id}/fitted_params/optim_param_{cell_id}_bpopt.json'.format(cell_id=cell_id)

    SA_obj_aa = SA_helper(optim_param_path_aa, select_aa_param_path, param_mod_range,
                          opt_config_file)
    _, protocol_path, mech_path, feature_path, \
        param_bound_path = SA_obj_aa.load_config(model_base_path)

    # Make sure to get the parameter bounds big enough for BluePyOpt sim
    sens_param_bound_write_path_aa = "param_sensitivity_aa.json"
    optim_param_aa = SA_obj_aa.create_sa_bound(param_bound_path,
                                               sens_param_bound_write_path_aa)

    param_dict_uc_aa = SA_obj_aa.create_sens_param_dict()
    parameters_aa = {key: optim_param_aa[val] for key, val in param_dict_uc_aa.items()}

    eval_handler_aa = Bpopt_Evaluator(protocol_path, feature_path, morph_path,
                                      sens_param_bound_write_path_aa, mech_path,
                                      ephys_dir=None, timed_evaluation=False)
    evaluator_aa = eval_handler_aa.create_evaluator()
    opt_aa = bpopt.optimisations.DEAPOptimisation(evaluator=evaluator_aa)

    stim_protocols = utility.load_json(protocol_path)
    stim_protocols = {key: val for key, val in stim_protocols.items() if 'LongDC' in key}
    stim_dict = {key: val['stimuli'][0]['amp'] for key, val in stim_protocols.items()}
    sorted_stim_tuple = sorted(stim_dict.items(), key=operator.itemgetter(1))
    stim_name = sorted_stim_tuple[-1][0]  # knobs (the max amp)

    # Copy compiled modfiles
    if not os.path.isdir('x86_64'):
        raise Exception('Compiled modfiles do not exist')

    efel_features = utility.load_json(select_feature_path)
    un_features = un.EfelFeatures(features_to_run=efel_features)

    un_parameters_aa = un.Parameters(parameters_aa)
    un_parameters_aa.set_all_distributions(un.uniform(param_mod_range))
    un_model_aa = un.Model(run=nrnsim_bpopt, interpolate=True,
                           labels=["Time (ms)", "Membrane potential (mV)"],
                           opt=opt_aa, stim_protocols=stim_protocols,
                           param_dict_uc=param_dict_uc_aa,
                           stim_name=stim_name, optim_param=optim_param_aa)

    # Perform the uncertainty quantification
    UQ_aa = un.UncertaintyQuantification(un_model_aa, parameters=un_parameters_aa,
                                         features=un_features)

    data_folder = 'sensitivity_data'
    sa_filename_aa = 'sa_allactive_%s.h5' % cell_id
    sa_filename_aa_csv = 'sa_allactive_%s.csv' % cell_id
    sa_data_path_aa = os.path.join(data_folder, sa_filename_aa)
    sa_aa_csv_path = os.path.join(data_folder, sa_filename_aa_csv)
    UQ_aa.quantify(seed=0, CPUs=cpu_count, data_folder=data_folder,
                   filename=sa_filename_aa)
    _ = SA_obj_aa.save_analysis_data(sa_data_path_aa, filepath=sa_aa_csv_path)
    cell_data_aa = un.Data(sa_data_path_aa)
    SA_obj_aa.plot_sobol_analysis(cell_data_aa,
                                  analysis_path='figures/sa_analysis_aa_%s.pdf' % cell_id,
                                  palette='Set1')

    # Perisomatic model
    if perisomatic_sa:
        try:
            optim_param_path_peri = None
            SA_obj_peri = SA_helper(optim_param_path_peri, select_peri_param_path,
                                    param_mod_range, opt_config_file)
            _, _, mech_path_peri, _, \
                param_bound_path_peri = SA_obj_peri.load_config(model_base_path,
                                                                perisomatic=True)
            sens_param_bound_write_path_peri = "param_sensitivity_peri.json"
            optim_param_peri = SA_obj_peri.create_sa_bound_peri(
                param_bound_path_peri, sens_param_bound_write_path_peri)
            param_dict_uc_peri = SA_obj_peri.create_sens_param_dict()
            parameters_peri = {key: optim_param_peri[val]
                               for key, val in param_dict_uc_peri.items()}
            eval_handler_peri = Bpopt_Evaluator(protocol_path, feature_path, morph_path,
                                                sens_param_bound_write_path_peri,
                                                mech_path_peri, ephys_dir=None,
                                                timed_evaluation=False)
            evaluator_peri = eval_handler_peri.create_evaluator()
            opt_peri = bpopt.optimisations.DEAPOptimisation(evaluator=evaluator_peri)

            un_parameters_peri = un.Parameters(parameters_peri)
            un_parameters_peri.set_all_distributions(un.uniform(param_mod_range))
            un_model_peri = un.Model(run=nrnsim_bpopt, interpolate=True,
                                     labels=["Time (ms)", "Membrane potential (mV)"],
                                     opt=opt_peri, stim_protocols=stim_protocols,
                                     param_dict_uc=param_dict_uc_peri,
                                     stim_name=stim_name, optim_param=optim_param_peri)
            UQ_peri = un.UncertaintyQuantification(un_model_peri,
                                                   parameters=un_parameters_peri,
                                                   features=un_features)

            sa_filename_peri = 'sa_perisomatic_%s.h5' % cell_id
            sa_filename_peri_csv = 'sa_perisomatic_%s.csv' % cell_id
            sa_data_path_peri = os.path.join(data_folder, sa_filename_peri)
            sa_peri_csv_path = os.path.join(data_folder, sa_filename_peri_csv)
            UQ_peri.quantify(seed=0, CPUs=cpu_count, data_folder=data_folder,
                             filename=sa_filename_peri)
            _ = SA_obj_peri.save_analysis_data(sa_data_path_peri,
                                               filepath=sa_peri_csv_path)
            cell_data_peri = un.Data(sa_data_path_peri)
            SA_obj_peri.plot_sobol_analysis(cell_data_peri,
                                            analysis_path='figures/sa_analysis_peri_%s.pdf' % cell_id,
                                            palette='Set2')
        except Exception as e:
            print(e)
def main(args):
    stage_jobconfig = args['stage_jobconfig']
    highlevel_job_props = args['highlevel_jobconfig']
    logging.basicConfig(level=highlevel_job_props['log_level'])

    parent_dir = os.path.abspath(os.path.join('.', os.pardir))
    path_to_cell_metadata = glob.glob(parent_dir + '/cell_metadata*.json')[0]
    cell_metadata = utility.load_json(path_to_cell_metadata)
    cell_id = cell_metadata['cell_id']
    peri_model_id = cell_metadata.get('peri_model_id')
    released_aa_model_id = cell_metadata.get('released_aa_model_id')

    all_protocols_path = highlevel_job_props['all_protocols_path']
    all_features_path = highlevel_job_props['all_features_path']
    morph_path = highlevel_job_props['swc_path']
    axon_type = highlevel_job_props['axon_type']
    ephys_dir = highlevel_job_props['ephys_dir']

    train_features_path = args['train_features']
    test_features_path = args['test_features']
    param_write_path = args['parameters']
    mech_write_path = args['mechanism']
    release_param_write_path = args['released_aa_model']
    mech_release_write_path = args['released_aa_mechanism']

    analysis_parallel = (stage_jobconfig['analysis_config'].get('ipyparallel')
                         and stage_jobconfig['run_hof_analysis'])
    props = dict(axon_type=axon_type, ephys_dir=ephys_dir)
    map_function = analyzer_map(analysis_parallel)

    opt_train = get_opt_obj(all_protocols_path, train_features_path, morph_path,
                            param_write_path, mech_write_path, map_function, **props)
    opt_all = get_opt_obj(all_protocols_path, all_features_path, morph_path,
                          param_write_path, mech_write_path, map_function, **props)
    opt_test = get_opt_obj(all_protocols_path, test_features_path, morph_path,
                           param_write_path, mech_write_path, map_function, **props)

    analysis_handler = Optim_Analyzer(args, opt_all)
    best_model = analysis_handler.get_best_model()  # Model with least training error

    aibs_params_modelname = 'fitted_params/optim_param_%s.json' % cell_id
    analysis_handler.save_params_aibs_format(aibs_params_modelname, best_model[0],
                                             expand_params=True)
    aibs_params_compact_modelname = 'fitted_params/optim_param_%s_compact.json' % cell_id
    analysis_handler.save_params_aibs_format(aibs_params_compact_modelname, best_model[0])
    bpopt_params_modelname = 'fitted_params/optim_param_%s_bpopt.json' % cell_id
    analysis_handler.save_params_bpopt_format(bpopt_params_modelname, best_model[0])

    # Export hoc model
    if stage_jobconfig['hoc_export']:
        hoc_export_path = 'fitted_params/model_template_%s.hoc' % cell_id
        utility.create_filepath(hoc_export_path)
        best_param_dict = {key: best_model[0][i]
                           for i, key in enumerate(opt_train.evaluator.param_names)}
        model_string = opt_train.evaluator.cell_model.create_hoc(best_param_dict)
        with open(hoc_export_path, "w") as hoc_template:
            hoc_template.write(model_string)

    hof_model_params, seed_indices = analysis_handler.get_all_models()
    if not stage_jobconfig.get('run_hof_analysis'):
        hof_model_params, seed_indices = best_model, [seed_indices[0]]

    hof_params_filename = 'analysis_params/hof_model_params.pkl'
    hof_responses_filename = 'analysis_params/hof_response_all.pkl'
    obj_list_train_filename = 'analysis_params/hof_obj_train.pkl'
    obj_list_all_filename = 'analysis_params/hof_obj_all.pkl'
    feat_list_all_filename = 'analysis_params/hof_features_all.pkl'
    obj_list_test_filename = 'analysis_params/hof_obj_test.pkl'
    seed_indices_filename = 'analysis_params/seed_indices.pkl'
    score_list_train_filename = 'analysis_params/score_list_train.pkl'

    # Response for the entire hall of fame not arranged
    hof_response_list = analysis_handler.get_model_responses(hof_model_params,
                                                             hof_responses_filename)

    analysis_handler._opt = opt_train
    obj_list_train = analysis_handler.get_response_scores(hof_response_list)

    # Sort everything with respect to training error
    if not os.path.exists(score_list_train_filename):
        score_list_train = [np.sum(list(obj_dict_train.values()))
                            for obj_dict_train in obj_list_train]
        utility.create_filepath(score_list_train_filename)
        utility.save_pickle(score_list_train_filename, score_list_train)
    else:
        score_list_train = utility.load_pickle(score_list_train_filename)

    if not os.path.exists(seed_indices_filename):
        seed_indices_sorted = analysis_handler.organize_models(seed_indices,
                                                               score_list_train)
        utility.save_pickle(seed_indices_filename, seed_indices_sorted)

    if not os.path.exists(obj_list_train_filename):
        obj_list_train_sorted = analysis_handler.organize_models(obj_list_train,
                                                                 score_list_train)
        utility.save_pickle(obj_list_train_filename, obj_list_train_sorted)

    if not os.path.exists(obj_list_all_filename):
        analysis_handler._opt = opt_all
        obj_list_gen = analysis_handler.get_response_scores(hof_response_list)
        obj_list_gen_sorted = analysis_handler.organize_models(obj_list_gen,
                                                               score_list_train)
        utility.save_pickle(obj_list_all_filename, obj_list_gen_sorted)

    if not os.path.exists(feat_list_all_filename):
        analysis_handler._opt = opt_all
        feat_list_gen = analysis_handler.get_response_features(hof_response_list)
        feat_list_gen_sorted = analysis_handler.organize_models(feat_list_gen,
                                                                score_list_train)
        utility.save_pickle(feat_list_all_filename, feat_list_gen_sorted)

    if not os.path.exists(obj_list_test_filename):
        analysis_handler._opt = opt_test
        obj_list_test = analysis_handler.get_response_scores(hof_response_list)
        obj_list_test_sorted = analysis_handler.organize_models(obj_list_test,
                                                                score_list_train)
        utility.save_pickle(obj_list_test_filename, obj_list_test_sorted)

    analysis_handler._opt = opt_train

    # Save the sorted hof responses at the end
    hof_response_sorted = analysis_handler.organize_models(hof_response_list,
                                                           score_list_train)
    utility.save_pickle(hof_responses_filename, hof_response_sorted)

    # Save the sorted hall of fame output in .pkl
    hof_model_params_sorted = analysis_handler.save_hof_output_params(
        hof_model_params, hof_params_filename, score_list_train)

    # Save the entire hall of fame parameters
    for i, hof_param in enumerate(hof_model_params_sorted):
        aibs_params_modelname = os.path.join('fitted_params',
                                             'hof_param_%s_%s.json' % (cell_id, i))
        analysis_handler.save_params_aibs_format(aibs_params_modelname, hof_param,
                                                 expand_params=True)

    # Now save the sorted score
    utility.save_pickle(score_list_train_filename, sorted(score_list_train))

    GA_evol_path = os.path.join('analysis_params', 'GA_evolution_params.pkl')
    analysis_handler.save_GA_evolultion_info(GA_evol_path)

    resp_filename = os.path.join(os.getcwd(), 'resp_opt.txt')
    analysis_handler.save_best_response(hof_response_sorted[0], resp_filename)

    if release_param_write_path:
        eval_handler_release = Bpopt_Evaluator(all_protocols_path, all_features_path,
                                               morph_path, release_param_write_path,
                                               mech_release_write_path,
                                               stub_axon=False, do_replace_axon=True,
                                               ephys_dir=ephys_dir)
        evaluator_release = eval_handler_release.create_evaluator()
        opt_release = bpopt.optimisations.DEAPOptimisation(evaluator=evaluator_release)
        resp_release_filename = os.path.join(os.getcwd(), 'resp_release.txt')
        analysis_handler.get_release_responses(opt_release, resp_release_filename)
        resp_release_aa = utility.load_pickle(resp_release_filename)[0]
        features_release_aa = opt_release.evaluator.fitness_calculator.\
            calculate_features(resp_release_aa)
        features_aa_filename = os.path.join('Validation_Responses',
                                            'Features_released_aa_%s.pkl' % cell_id)
        utility.create_filepath(features_aa_filename)
        utility.save_pickle(features_aa_filename, features_release_aa)
    else:
        resp_release_filename = None

    stim_mapfile = highlevel_job_props['stimmap_file']
    analysis_write_path = '%s_%s.pdf' % (cell_id, stage_jobconfig['stage_name'])
    pdf_pages = PdfPages(analysis_write_path)
    model_type = 'All-active'

    pdf_pages = analysis_handler.plot_grid_Response(resp_filename, resp_release_filename,
                                                    stim_mapfile, pdf_pages)
    pdf_pages = analysis_handler.plot_feature_comp(resp_filename, resp_release_filename,
                                                   pdf_pages)
    pdf_pages = analysis_handler.plot_GA_evol(GA_evol_path, pdf_pages)
    pdf_pages = analysis_handler.plot_param_diversity(hof_params_filename, pdf_pages)

    if stage_jobconfig['model_postprocess']:
        exp_fi_path = os.path.join('Validation_Responses', 'fI_exp_%s.pkl' % cell_id)
        model_fi_path = os.path.join('Validation_Responses', 'fI_aa_%s.pkl' % cell_id)
        exp_AP_shape_path = os.path.join('Validation_Responses',
                                         'AP_shape_exp_%s.pkl' % cell_id)
        model_AP_shape_path = os.path.join('Validation_Responses',
                                           'AP_shape_aa_%s.pkl' % cell_id)
        pdf_pages = analysis_handler.postprocess(stim_mapfile, resp_filename, pdf_pages,
                                                 exp_fi_path, model_fi_path,
                                                 exp_AP_shape_path, model_AP_shape_path,
                                                 model_type)

    # Perisomatic model
    if peri_model_id and stage_jobconfig['run_peri_comparison']:
        resp_peri_filename = os.path.join(os.getcwd(), 'resp_peri.txt')
        peri_param_path = args['released_peri_model']
        peri_mech_path = args['released_peri_mechanism']
        props_peri = props.copy()
        props_peri['axon_type'] = 'stub_axon'
        eval_handler_peri = Bpopt_Evaluator(all_protocols_path, all_features_path,
                                            morph_path, peri_param_path,
                                            peri_mech_path, **props_peri)
        evaluator_peri = eval_handler_peri.create_evaluator()
        opt_peri = bpopt.optimisations.DEAPOptimisation(evaluator=evaluator_peri)
        model_type = 'Perisomatic'
        analysis_handler.get_release_responses(opt_peri, resp_peri_filename)
        resp_peri = utility.load_pickle(resp_peri_filename)[0]
        features_peri = opt_peri.evaluator.fitness_calculator.calculate_features(resp_peri)
        features_peri_filename = os.path.join('Validation_Responses',
                                              'Features_peri_%s.pkl' % cell_id)
        utility.create_filepath(features_peri_filename)
        utility.save_pickle(features_peri_filename, features_peri)
        pdf_pages = analysis_handler.plot_grid_Response(resp_filename, resp_peri_filename,
                                                        stim_mapfile, pdf_pages,
                                                        resp_comparison=model_type)
        if stage_jobconfig['model_postprocess']:
            model_fi_path = os.path.join('Validation_Responses',
                                         'fI_peri_%s.pkl' % cell_id)
            model_AP_shape_path = os.path.join('Validation_Responses',
                                               'AP_shape_peri_%s.pkl' % cell_id)
            pdf_pages = analysis_handler.postprocess(stim_mapfile, resp_peri_filename,
                                                     pdf_pages, exp_fi_path,
                                                     model_fi_path, exp_AP_shape_path,
                                                     model_AP_shape_path, model_type)

    if stage_jobconfig.get('calc_model_perf'):
        spiketimes_exp_path = os.path.join('Validation_Responses',
                                           'spiketimes_exp_noise.pkl')
        all_features = utility.load_json(all_features_path)
        spiketimes_noise_exp = {}
        for stim_, feat in all_features.items():
            if 'Noise' in stim_:
                if 'peak_time' in feat['soma'].keys():
                    spiketimes_noise_exp[stim_] = feat['soma']['peak_time'][2]
        if bool(spiketimes_noise_exp):
            utility.create_filepath(spiketimes_exp_path)
            utility.save_pickle(spiketimes_exp_path, spiketimes_noise_exp)
        spiketimes_hof_path = os.path.join('Validation_Responses',
                                           'spiketimes_model_noise.pkl')
        exp_variance_hof_path = os.path.join('Validation_Responses',
                                             'exp_variance_hof.pkl')
        model_perf_filename = os.path.join('Validation_Responses',
                                           'fitness_metrics_%s.csv' % cell_id)
        pdf_pages = analysis_handler.hof_statistics(
            stim_mapfile, pdf_pages, obj_list_all_filename, hof_responses_filename,
            obj_list_train_filename, obj_list_test_filename, seed_indices_filename,
            spiketimes_exp_path, spiketimes_hof_path, exp_variance_hof_path,
            cell_metadata, model_perf_filename)

    pdf_pages.close()

    if stage_jobconfig.get('calc_time_statistics'):
        # time_by_gen_filename = 'time_info.txt'
        # if os.path.exists(time_by_gen_filename):
        #     time_metrics_filename = 'time_metrics_%s.csv' % cell_id
        #     analysis_module.save_optimization_time(time_by_gen_filename,
        #                                            time_metrics_filename, cell_metadata)
        compute_statistics_filename = 'compute_metrics_%s.csv' % cell_id
        opt_logbook = 'logbook_info.txt'
        analysis_module.save_compute_statistics(opt_logbook, compute_statistics_filename)
def get_opt_params(self, param_bounds_path, prev_stage_tolerance=None):
    section_map = utility.bpopt_section_map
    params_dict = utility.load_json(param_bounds_path)
    _, _, _, all_params_dict, ena_sect, ek_sect = self.group_params(params_dict)

    model_params_opt = list()

    # Create parameter file from the initialization bounds for optimization
    for param_name, param_dict in all_params_dict.items():
        for sect in param_dict['section']:
            if self.no_apical and sect == 'apic':  # if no apical dendrite in morphology
                continue
            iter_dict = {'param_name': param_name}
            iter_dict['sectionlist'] = section_map[sect]
            iter_dict['type'] = 'section'
            iter_dict['dist_type'] = 'uniform'
            try:
                iter_dict['mech'] = param_dict['mechanism']
                iter_dict['type'] = 'range'
            except:
                pass
            iter_dict['bounds'] = param_dict['bounds'][sect]
            model_params_opt.append(iter_dict)

    # Adjust parameter bounds from previous stage
    if self.prev_stage_model_path and prev_stage_tolerance is not None:
        model_params_prev = self.load_params_prev_stage(section_map)
        for model_param_dict in model_params_prev:
            unique_param = model_param_dict['param_name'] + '.' + \
                model_param_dict['sectionlist']
            model_param_opt_entry = list(filter(
                lambda x: x['param_name'] + '.' + x['sectionlist'] == unique_param,
                model_params_opt))[0]
            model_param_opt_entry = adjust_param_bounds(model_param_opt_entry,
                                                        model_param_dict,
                                                        prev_stage_tolerance)

    # Add reversal potential if Na, K currents are present
    rev_potential = utility.rev_potential
    for rev in rev_potential:
        if rev == 'ena':
            for sect in ena_sect:
                if self.no_apical and sect == 'apic':  # if no apical dendrite in morphology
                    continue
                iter_dict = {'param_name': rev, 'sectionlist': section_map[sect],
                             'dist_type': 'uniform', 'type': 'section',
                             'value': rev_potential[rev]}
                model_params_opt.append(iter_dict)
        elif rev == 'ek':
            for sect in ek_sect:
                if self.no_apical and sect == 'apic':  # if no apical dendrite in morphology
                    continue
                iter_dict = {'param_name': rev, 'sectionlist': section_map[sect],
                             'dist_type': 'uniform', 'type': 'section',
                             'value': rev_potential[rev]}
                model_params_opt.append(iter_dict)

    # Add experimental conditions
    model_params_opt.append({"param_name": "celsius", "type": "global",
                             "value": self.temperature})
    model_params_opt.append({"param_name": "v_init", "type": "global",
                             "value": self.v_init})

    model_params_release = self.get_release_params(section_map, rev_potential)

    return model_params_opt, model_params_release
filtered_me_inh_cells_filename = os.path.join(  # head of this call restored from its later use
    data_path, 'filtered_me_inh_cells.pkl')
filtered_me_exc_cells_filename = os.path.join(
    data_path, 'filtered_me_exc_cells.pkl')

mouse_data_df = man_utils.read_csv_with_dtype(mouse_data_filename, mouse_datatype_filename)
morph_data = man_utils.read_csv_with_dtype(morph_data_filename, morph_datatype_filename)
# morph_data = morph_data.loc[:, [morph_feature for morph_feature in morph_data.columns
#                                 if not any(sec in morph_feature for sec in ['apical', 'axon'])]]
morph_fields = man_utils.get_data_fields(morph_data)
ephys_data = man_utils.read_csv_with_dtype(train_ephys_max_amp_fname,
                                           train_ephys_max_amp_dtype_fname)
ephys_fields = utility.load_json(train_ephys_max_amp_fields_fname)
hof_param_data = man_utils.read_csv_with_dtype(param_data_filename, param_datatype_filename)

cre_color_dict = utility.load_pickle(cre_coloring_filename)
bcre_color_dict = utility.load_pickle(bcre_coloring_filename)
bcre_index_order = list(bcre_color_dict.keys())
filtered_me_inh_cells = utility.load_pickle(filtered_me_inh_cells_filename)
filtered_me_exc_cells = utility.load_pickle(filtered_me_exc_cells_filename)

cre_cluster_color_dict = OrderedDict()
cre_type_cluster = utility.load_pickle(cre_ttype_filename)
for cre, color in cre_color_dict.items():
    if cre_type_cluster[cre] not in cre_cluster_color_dict.keys():
def aibs_peri_to_bpopt(self, peri_param_path, base_dir='config/'):
    peri_params = utility.load_json(peri_param_path)
    peri_params_release = list()
    peri_mechs_release = defaultdict(list)
    peri_mechs_release['all'].append('pas')

    rev_potential = utility.rev_potential
    section_map = utility.bpopt_section_map

    for key, values in peri_params.items():
        if key == 'genome':
            for j in range(len(values)):
                iter_dict_release = {'param_name': peri_params[key][j]['name']}
                iter_dict_release['sectionlist'] = section_map[
                    peri_params[key][j]['section']]
                iter_dict_release['type'] = 'section'
                iter_dict_release['value'] = float(peri_params[key][j]['value'])
                iter_dict_release['dist_type'] = 'uniform'
                if peri_params[key][j]['mechanism'] != '':
                    iter_dict_release['mech'] = peri_params[key][j]['mechanism']
                    iter_dict_release['type'] = 'range'
                peri_params_release.append(iter_dict_release)
        elif key == 'passive':
            for key_pas, val_pas in values[0].items():
                if key_pas == 'cm':
                    for pas_param in val_pas:
                        iter_dict_release = {'param_name': 'cm',
                                             'sectionlist': section_map[pas_param['section']],
                                             'value': pas_param['cm'],
                                             'dist_type': 'uniform',
                                             'type': 'section'}
                        peri_params_release.append(iter_dict_release)
                else:
                    iter_dict_release = {'param_name': 'Ra' if key_pas == 'ra' else key_pas,
                                         'sectionlist': 'all',
                                         'value': val_pas,
                                         'dist_type': 'uniform',
                                         'type': 'section'}
                    peri_params_release.append(iter_dict_release)

    for rev in rev_potential:
        iter_dict_release = {'param_name': rev, 'sectionlist': 'somatic',
                             'dist_type': 'uniform', 'type': 'section'}
        if rev == 'ena':
            iter_dict_release['value'] = rev_potential[rev]
        elif rev == 'ek':
            iter_dict_release['value'] = rev_potential[rev]
        peri_params_release.append(iter_dict_release)

    peri_params_release.append({"param_name": "celsius", "type": "global", "value": 34})
    peri_params_release.append({"param_name": "v_init", "type": "global",
                                "value": peri_params['conditions'][0]["v_init"]})

    for param_dict in peri_params_release:
        if 'mech' in param_dict.keys():
            if param_dict['mech'] not in peri_mechs_release[param_dict['sectionlist']]:
                peri_mechs_release[param_dict['sectionlist']].append(param_dict['mech'])

    peri_params_write_path = os.path.join(base_dir, self.cell_id, 'peri_parameters.json')
    peri_mech_write_path = os.path.join(base_dir, self.cell_id, 'peri_mechanism.json')
    utility.create_filepath(peri_params_write_path)
    utility.save_json(peri_params_write_path, peri_params_release)
    utility.save_json(peri_mech_write_path, peri_mechs_release)
    return peri_params_write_path, peri_mech_write_path
def create_optim_job(args):
    level = logging.getLevelName(args['log_level'])
    logger.setLevel(level)

    cty_props = args['cty_config']
    cell_id = cty_props['cell_id']
    highlevel_job_props = args['job_config']['highlevel_jobconfig']
    stage_job_props = args['job_config']['stage_jobconfig']

    # Change any paths to absolute path
    for ii, stage_job_prop in enumerate(stage_job_props):
        stage_job_props[ii] = convert_paths(stage_job_prop)
    highlevel_job_props = convert_paths(highlevel_job_props)

    try:
        job_dir = os.path.join(os.getcwd(), highlevel_job_props['job_dir'])
    except:
        job_dir = os.path.join(os.getcwd(), str(cell_id))
    highlevel_job_props['job_dir'] = job_dir
    utility.create_dirpath(job_dir)
    os.chdir(job_dir)  # Change Working directory

    cty_config_path = os.path.join('user_config', 'cell_config.json')
    job_config_path = os.path.join('user_config', 'job_config.json')
    highlevel_jobconfig_path = 'high_level_job_config.json'
    stage_tracker_path = 'stage_tracker_config.json'
    utility.create_filepath(cty_config_path)
    utility.create_filepath(job_config_path)

    # Save a copy of the config files
    utility.save_json(cty_config_path, cty_props)
    utility.save_json(job_config_path, args['job_config'])

    try:
        ateamopt_dir = os.path.join(os.path.dirname(ateamopt.__file__), os.pardir)
        ateamopt_commitID = subprocess.check_output(
            ["git", "describe", "--tags"], cwd=ateamopt_dir).strip()
        ateamopt_commitID = ateamopt_commitID.decode() if isinstance(
            ateamopt_commitID, bytes) else ateamopt_commitID
        cty_props['ateamopt_tag'] = ateamopt_commitID
    except Exception as e:
        logger.debug(e)

    try:
        bluepyopt_dir = os.path.join(os.path.dirname(bluepyopt.__file__), os.pardir)
        bpopt_commitID = subprocess.check_output(
            ["git", "describe", "--tags"], cwd=bluepyopt_dir).strip()
        bpopt_commitID = bpopt_commitID.decode() if isinstance(
            bpopt_commitID, bytes) else bpopt_commitID
        cty_props['bluepyopt_tag'] = bpopt_commitID
    except:
        pass

    # pickling consistency depends on pandas version
    pd_version = pd.__version__
    cty_props['pandas_version'] = pd_version

    cell_metadata_path = glob.glob('cell_metadata*.json')
    if len(cell_metadata_path) == 0:
        cty_props.update(highlevel_job_props)
        cell_metadata, cell_metadata_path = cell_data.save_cell_metadata(**cty_props)
        morph_stats_filename = 'morph_stats_%s.json' % cell_id
        morph_handler = MorphHandler(cell_metadata['swc_path'], cell_id=cell_id)
        morph_handler.save_morph_data(morph_stats_filename)
    elif len(cell_metadata_path) == 1:
        cell_metadata_path = cell_metadata_path[0]
        cell_metadata = utility.load_json(cell_metadata_path)
    else:
        raise Exception('More than one metadata files found')

    # Extract ephys data
    ephys_dir = highlevel_job_props['ephys_dir']
    non_standard_nwb = highlevel_job_props['non_standard_nwb']
    feature_stimtypes = highlevel_job_props['feature_stimtypes']

    highlevel_job_props['nwb_path'] = cell_metadata['nwb_path']
    highlevel_job_props['swc_path'] = cell_metadata['swc_path']

    nwb_handler = NwbExtractor(cell_id, nwb_path=highlevel_job_props['nwb_path'])
    data_source = highlevel_job_props["data_source"]
    if data_source == "lims":
        ephys_data_path, stimmap_filename = nwb_handler.save_cell_data(
            feature_stimtypes, non_standard_nwb=non_standard_nwb, ephys_dir=ephys_dir)
    else:
        ephys_data_path, stimmap_filename = nwb_handler.save_cell_data_web(
            feature_stimtypes, non_standard_nwb=non_standard_nwb, ephys_dir=ephys_dir)

    feature_names_path = highlevel_job_props['feature_names_path']
    protocol_dict, feature_dict = nwb_handler.get_efeatures_all(
        feature_names_path, ephys_data_path, stimmap_filename)
    feature_dict = correct_voltage_feat_std(feature_dict)

    all_protocols_filename = os.path.join(ephys_data_path, 'all_protocols.json')
    all_features_filename = os.path.join(ephys_data_path, 'all_features.json')
    utility.save_json(all_protocols_filename, protocol_dict)
    utility.save_json(all_features_filename, feature_dict)

    highlevel_job_props['stimmap_file'] = os.path.abspath(stimmap_filename)
    highlevel_job_props['machine'] = cell_metadata['machine']
    highlevel_job_props['log_level'] = args['log_level']
    highlevel_job_props['all_features_path'] = all_features_filename
    highlevel_job_props['all_protocols_path'] = all_protocols_filename
    highlevel_job_props = convert_paths(highlevel_job_props)
    utility.save_json(highlevel_jobconfig_path, highlevel_job_props)

    stage_level_jobconfig = {}
    stage_level_jobconfig['stage_jobconfig'] = stage_job_props.pop(0)
    stage_level_jobconfig['highlevel_jobconfig'] = highlevel_job_props
    utility.save_json(stage_tracker_path, stage_job_props)

    stage_jobdir = os.path.join(highlevel_job_props['job_dir'],
                                stage_level_jobconfig['stage_jobconfig']['stage_name'])
    stage_level_jobconfig_path = os.path.join(stage_jobdir, 'stage_job_config.json')
    utility.create_filepath(stage_level_jobconfig_path)
    utility.save_json(stage_level_jobconfig_path, stage_level_jobconfig)

    prepare_jobscript_default = utility.locate_script_file('prepare_stagejob.py')
    analyze_jobscript_default = utility.locate_script_file('analyze_stagejob.py')
    shutil.copy(prepare_jobscript_default, stage_jobdir)
    shutil.copy(analyze_jobscript_default, stage_jobdir)

    jobtemplate_path = 'job_templates/chainjob_template.sh'
    chain_job = ChainSubJob(jobtemplate_path, stage_level_jobconfig_path)
    chain_job.script_generator()
    chain_job.run_job()
def main(args):
    # Job config
    job_config_path = sys.argv[-1]
    stage_jobconfig = args['stage_jobconfig']
    highlevel_job_props = args['highlevel_jobconfig']
    logging.basicConfig(level=highlevel_job_props['log_level'])

    job_dir = highlevel_job_props['job_dir']
    path_to_cell_metadata = glob.glob(os.path.join(job_dir, 'cell_metadata*.json'))[0]
    stage_tracker_path = os.path.join(job_dir, 'stage_tracker_config.json')

    cell_metadata = utility.load_json(path_to_cell_metadata)
    cell_id = cell_metadata['cell_id']
    peri_model_id = cell_metadata.get('peri_model_id')
    released_aa_model_path = cell_metadata.get('model_path_all_active')
    released_aa_model_id = cell_metadata.get('released_aa_model_id')

    nwb_path = highlevel_job_props['nwb_path']
    swc_path = highlevel_job_props['swc_path']
    all_features_path = highlevel_job_props['all_features_path']
    all_protocols_path = highlevel_job_props['all_protocols_path']

    stage_stimtypes = stage_jobconfig['stage_stimtypes']
    stage_feature_names_path = stage_jobconfig['stage_features']
    param_bounds_path = stage_jobconfig['stage_parameters']
    ap_init_flag = stage_jobconfig['AP_initiation_zone']
    ap_init_feature = 'check_AISInitiation'
    script_repo_dir = stage_jobconfig.get('script_repo_dir')
    depol_block_check = stage_jobconfig.get('depol_block_check')
    add_fi_kink = stage_jobconfig.get('add_fi_kink')
    # analysis batch job only for hof analysis
    analysis_parallel = (stage_jobconfig['analysis_config'].get('ipyparallel')
                         and stage_jobconfig['run_hof_analysis'])
    param_bound_tolerance = stage_jobconfig.get('adjust_param_bounds_prev')
    prev_stage_path = stage_jobconfig.get('prev_stage_path')

    filter_rule_func = getattr(filter_rules, stage_jobconfig['filter_rule'])

    all_features = utility.load_json(all_features_path)
    all_protocols = utility.load_json(all_protocols_path)

    stage_feature_names = utility.load_json(stage_feature_names_path)['features']
    # AP init flag is prioritized over feature set file
    if ap_init_flag == 'soma':
        if ap_init_feature in stage_feature_names:
            stage_feature_names.remove(ap_init_feature)
    elif ap_init_flag == 'axon':
        if ap_init_feature not in stage_feature_names:
            stage_feature_names.append(ap_init_feature)

    select_stim_names = []
    for stim_name in all_features.keys():
        stim_type = stim_name.rsplit('_', 1)[0]
        stim_type_aibs = utility.aibs_stimname_map_inv[stim_type]
        if stim_type_aibs in stage_stimtypes:
            select_stim_names.append(stim_name)

    features_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
    for stim_name, stim_dict in all_features.items():
        if stim_name in select_stim_names:
            for loc, loc_features in stim_dict.items():
                for feat, val in loc_features.items():
                    if feat in stage_feature_names:
                        features_dict[stim_name][loc][feat] = [val[0], val[1]]

    protocols_dict = {proto_key: proto_val for proto_key, proto_val in
                      all_protocols.items() if proto_key in select_stim_names}

    nwb_handler = NwbExtractor(cell_id, nwb_path=nwb_path)

    kwargs = {'depol_block_check': depol_block_check, 'add_fi_kink': add_fi_kink}
    if depol_block_check:
        train_features, test_features, train_protocols, DB_proto_dict = filter_rule_func(
            features_dict, protocols_dict, **kwargs)
        # also append DB check info to all_protocols json and save
        all_protocols['DB_check_DC'] = {'stimuli': DB_proto_dict}
        utility.save_json(all_protocols_path, all_protocols)
    else:
        train_features, test_features, train_protocols = filter_rule_func(
            features_dict, protocols_dict, **kwargs)

    train_features_path, test_features_path, train_protocols_path = \
        nwb_handler.write_ephys_features(train_features, test_features, train_protocols)

    # Create the parameter bounds for the optimization
    if prev_stage_path:
        prev_stage_model_path = os.path.join(prev_stage_path, 'fitted_params',
                                             'optim_param_%s_compact.json' % cell_id)
    else:
        prev_stage_model_path = None
    model_params_handler = AllActive_Model_Parameters(
        cell_id, swc_path=swc_path,
        prev_stage_model_path=prev_stage_model_path,
        released_aa_model_path=released_aa_model_path)

    model_params, model_params_release = model_params_handler.get_opt_params(
        param_bounds_path, prev_stage_tolerance=param_bound_tolerance)
    param_write_path, released_aa_param_write_path, released_aa_params = \
        model_params_handler.write_params_opt(model_params, model_params_release)
    model_mechs, model_mechs_release = model_params_handler.get_opt_mechanism(
        model_params, model_params_release, param_bounds_path)
    mech_write_path, mech_release_write_path = model_params_handler.write_mechanisms_opt(
        model_mechs, model_mechs_release)

    props = {}
    if peri_model_id:
        peri_model_path = cell_metadata['model_path_perisomatic']
        peri_params_write_path, peri_mech_write_path = \
            model_params_handler.aibs_peri_to_bpopt(peri_model_path)
        props['released_peri_model'] = peri_params_write_path
        props['released_peri_mechanism'] = peri_mech_write_path

    # Config file with all the necessary paths to feed into the optimization
    # TODO: clarify how this fits into schema
    model_params_handler.write_opt_config_file(
        param_write_path, mech_write_path, mech_release_write_path,
        train_features_path, test_features_path, train_protocols_path,
        released_aa_params, released_aa_param_write_path,
        opt_config_filename=job_config_path, **props)

    # Copy the optimizer scripts in the current directory
    optimizer_script = stage_jobconfig['optim_config']['main_script']
    analysis_script = stage_jobconfig['analysis_config']['main_script']
    if script_repo_dir:
        optimizer_script_repo = os.path.abspath(
            os.path.join(script_repo_dir, optimizer_script))
        optimizer_script_repo = optimizer_script_repo if \
            os.path.exists(optimizer_script_repo) else None
    else:
        optimizer_script_repo = None
    optimizer_script_default = utility.locate_script_file(optimizer_script)
    optimizer_script_path = optimizer_script_repo or optimizer_script_default
    stage_cwd = os.getcwd()
    shutil.copy(optimizer_script_path, stage_cwd)

    next_stage_job_props = utility.load_json(stage_tracker_path)

    machine = highlevel_job_props['machine']
    machine_match_patterns = ['hpc-login', 'aws', 'cori', 'bbp5']

    next_stage_jobconfig = {}
    try:
        next_stage_jobconfig['stage_jobconfig'] = next_stage_job_props.pop(0)
        next_stage_jobconfig['highlevel_jobconfig'] = highlevel_job_props
        next_stage_jobconfig['stage_jobconfig']['prev_stage_path'] = os.getcwd()
        chainjobtemplate_path = 'job_templates/chainjob_template.sh'
    except:
        pass

    utility.save_json(stage_tracker_path, next_stage_job_props)

    # Create batch jobscript
    if not any(substr in machine for substr in machine_match_patterns):
        testJob = test_JobModule('batch_job.sh', job_config_path=job_config_path)
        testJob.script_generator(next_stage_job_config=next_stage_jobconfig)
    elif any(pattern in machine for pattern in ['hpc-login', 'aws']):
        jobtemplate_path = 'job_templates/pbs_jobtemplate.sh'
        batch_job = PBS_JobModule(jobtemplate_path, job_config_path)
        if analysis_parallel:
            batch_job.script_generator(analysis_jobname='analyze_job.sh')
            # A separate batch job needs to be created in this case
            analysis_job = PBS_JobModule(jobtemplate_path, job_config_path,
                                         script_name='analyze_job.sh')
            analysis_job.script_generator(analysis=True,
                                          next_stage_job_config=next_stage_jobconfig)
        else:
            batch_job.script_generator(next_stage_job_config=next_stage_jobconfig)
    elif any(pattern in machine for pattern in ['cori', 'bbp5']):
        if 'cori' in machine:
            jobtemplate_path = 'job_templates/nersc_slurm_jobtemplate.sh'
        else:
            jobtemplate_path = 'job_templates/bbp_slurm_jobtemplate.sh'
        batch_job = Slurm_JobModule(jobtemplate_path, job_config_path)
        if analysis_parallel:
            batch_job.script_generator(analysis_jobname='analyze_job.sh')
            # A separate batch job needs to be created in this case
            analysis_job = Slurm_JobModule(jobtemplate_path, job_config_path,
                                           script_name='analyze_job.sh')
            analysis_job.script_generator(analysis=True,
                                          next_stage_job_config=next_stage_jobconfig)
        else:
            batch_job.script_generator(next_stage_job_config=next_stage_jobconfig)

    if next_stage_jobconfig:
        stage_jobdir = os.path.join(
            highlevel_job_props['job_dir'],
            next_stage_jobconfig['stage_jobconfig']['stage_name'])
        next_stage_jobconfig_path = os.path.join(stage_jobdir, 'stage_job_config.json')
        utility.create_filepath(next_stage_jobconfig_path)
        utility.save_json(next_stage_jobconfig_path, next_stage_jobconfig)

        prepare_jobscript_default = utility.locate_script_file('prepare_stagejob.py')
        analyze_jobscript_default = utility.locate_script_file(analysis_script)
        shutil.copy(prepare_jobscript_default, stage_jobdir)
        shutil.copy(analyze_jobscript_default, stage_jobdir)

        chain_job = ChainSubJob(chainjobtemplate_path, next_stage_jobconfig_path)
        chain_job.script_generator()
def get_efeatures_all(self, feature_set_filename, ephys_data_path,
                      stimmap_filename, *args, **kwargs):
    cell_name = self.cell_id
    feature_map = utility.load_json(feature_set_filename)
    features_meanstd = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
    stim_map = self.get_stim_map(os.path.join(ephys_data_path, stimmap_filename))

    for stim_name in stim_map.keys():
        stim_type = utility.aibs_stimname_map_inv[stim_name.rsplit('_', 1)[0]]
        stim_features = feature_map.get(stim_type)  # Features to extract
        if not stim_features:
            continue
        logger.debug("\n### Getting features from %s of cell %s ###\n"
                     % (stim_name, cell_name))

        sweeps = []
        for sweep_filename in stim_map[stim_name]['stimuli'][0]['sweep_filenames']:
            sweep_fullpath = os.path.join(ephys_data_path, sweep_filename)
            data = np.loadtxt(sweep_fullpath)
            tot_duration = stim_map[stim_name]['stimuli'][0]['totduration']
            time, voltage = data[:, 0], data[:, 1]
            # Limit the duration of stim for correct stim end feature calculation
            time, voltage = time[time <= tot_duration], voltage[time <= tot_duration]

            # Prepare sweep for eFEL
            sweep = {}
            sweep['T'] = time
            sweep['V'] = voltage
            sweep['stim_start'] = [stim_map[stim_name]['stimuli'][0]['delay']]
            sweep['stim_end'] = [stim_map[stim_name]['stimuli'][0]['stim_end']]
            if 'check_AISInitiation' in stim_features:
                sweep['T;location_AIS'] = time
                sweep['V;location_AIS'] = voltage
                sweep['stim_start;location_AIS'] = [
                    stim_map[stim_name]['stimuli'][0]['delay']]
                sweep['stim_end;location_AIS'] = [
                    stim_map[stim_name]['stimuli'][0]['stim_end']]
            sweeps.append(sweep)

        # eFEL feature extraction
        feature_results = efel.getFeatureValues(sweeps, stim_features)

        for feature_name in stim_features:
            # For one feature, a list with values for every sweep
            feature_values_over_trials = [trace_dict[feature_name].tolist()
                                          for trace_dict in feature_results
                                          if trace_dict[feature_name] is not None]
            feature_mean_over_trials = [np.nanmean(trace_dict[feature_name])
                                        for trace_dict in feature_results
                                        if trace_dict[feature_name] is not None]
            if len(feature_mean_over_trials) == 0:
                continue
            else:
                mean = np.nanmean(list(itertools.chain.from_iterable(
                    feature_values_over_trials)))
                std = (np.nanstd(list(itertools.chain.from_iterable(
                    feature_values_over_trials))) or 0.05 * np.abs(mean) or 0.05)
            if feature_name == 'peak_time':
                mean, std = None, None
            features_meanstd[stim_name]['soma'][feature_name] = \
                [mean, std, feature_values_over_trials]

    return stim_map, features_meanstd
def test_Stage2_parameters(self):

    # Mouse spiny
    cell_id_spiny = self.mouse_spiny_id

    # Create the parameter bounds for the optimization
    prev_params_file = 'Stage1_fit_spiny.json'
    model_params_handler_spiny = AllActive_Model_Parameters(
        cell_id_spiny, prev_model_pattern=prev_params_file)
    param_bounds_file = 'param_bounds_stage2.json'
    param_bounds_path = utility.locate_template_file(
        os.path.join('parameters', param_bounds_file))
    param_rule_func = adjust_param_bounds
    model_params_spiny, model_params_release = model_params_handler_spiny.get_opt_params(
        param_bounds_path, param_rule_func)
    model_mechs_spiny, model_mechs_release = model_params_handler_spiny.get_opt_mechanism(
        model_params_spiny, model_params_release, param_bounds_path)
    model_params_dict_spiny = convert_model_params_to_dict(
        model_params_spiny)

    mouse_spiny_stage2_params = os.path.join(self.mouse_spiny_path,
                                             'Stage2_parameters.json')
    mouse_spiny_stage2_mechs = os.path.join(self.mouse_spiny_path,
                                            'Stage2_mechanism.json')
    model_params_spiny_true = utility.load_json(mouse_spiny_stage2_params)
    model_mechs_spiny_true = utility.load_json(mouse_spiny_stage2_mechs)
    model_params_spiny_true_dict = convert_model_params_to_dict(
        model_params_spiny_true)

    self.assertEqual(model_params_spiny_true_dict,
                     model_params_dict_spiny)
    for mech_key, mech_val in model_mechs_spiny.items():
        self.assertEqual(set(model_mechs_spiny_true[mech_key]),
                         set(mech_val))

    # Mouse aspiny
    cell_id_aspiny = self.mouse_aspiny_id

    # Create the parameter bounds for the optimization
    prev_params_file = 'Stage1_fit_aspiny.json'
    model_params_handler_aspiny = AllActive_Model_Parameters(
        cell_id_aspiny, swc_search_pattern='reconstruction.swc',
        prev_model_pattern=prev_params_file)
    param_bounds_file = 'param_bounds_stage2_mouse_aspiny.json'
    param_bounds_path = os.path.join(self.mouse_aspiny_path,
                                     param_bounds_file)
    param_rule_func = adjust_param_bounds
    model_params_aspiny, model_params_release = model_params_handler_aspiny.\
        get_opt_params(param_bounds_path, param_rule_func)
    model_mechs_aspiny, model_mechs_release = model_params_handler_aspiny.\
        get_opt_mechanism(model_params_aspiny, model_params_release,
                          param_bounds_path)
    model_params_dict_aspiny = convert_model_params_to_dict(
        model_params_aspiny)

    mouse_aspiny_stage2_params = os.path.join(self.mouse_aspiny_path,
                                              'Stage2_parameters.json')
    mouse_aspiny_stage2_mechs = os.path.join(self.mouse_aspiny_path,
                                             'Stage2_mechanism.json')
    model_params_aspiny_true = utility.load_json(
        mouse_aspiny_stage2_params)
    model_mechs_aspiny_true = utility.load_json(mouse_aspiny_stage2_mechs)
    model_params_aspiny_true_dict = convert_model_params_to_dict(
        model_params_aspiny_true)

    self.assertEqual(model_params_aspiny_true_dict,
                     model_params_dict_aspiny)
    for mech_key, mech_val in model_mechs_aspiny.items():
        self.assertEqual(set(model_mechs_aspiny_true[mech_key]),
                         set(mech_val))
def test_Stage0_features(self):

    # Mouse spiny
    cell_id_spiny = self.mouse_spiny_id
    nwb_handler_spiny = NwbExtractor(cell_id_spiny,
                                     nwb_search_pattern=cell_id_spiny)
    acceptable_stimtypes = ['Long Square']
    ephys_dir = os.path.join(self.mouse_spiny_path, 'mouse_spiny_ephys')
    ephys_data_path, stimmap_filename = \
        nwb_handler_spiny.save_cell_data(acceptable_stimtypes,
                                         ephys_dir=ephys_dir)
    feature_path = utility.locate_template_file(
        os.path.join('parameters', 'feature_set_stage0.json'))
    train_features_spiny, _, _, train_protocols_spiny, _ = \
        nwb_handler_spiny.get_ephys_features(feature_path, ephys_data_path,
                                             stimmap_filename,
                                             filter_feat_proto_passive)
    train_features_dict_spiny = convert_stim_feat_to_dict(
        train_features_spiny, 'soma')
    train_protocols_dict_spiny = convert_protocols_to_dict(
        train_protocols_spiny, 'stimuli')

    mouse_spiny_stage0_features = os.path.join(self.mouse_spiny_path,
                                               'Stage0_features.json')
    mouse_spiny_stage0_protocols = os.path.join(self.mouse_spiny_path,
                                                'Stage0_protocols.json')
    model_features_spiny_true = utility.load_json(
        mouse_spiny_stage0_features)
    model_protocols_spiny_true = utility.load_json(
        mouse_spiny_stage0_protocols)
    model_features_spiny_true_dict = convert_stim_feat_to_dict(
        model_features_spiny_true, 'soma')
    model_protocols_spiny_true_dict = convert_protocols_to_dict(
        model_protocols_spiny_true, 'stimuli')

    self.assertAlmostEqual(model_features_spiny_true_dict,
                           train_features_dict_spiny)
    self.assertAlmostEqual(model_protocols_spiny_true_dict,
                           train_protocols_dict_spiny)

    # Mouse aspiny
    cell_id_aspiny = self.mouse_aspiny_id
    nwb_handler_aspiny = NwbExtractor(cell_id_aspiny,
                                      nwb_search_pattern=cell_id_aspiny)
    acceptable_stimtypes = ['Long Square']
    ephys_dir = os.path.join(self.mouse_aspiny_path, 'mouse_aspiny_ephys')
    ephys_data_path, stimmap_filename = \
        nwb_handler_aspiny.save_cell_data(acceptable_stimtypes,
                                          ephys_dir=ephys_dir)
    feature_path = utility.locate_template_file(
        os.path.join('parameters', 'feature_set_stage0.json'))
    train_features_aspiny, _, _, train_protocols_aspiny, _ = \
        nwb_handler_aspiny.get_ephys_features(feature_path, ephys_data_path,
                                              stimmap_filename,
                                              filter_feat_proto_passive)
    train_features_dict_aspiny = convert_stim_feat_to_dict(
        train_features_aspiny, 'soma')
    train_protocols_dict_aspiny = convert_protocols_to_dict(
        train_protocols_aspiny, 'stimuli')

    mouse_aspiny_stage0_features = os.path.join(self.mouse_aspiny_path,
                                                'Stage0_features.json')
    mouse_aspiny_stage0_protocols = os.path.join(self.mouse_aspiny_path,
                                                 'Stage0_protocols.json')
    model_features_aspiny_true = utility.load_json(
        mouse_aspiny_stage0_features)
    model_protocols_aspiny_true = utility.load_json(
        mouse_aspiny_stage0_protocols)
    model_features_aspiny_true_dict = convert_stim_feat_to_dict(
        model_features_aspiny_true, 'soma')
    model_protocols_aspiny_true_dict = convert_protocols_to_dict(
        model_protocols_aspiny_true, 'stimuli')

    self.assertAlmostEqual(model_features_aspiny_true_dict,
                           train_features_dict_aspiny)
    self.assertAlmostEqual(model_protocols_aspiny_true_dict,
                           train_protocols_dict_aspiny)
def script_generator(self, chain_job='chain_job.sh', **kwargs):
    job_config = utility.load_json(self.job_config_path)
    stage_jobconfig = job_config['stage_jobconfig']
    highlevel_job_props = job_config['highlevel_jobconfig']
    # this means prepare a batch script for analysis
    analysis_flag = kwargs.get('analysis')

    if highlevel_job_props['dryrun']:
        stage_jobconfig = update(stage_jobconfig, dryrun_config)
        job_config['stage_jobconfig'] = stage_jobconfig
        utility.save_json(self.job_config_path, job_config)

    analysis_config = stage_jobconfig['analysis_config']

    with open(self.script_template, 'r') as job_template:
        batchjob_string = job_template.read()

    jobname = '%s.%s' % (os.path.basename(
        highlevel_job_props['job_dir']), stage_jobconfig['stage_name'])
    if analysis_flag:
        jobname += '.analysis'
    seed_string = ''.join(
        ['%s ' % seed_ for seed_ in stage_jobconfig['seed']])

    # High level job config
    batchjob_string = re.sub('conda_env', highlevel_job_props['conda_env'],
                             batchjob_string)
    batchjob_string = re.sub('jobname', jobname, batchjob_string)
    batchjob_string = re.sub('jobscript_name', self.script_name,
                             batchjob_string)
    if highlevel_job_props.get('email'):
        batchjob_string = re.sub('email', highlevel_job_props['email'],
                                 batchjob_string)

    # Only related to optimization
    batchjob_string = re.sub('seed_list', seed_string, batchjob_string)
    batchjob_string = re.sub('analysis_script',
                             analysis_config['main_script'],
                             batchjob_string)
    batchjob_string = re.sub('job_config_path', self.job_config_path,
                             batchjob_string)

    # Job config analysis vs optimization
    if analysis_flag:
        hpc_job_config = analysis_config
        batchjob_string = re.sub(r'# Run[\S\s]*pids', '', batchjob_string)
    else:
        hpc_job_config = stage_jobconfig['optim_config']
        # Within the batch job script change the analysis launch to batch analysis
        if analysis_config.get(
                'ipyparallel') and stage_jobconfig['run_hof_analysis']:
            analysis_jobname = kwargs.get('analysis_jobname')
            batchjob_string = re.sub(r'# Analyze[\S\s]*.json',
                                     'qsub %s' % analysis_jobname,
                                     batchjob_string)

    # if there is a next stage chain the job
    if bool(kwargs.get('next_stage_job_config')):
        batchjob_string += 'bash %s\n' % chain_job

    hpc_job_parameters = [
        'jobmem', 'ipyparallel_db', 'qos', 'main_script', 'jobtime',
        'error_stream', 'output_stream', 'nnodes', 'nprocs', 'nengines'
    ]
    for hpc_param in hpc_job_parameters:
        batchjob_string = re.sub(hpc_param, str(hpc_job_config[hpc_param]),
                                 batchjob_string)

    with open(self.script_name, "w") as batchjob_script:
        batchjob_script.write(batchjob_string)
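# --- Illustration (not part of the pipeline) ---------------------------------
# The script_generator above works by plain re.sub of bare placeholder words in
# a shell job template. The miniature template and values below are invented;
# the real templates live under job_templates/. Because placeholders are
# substituted as plain regex patterns, their names must not collide with other
# text in the template.
import re

template = """#!/bin/bash
#SBATCH --job-name=jobname
#SBATCH --time=jobtime
#SBATCH --nodes=nnodes
python main_script --input_json job_config_path
"""

substitutions = {
    'jobname': 'benchmark.Stage2',
    'jobtime': '04:00:00',
    'nnodes': '8',
    'main_script': 'Optim_Main.py',              # placeholder script name
    'job_config_path': 'stage_job_config.json',  # placeholder config path
}
for placeholder, value in substitutions.items():
    template = re.sub(placeholder, value, template)
print(template)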
def get_ephys_features(self, feature_set_filename, ephys_data_path,
                       stimmap_filename, filter_rule_func, *args, **kwargs):
    cell_name = self.cell_id
    feature_map = utility.load_json(feature_set_filename)
    stim_features = feature_map['features']  # Features to extract
    features_meanstd = defaultdict(
        lambda: defaultdict(
            lambda: defaultdict(dict)))
    features_meanstd_lite = defaultdict(
        lambda: defaultdict(
            lambda: defaultdict(dict)))

    # if additional dendritic recordings
    record_locations = kwargs.get('locations')

    stim_map = self.get_stim_map(os.path.join(ephys_data_path,
                                              stimmap_filename),
                                 record_locations=record_locations)
    cell_stim_map = stim_map.copy()
    training_stim_map = dict()
    spiketimes_noise = defaultdict(list)

    for stim_name in stim_map.keys():
        if 'feature_reject_stim_type' in kwargs:
            if any(reject_feat_stim in stim_name for reject_feat_stim
                   in kwargs['feature_reject_stim_type']):
                continue
        logger.debug("\n### Getting features from %s of cell %s ###\n"
                     % (stim_name, cell_name))
        sweeps = []
        for sweep_filename in stim_map[stim_name]['stimuli'][0]['sweep_filenames']:
            sweep_fullpath = os.path.join(
                ephys_data_path, sweep_filename)
            data = np.loadtxt(sweep_fullpath)
            time = data[:, 0]
            voltage = data[:, 1]

            # Prepare sweep for eFEL
            sweep = {}
            sweep['T'] = time
            sweep['V'] = voltage
            sweep['stim_start'] = [
                stim_map[stim_name]['stimuli'][0]['delay']]
            sweep['stim_end'] = [
                stim_map[stim_name]['stimuli'][0]['stim_end']]
            sweep['T;location_AIS'] = time
            sweep['V;location_AIS'] = voltage
            sweep['stim_start;location_AIS'] = [
                stim_map[stim_name]['stimuli'][0]['delay']]
            sweep['stim_end;location_AIS'] = [
                stim_map[stim_name]['stimuli'][0]['stim_end']]
            sweeps.append(sweep)

        if 'Noise' in stim_name:
            feature_results = efel.getFeatureValues(sweeps, ['peak_time'])
            for feature_result in feature_results:
                spiketimes_noise[stim_name].append(
                    feature_result['peak_time'])
            continue

        # eFEL feature extraction
        feature_results = efel.getFeatureValues(sweeps, stim_features)

        for feature_name in stim_features:
            # For one feature, a list with values for every sweep
            feature_values = [np.mean(trace_dict[feature_name])
                              for trace_dict in feature_results
                              if trace_dict[feature_name] is not None]
            if len(feature_values) == 0:
                continue
            elif len(feature_values) == 1:
                mean = feature_values[0]
                std = 0.05 * abs(mean)
            elif len(feature_values) > 1:
                mean = np.mean(feature_values)
                std = np.std(feature_values)

            if std == 0 and len(feature_values) != 1:
                std = 0.05 * abs(mean) / math.sqrt(len(feature_values))

            if math.isnan(mean) or math.isnan(std):
                continue
            if mean == 0:
                std = 0.05
            if feature_name in ['voltage_base', 'steady_state_voltage'] \
                    and len(feature_values) == 1:
                std = 0

            features_meanstd[stim_name]['soma'][
                feature_name] = [mean, std]

            # Remove depolarization block and check initiation from all features list
            if feature_name not in ['depol_block', 'check_AISInitiation']:
                features_meanstd_lite[stim_name]['soma'][
                    feature_name] = [mean, std]

        if stim_name in features_meanstd.keys():
            training_stim_map[stim_name] = cell_stim_map[stim_name]

    if kwargs.get('spiketimes_exp_path'):
        spiketimes_exp_path = kwargs['spiketimes_exp_path']
        if len(spiketimes_noise.keys()) > 0:
            utility.create_filepath(spiketimes_exp_path)
            utility.save_pickle(spiketimes_exp_path, spiketimes_noise)

    features_meanstd_filtered, untrained_features_dict, training_stim_map_filtered,\
        all_stim_filtered = filter_rule_func(features_meanstd.copy(),
                                             training_stim_map,
                                             cell_stim_map, *args)
    features_meanstd_lite = correct_voltage_feat_std(features_meanstd_lite)

    return features_meanstd_filtered, untrained_features_dict,\
        features_meanstd_lite, training_stim_map_filtered,\
        all_stim_filtered
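# --- Illustration (not part of the pipeline) ---------------------------------
# A standalone restatement of the mean/std fallback rules applied per feature
# in get_ephys_features above (ignoring the voltage_base/steady_state_voltage
# special case); summarize_feature is not a helper that exists in the codebase.
import math
import numpy as np


def summarize_feature(values, floor_frac=0.05):
    if len(values) == 0:
        return None
    if len(values) == 1:
        mean, std = values[0], floor_frac * abs(values[0])
    else:
        mean, std = float(np.mean(values)), float(np.std(values))
        if std == 0:
            std = floor_frac * abs(mean) / math.sqrt(len(values))
    if math.isnan(mean) or math.isnan(std):
        return None
    if mean == 0:
        std = floor_frac
    return mean, std


print(summarize_feature([2.0, 2.0, 2.0]))  # zero spread -> 0.05*|mean|/sqrt(n)
print(summarize_feature([1.5]))            # single sweep -> std is 5% of |mean|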
metadata_list = ['Cell_id', 'ttype']
for idx, cell_id in enumerate(cell_ids):
    cell_efeatures_dir = os.path.join(efel_feature_path, cell_id)
    cell_protocols_filename = os.path.join(cell_efeatures_dir,
                                           'protocols.json')
    cell_features_filename = os.path.join(efel_feature_path, cell_id,
                                          'features.json')
    stimmap_data_path = os.path.join(efel_feature_path, cell_id,
                                     'StimMapReps.csv')
    stimmap_data = pd.read_csv(stimmap_data_path, sep=r'\s*,\s*',
                               header=0, encoding='ascii', engine='python')
    stimmap_data['Amplitude_Start'] *= 1e12
    stimmap_data['Amplitude_Start'] = round(stimmap_data['Amplitude_Start'], 2)
    stimmap_data['Amplitude_End'] *= 1e12

    cell_features = utility.load_json(cell_features_filename)
    cell_protocols = utility.load_json(cell_protocols_filename)
    stimmap_data = stimmap_data.sort_values(by='Amplitude_Start')

    ttype = sdk_data.loc[sdk_data.Cell_id == cell_id, 'ttype'].values[0]
    rheobase = sdk_data.loc[sdk_data.Cell_id == cell_id, 'rheobase'].values[0]
    spiking_stims = stimmap_data.loc[stimmap_data['Amplitude_Start'] >= rheobase,
                                     'DistinctID'].tolist()
    spiking_feature_dicts = [cell_features[stim_name]['soma']
                             for stim_name in spiking_stims
                             if stim_name in cell_features.keys()]
    spiking_features_df_list.append(get_feature_vec(
        spiking_feature_dicts, spiking_features, cell_id, ttype))

spiking_features_df = pd.DataFrame(spiking_features_df_list)
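# --- Illustration (not part of the pipeline) ---------------------------------
# A toy stand-in for one cell's StimMapReps.csv showing the unit handling in
# the loop above: the *1e12 factor suggests raw amplitudes are stored in
# amperes and compared, in pA, against the cell's rheobase. Data are invented.
import pandas as pd

stimmap_data = pd.DataFrame({
    'DistinctID': ['LongDC_31', 'LongDC_35', 'LongDC_39'],  # invented names
    'Amplitude_Start': [50e-12, 150e-12, 170e-12],          # amperes
})
stimmap_data['Amplitude_Start'] *= 1e12                     # now pA
rheobase = 100.0                                            # pA, assumed
spiking_stims = stimmap_data.loc[
    stimmap_data['Amplitude_Start'] >= rheobase, 'DistinctID'].tolist()
print(spiking_stims)  # ['LongDC_35', 'LongDC_39']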