def select_features(request):
    '''
    This function serves the application select-features page.
    '''
    # If there is no "ctx" in the session, exit the application
    if "ctx" not in request.session:
        return render(request, 'efelg/hbp_redirect.html',
                      {"status": "KO", "message": "Problem"})

    # Read the feature groups
    with open(os.path.join(settings.BASE_DIR, 'static', 'efelg',
                           'efel_features_final.json')) as json_file:
        features_dict = json.load(json_file)

    feature_names = efel.getFeatureNames()
    selected_traces_rest = request.POST.get('data')
    selected_traces_rest_json = json.loads(selected_traces_rest)
    request.session['selected_traces_rest_json'] = selected_traces_rest_json

    accesslogger.info(resources.string_for_log(
        'select_features', request, page_spec_string=selected_traces_rest))

    return render(request, 'efelg/select_features.html')
def test_getFeatureNames():
    """basic: Test getting all feature names"""
    import efel
    import json

    with open('featurenames.json', 'r') as featurenames_json:
        expected_featurenames = json.load(featurenames_json)

    nt.assert_equal(efel.getFeatureNames(), expected_featurenames)
def test_getFeatureNames():
    """basic: Test getting all feature names"""
    import efel
    efel.reset()
    import json

    with open('featurenames.json', 'r') as featurenames_json:
        expected_featurenames = json.load(featurenames_json)

    nt.assert_equal(efel.getFeatureNames(), expected_featurenames)
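For orientation, a minimal self-contained sketch of the call these tests exercise; efel.getFeatureNames() simply returns a list of feature-name strings:

import efel

efel.reset()
names = efel.getFeatureNames()
print(len(names))   # number of available features
print(names[:5])    # the entries are plain feature-name strings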
def test_getFeatureNames():
    """basic: Test getting all feature names"""
    import efel
    efel.reset()
    import json

    test_data_path = os.path.join(testdata_dir, '..', 'featurenames.json')
    with open(test_data_path, 'r') as featurenames_json:
        expected_featurenames = json.load(featurenames_json)

    nt.assert_equal(efel.getFeatureNames(), expected_featurenames)
def test_getFeatureNames():
    """basic: Test getting all feature names"""
    import efel
    efel.reset()
    import json

    test_data_path = joinp(testdata_dir, '..', 'featurenames.json')
    with open(test_data_path, 'r') as featurenames_json:
        expected_featurenames = json.load(featurenames_json)

    nt.assert_equal(efel.getFeatureNames(), expected_featurenames)
def setUp(self):
    folder = os.path.dirname(os.path.realpath(__file__))

    self.time = np.load(os.path.join(folder, "data/t_test.npy"))
    self.values = np.load(os.path.join(folder, "data/U_test.npy"))

    self.implemented_features = efel.getFeatureNames()
    self.features = EfelFeatures(verbose_level="error")

    self.info = {}
    self.info["stimulus_start"] = self.time[0]
    self.info["stimulus_end"] = self.time[-10]
def more_challenging(model):
    '''
    Isolate harder code, still wrangling data types.
    When this is done, EFEL might be able to report back about input
    resistance.
    '''
    single_spike = {}
    single_spike['APWaveForm'] = [float(v) for v in model.vm_rheobase]  # temp_vm
    single_spike['T'] = [
        float(t) for t in model.vm_rheobase.times.rescale('ms')
    ]
    single_spike['V'] = [float(v) for v in model.vm_rheobase]  # temp_vm
    single_spike['stim_start'] = [float(model.protocol['Time_Start'])]
    single_spike['stimulus_current'] = [model.model.rheobase_current]
    # The original read trace15['T'][-1] here, but trace15 is not defined in
    # this function; the local trace's own end time is used instead.
    single_spike['stim_end'] = [single_spike['T'][-1]]

    single_spike = [single_spike]

    ##
    # How EFEL could learn about the input resistance of the model
    ##
    trace_ephys_prop = {}
    trace_ephys_prop['stimulus_current'] = \
        model.druckmann2013_input_resistance_currents[0]
    trace_ephys_prop['V'] = [float(v) for v in model.vminh]
    trace_ephys_prop['T'] = [
        float(t) for t in model.vminh.times.rescale('ms')
    ]
    # Same fix as above: the original referenced the undefined trace15.
    trace_ephys_prop['stim_end'] = [trace_ephys_prop['T'][-1]]
    trace_ephys_prop['stim_start'] = [float(model.inh_protocol['Time_Start'])]
    trace_ephys_props = [trace_ephys_prop]

    efel_results_inh = efel.getFeatureValues(trace_ephys_props,
                                             list(efel.getFeatureNames()))
    # efel_results_ephys = efel.getFeatureValues(trace_ephys_prop,
    #                                            list(efel.getFeatureNames()))
    return efel_results_inh
def select_features(request): """ This function serves the application select-features page """ # if not ctx exit the application if "ctx" not in request.session: return render(request, 'efelg/overview.html') feature_names = efel.getFeatureNames() selected_traces_rest = request.POST.get('data') request.session['selected_traces_rest_json'] = json.loads( selected_traces_rest) request.session['global_parameters_json'] = json.loads( request.POST.get('global_parameters')) #accesslogger.info(resources.string_for_log('select_features', request, page_spec_string=selected_traces_rest)) return render(request, 'efelg/select_features.html')
def __init__(self,
             new_features=None,
             features_to_run="all",
             adaptive=None,
             labels={},
             strict=True,
             verbose_level="info",
             verbose_filename=None):

    if not prerequisites:
        raise ImportError("Efel features require: efel")

    efel.reset()

    implemented_labels = {}

    def efel_wrapper(feature_name):
        def feature_function(time, values, info):
            disable = False

            if "stimulus_start" not in info:
                if strict:
                    raise ValueError(
                        "Efel features require info['stimulus_start']. "
                        "No 'stimulus_start' found in info. "
                        "Set 'stimulus_start', or set strict to "
                        "False to use initial time as stimulus start")
                else:
                    info["stimulus_start"] = time[0]
                    self.logger.warning(
                        "Efel features require info['stimulus_start']. "
                        "No 'stimulus_start' found in info, "
                        "setting stimulus start as initial time")

            if "stimulus_end" not in info:
                if strict:
                    # The original message said "Set 'stimulus_start'" here,
                    # an apparent copy-paste slip.
                    raise ValueError(
                        "Efel features require info['stimulus_end']. "
                        "No 'stimulus_end' found in info. "
                        "Set 'stimulus_end', or set strict to "
                        "False to use end time as stimulus end")
                else:
                    info["stimulus_end"] = time[-1]
                    self.logger.warning(
                        "Efel features require info['stimulus_end']. "
                        "No 'stimulus_end' found in info, "
                        "setting stimulus end as end time")

            if info["stimulus_start"] >= info["stimulus_end"]:
                raise ValueError("stimulus_start >= stimulus_end.")

            trace = {}
            trace["T"] = time
            trace["V"] = values
            trace["stim_start"] = [info["stimulus_start"]]
            trace["stim_end"] = [info["stimulus_end"]]

            # Disable decay_time_constant_after_stim if no time points are
            # left in the simulation after stimulation has ended.
            # Otherwise it throws an error.
            if feature_name == "decay_time_constant_after_stim":
                if info["stimulus_end"] >= time[-1]:
                    return None, None

            result = efel.getMeanFeatureValues([trace], [feature_name],
                                               raise_warnings=False)

            return None, result[0][feature_name]

        feature_function.__name__ = feature_name
        return feature_function

    super(EfelFeatures, self).__init__(new_features=new_features,
                                       features_to_run=features_to_run,
                                       adaptive=adaptive,
                                       new_utility_methods=[],
                                       labels=implemented_labels,
                                       verbose_level=verbose_level,
                                       verbose_filename=verbose_filename)

    for feature_name in efel.getFeatureNames():
        self.add_features(efel_wrapper(feature_name))

    self.labels = labels
    self.features_to_run = features_to_run
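The wrapper above boils down to one getMeanFeatureValues call per feature. A runnable sketch of that pattern, with a synthetic trace made up purely for illustration:

import efel
import numpy as np

efel.reset()

# Synthetic trace (illustration only): 100 ms at -70 mV with one crude
# square "spike" between 48 and 52 ms.
time = np.linspace(0.0, 100.0, 1001)
voltage = np.full_like(time, -70.0)
voltage[480:520] = 20.0

trace = {
    "T": time,
    "V": voltage,
    "stim_start": [10.0],
    "stim_end": [90.0],
}

result = efel.getMeanFeatureValues([trace], ["Spikecount"],
                                   raise_warnings=False)
print(result[0]["Spikecount"])  # expect a count of 1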
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""

import efel
import json

if __name__ == '__main__':
    print("Updating featurenames.json ...", end=' ')

    efel.cppcore.Initialize(efel.getDependencyFileLocation(), "log")

    with open('featurenames.json', 'w') as featurenames_json:
        feature_names = efel.getFeatureNames()
        json.dump(
            feature_names,
            featurenames_json,
            indent=4,
            separators=(',', ': '))

    print("done")
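The tests earlier in this section consume the file written by this script. A minimal sketch of that round trip, assuming the same relative path:

import efel
import json

with open('featurenames.json', 'r') as featurenames_json:
    saved_featurenames = json.load(featurenames_json)

# If this fails, rerun the updater script above.
assert saved_featurenames == efel.getFeatureNames()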
def get_allfeature_values():
    """Get back all the feature names and values"""
    import efel
    efel.reset()
    import numpy

    all_featurenames = efel.getFeatureNames()

    soma_data = numpy.loadtxt(os.path.join(testdata_dir, 'testdata.txt'))
    soma_time = soma_data[:, 0]
    soma_voltage = soma_data[:, 1]

    bac_data = numpy.loadtxt(os.path.join(testdata_dir, 'testbacdata.txt'))
    bac_time = bac_data[:, 0]
    bac_voltage = bac_data[:, 1]

    bap1_data = numpy.loadtxt(os.path.join(testdata_dir, 'testbap1data.txt'))
    bap1_time = bap1_data[:, 0]
    bap1_voltage = bap1_data[:, 1]

    bap2_data = numpy.loadtxt(os.path.join(testdata_dir, 'testbap2data.txt'))
    bap2_time = bap2_data[:, 0]
    bap2_voltage = bap2_data[:, 1]

    trace = {}
    trace['T'] = soma_time
    trace['V'] = soma_voltage
    trace['stim_start'] = [700]
    trace['stim_end'] = [2700]

    trace['T;location_AIS'] = soma_time
    trace['V;location_AIS'] = soma_voltage
    trace['stim_start;location_AIS'] = [700]
    trace['stim_end;location_AIS'] = [2700]

    trace['T;location_epsp'] = bac_time
    trace['V;location_epsp'] = bac_voltage
    trace['stim_start;location_epsp'] = [295]
    trace['stim_end;location_epsp'] = [600]

    trace['T;location_dend1'] = bap1_time
    trace['V;location_dend1'] = bap1_voltage
    trace['stim_start;location_dend1'] = [295]
    trace['stim_end;location_dend1'] = [500]

    trace['T;location_dend2'] = bap2_time
    trace['V;location_dend2'] = bap2_voltage
    trace['stim_start;location_dend2'] = [295]
    trace['stim_end;location_dend2'] = [500]

    bpap_featurenames = [
        'BPAPHeightLoc1',
        'BPAPHeightLoc2',
        'BPAPAmplitudeLoc1',
        'BPAPAmplitudeLoc2'
    ]
    bac_featurenames = ['BAC_width']

    soma_featurenames = all_featurenames[:]
    for feature_name in bpap_featurenames:
        soma_featurenames.remove(feature_name)
    for feature_name in bac_featurenames:
        soma_featurenames.remove(feature_name)

    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        feature_values = efel.getFeatureValues([trace], soma_featurenames)[0]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        efel.setThreshold(-30)
        feature_values = dict(
            list(feature_values.items()) +
            list(efel.getFeatureValues([trace],
                                       bpap_featurenames)[0].items()))

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        efel.setThreshold(-55)
        feature_values = dict(
            list(feature_values.items()) +
            list(efel.getFeatureValues([trace],
                                       bac_featurenames)[0].items()))

    for feature_name in feature_values:
        if feature_values[feature_name] is not None:
            feature_values[feature_name] = list(feature_values[feature_name])

    return feature_values
import matplotlib.pyplot as plt
plt.plot([1, 0], [0, 1])
plt.show()

import pickle
import make_allen_tests_from_id  # import *
from make_allen_tests_from_id import *
from neuronunit.optimisation.optimization_management import (
    dtc_to_rheo, inject_and_plot_model30, check_bin_vm30, check_bin_vm15)
import efel
import pandas as pd
import seaborn as sns

list(efel.getFeatureNames())

from utils import dask_map_function
import bluepyopt as bpop
import bluepyopt.ephys as ephys
from sciunit.scores import ZScore, RatioScore
from sciunit import TestSuite
from sciunit.scores.collections import ScoreArray
import sciunit
import numpy as np
from neuronunit.optimisation.optimization_management import (
    dtc_to_rheo, switch_logic, active_values)
from neuronunit.tests.base import AMPL, DELAY, DURATION
import quantities as pq

PASSIVE_DURATION = 500.0 * pq.ms
import argschema as ags
import efel

efel.getFeatureNames()


class Top_JobConfig(ags.schemas.DefaultSchema):
    '''
    Schema for the top-level job configuration
    '''
    job_dir = ags.fields.Str(description="")
    conda_env = ags.fields.Str(description="")
    nwb_path = ags.fields.InputFile(description="")
    swc_path = ags.fields.InputFile(description="")
    data_source = ags.fields.OptionList(description="",
                                        options=['lims', 'web', 'local'],
                                        default='web')
    axon_type = ags.fields.Str(description="")
    ephys_dir = ags.fields.Str(description="")
    non_standard_nwb = ags.fields.Boolean(description="")
    feature_stimtypes = ags.fields.List(ags.fields.Str, description="")
    feature_names_path = ags.fields.InputFile(description="")
    email = ags.fields.List(ags.fields.Email, description="")
    stimmap_file = ags.fields.Str(description="")
    machine = ags.fields.Str(description="")
    log_level = ags.fields.LogLevel(description='', default='DEBUG')
    all_features_path = ags.fields.Str(description="")
    all_protocols_path = ags.fields.Str(description="")
    dryrun = ags.fields.Boolean(
        default=False,
        description=
def extract_features(request):
    # If there is no "ctx" in the session, exit the application
    if "ctx" not in request.session:
        return render(request, 'efelg/hbp_redirect.html')

    data_dir = request.session['data_dir']
    json_dir = request.session['json_dir']
    selected_traces_rest_json = request.session['selected_traces_rest_json']
    allfeaturesnames = efel.getFeatureNames()
    crr_user_folder = request.session['time_info']
    full_crr_result_folder = request.session['u_crr_res_r_dir']
    full_crr_uploaded_folder = request.session['u_up_dir']
    full_crr_data_folder = request.session['u_crr_res_d_dir']
    full_crr_user_folder = request.session['user_crr_res_dir']
    check_features = request.session["check_features"]
    request.session['selected_features'] = check_features

    cell_dict = {}
    selected_traces_rest = []

    for k in selected_traces_rest_json:
        # crr_vcorr = selected_traces_rest_json[k]['vcorr']
        crr_file_rest_name = k + '.json'
        crr_name_split = k.split('____')
        crr_cell_name = crr_name_split[5]
        crr_sample_name = crr_name_split[6]
        crr_key = '____'.join(crr_name_split[0:6])

        if os.path.isfile(os.path.join(json_dir, crr_file_rest_name)):
            crr_json_file = os.path.join(json_dir, crr_file_rest_name)
        elif os.path.isfile(os.path.join(full_crr_uploaded_folder,
                                         crr_file_rest_name)):
            crr_json_file = os.path.join(full_crr_uploaded_folder,
                                         crr_file_rest_name)
        else:
            continue

        with open(crr_json_file) as f:
            crr_file_dict_read = f.read()
        crr_file_dict = json.loads(crr_file_dict_read)
        crr_file_all_stim = crr_file_dict['traces'].keys()
        crr_file_sel_stim = selected_traces_rest_json[k]['stim']

        crr_cell_data_folder = os.path.join(full_crr_data_folder,
                                            crr_cell_name)
        crr_cell_data_folder = full_crr_data_folder  # overrides the per-cell folder above
        if not os.path.exists(crr_cell_data_folder):
            os.makedirs(crr_cell_data_folder)
        shutil.copy2(crr_json_file, crr_cell_data_folder)

        if crr_key in cell_dict:
            cell_dict[crr_key]['stim'].append(crr_file_sel_stim)
            cell_dict[crr_key]['files'].append(k)
            # cell_dict[crr_key]['vcorr'].append(float(crr_vcorr))
        else:
            cell_dict[crr_key] = {}
            cell_dict[crr_key]['stim'] = [crr_file_sel_stim]
            cell_dict[crr_key]['files'] = [k]
            cell_dict[crr_key]['cell_name'] = crr_cell_name
            cell_dict[crr_key]['all_stim'] = crr_file_all_stim
            # cell_dict[crr_key]['vcorr'] = [float(crr_vcorr)]

    target = []
    final_cell_dict = {}
    final_exclude = []

    for key in cell_dict:
        crr_el = cell_dict[key]
        # v_corr = crr_el['vcorr']
        v_corr = 0
        for c_stim_el in crr_el['stim']:
            for i in c_stim_el:
                if float(i) not in target:
                    target.append(float(i))
        exc_stim_lists = [list(set(crr_el['all_stim']) - set(sublist))
                          for sublist in crr_el['stim']]
        crr_exc = []
        for crr_list in exc_stim_lists:
            crr_stim_val = [float(i) for i in crr_list]
            crr_exc.append(crr_stim_val)
        final_cell_dict[cell_dict[key]['cell_name']] = {
            'v_corr': v_corr,
            'ljp': 0,
            'experiments': {
                'step': {
                    'location': 'soma',
                    'files': [str(i) for i in crr_el['files']]
                }
            },
            'etype': 'etype',
            'exclude': crr_exc
        }

    # Build the configuration dictionary
    config = {}
    config['features'] = {'step': [str(i) for i in check_features]}
    config['path'] = crr_cell_data_folder
    config['format'] = 'ibf_json'
    config['comment'] = []
    config['cells'] = final_cell_dict
    config['options'] = {'relative': False, 'tolerance': 0.02,
                         'target': target, 'delay': 500, 'nanmean': False,
                         'logging': False, 'nangrace': 0,
                         'spike_threshold': 1, 'amp_min': 0,
                         'strict_stiminterval': {'base': True}}

    try:
        extractor = bpefe.Extractor(full_crr_result_folder, config,
                                    use_git=False)
        extractor.create_dataset()
        extractor.plt_traces()
        extractor.extract_features()
        extractor.mean_features()
        extractor.plt_features()
        extractor.feature_config_cells(version="legacy")
        extractor.feature_config_all(version="legacy")
    except Exception:
        return render(request, 'efelg/hbp_redirect.html', {
            "status": "KO",
            "message": "An error occurred while extracting the features. "
                       "Either you selected too much data or the traces "
                       "were corrupted."
        })

    conf_dir = request.session['conf_dir']
    conf_cit = os.path.join(conf_dir, 'citation_list.json')
    final_cit_file = os.path.join(full_crr_result_folder, 'HOWTOCITE.txt')
    resources.print_citations(selected_traces_rest_json, conf_cit,
                              final_cit_file)

    crr_result_folder = request.session['time_info']
    output_path = os.path.join(full_crr_user_folder,
                               crr_user_folder + '_results.zip')
    request.session['result_file_zip'] = output_path
    request.session['result_file_zip_name'] = crr_user_folder + '_results.zip'
    parent_folder = os.path.dirname(full_crr_result_folder)
    contents = os.walk(full_crr_result_folder)

    try:
        zip_file = zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED)
        for root, folders, files in contents:
            for folder_name in folders:
                absolute_path = os.path.join(root, folder_name)
                relative_path = absolute_path.replace(parent_folder + os.sep,
                                                      '')
                zip_file.write(absolute_path, relative_path)
            for file_name in files:
                absolute_path = os.path.join(root, file_name)
                relative_path = absolute_path.replace(parent_folder + os.sep,
                                                      '')
                zip_file.write(absolute_path, relative_path)
    except IOError as message:
        print(message)
        sys.exit(1)
    except OSError as message:
        print(message)
        sys.exit(1)
    except zipfile.BadZipfile as message:  # was zip_file.BadZipfile, a bug
        print(message)
        sys.exit(1)
    finally:
        zip_file.close()

    accesslogger.info(resources.string_for_log(
        'extract_features', request,
        page_spec_string='___'.join(check_features)))

    return HttpResponse(json.dumps({"status": "OK"}))
def three_feature_sets_on_static_models(model, debug=False, challenging=False):
    '''
    Conventions:
        variables ending in 15 refer to the 1.5 current injection protocol.
        variables ending in 30 refer to the 3.0 current injection protocol.
    Inputs: NML-DB models; this method is designed to be called inside an
        iteration loop, where a list of models is iterated over and, on each
        iteration, a new model is supplied to this method.
    Outputs: a dictionary of dataframes, for features sought according to:
        Druckmann, EFEL, AllenSDK
    '''
    ##
    # Wrangle data in preparation for computing Allen features.
    ##
    # import pdb; pdb.set_trace()
    times = np.array([float(t) for t in model.vm30.times])
    volts = np.array([float(v) for v in model.vm30])

    try:
        import asciiplotlib as apl
        fig = apl.figure()
        fig.plot(times, volts,
                 label="V_{m} (mV), versus time (ms)",
                 width=100, height=80)
        fig.show()
    except Exception:
        pass

    ##
    # Allen features
    ##
    # frame_shape, frame_dynamics, per_spike_info, meaned_features_overspikes
    all_allen_features30, allen_features = allen_format(
        volts, times, optional_vm=model.vm30)
    # if frame30 is not None:
    #     frame30['protocol'] = 3.0

    ##
    # Wrangle data in preparation for computing Allen features.
    ##
    times = np.array([float(t) for t in model.vm15.times])
    volts = np.array([float(v) for v in model.vm15])

    ##
    # Allen features
    ##
    all_allen_features15, allen_features = allen_format(
        volts, times, optional_vm=model.vm15)

    ##
    # Get Druckmann features; this is mainly handled in external files.
    ##
    # if model.ir_currents
    if hasattr(model, 'druckmann2013_input_resistance_currents'):
        DMTNMLO = dm_test_interoperable.DMTNMLO()
        DMTNMLO.test_setup(None, None, model=model)
        dm_test_features = DMTNMLO.runTest()
    else:
        dm_test_features = None
    # for d in dm_test_features:
    #     if d is None:

    ##
    # Wrangle data to prepare for EFEL feature calculation.
    ##
    trace3 = {}
    trace3['T'] = [float(t) for t in model.vm30.times.rescale('ms')]
    trace3['V'] = [float(v) for v in model.vm30.magnitude]  # temp_vm
    # trace3['peak_voltage'] = [np.max(model.vm30)]
    trace3['stimulus_current'] = [model.druckmann2013_strong_current]
    if not hasattr(model, 'allen'):
        trace3['stim_end'] = [trace3['T'][-1]]
        trace3['stim_start'] = [float(model.protocol['Time_Start'])]
    else:
        trace3['stim_end'] = [float(model.protocol['Time_End']) * 1000.0]
        trace3['stim_start'] = [float(model.protocol['Time_Start']) * 1000.0]
    # Now we pass 'traces3' to eFEL and ask it to calculate the feature values.
    traces3 = [trace3]

    trace15 = {}
    trace15['T'] = [float(t) for t in model.vm15.times.rescale('ms')]
    trace15['V'] = [float(v) for v in model.vm15.magnitude]  # temp_vm
    if not hasattr(model, 'allen'):
        trace15['stim_end'] = [trace15['T'][-1]]
        trace15['stim_start'] = [float(model.protocol['Time_Start'])]
    else:
        trace15['stim_end'] = [float(model.protocol['Time_End']) * 1000.0]
        trace15['stim_start'] = [float(model.protocol['Time_Start']) * 1000.0]
    trace15['stimulus_current'] = [model.druckmann2013_standard_current]
    trace15['stim_end'] = [trace15['T'][-1]]  # note: overrides the branch above
    # Now we pass 'traces15' to eFEL and ask it to calculate the feature values.
    traces15 = [trace15]

    ##
    # Compute EFEL features (HBP).
    ##
    efel_15 = efel.getFeatureValues(traces15, list(efel.getFeatureNames()))
    efel_30 = efel.getFeatureValues(traces3, list(efel.getFeatureNames()))

    if challenging:
        efel_results_inh = more_challenging(model)
        nu_preds = standard_nu_tests_two(DMTNMLO.model.nmldb_model)

    if debug:
        ##
        # Sort of like unit testing, but causes a download, which slows
        # everything down:
        ##
        assert DMTNMLO.model.druckmann2013_standard_current \
            != DMTNMLO.model.druckmann2013_strong_current
        from neuronunit.capabilities import spike_functions as sf
        _ = not_necessary_for_program_completion(DMTNMLO)
        print('note: False in evidence of spiking is not completely damning\n')
        print('a threshold of 0 mV is used to detect spikes; many models do '
              'not have a peak amplitude above 0 mV, so 0 spikes using the '
              'threshold technique is not final')
        print('druckmann tests use a derivative approach')
        # print(len(DMTNMLO.model.nmldb_model.get_APs()))
        print(len(sf.get_spike_train(model.vm30)) > 1)
        print(len(sf.get_spike_train(model.vm15)) > 1)
        print('\n\n\n\n\n\n successful run \n\n\n\n\n\n')

    if hasattr(model, 'information'):
        model_information = model.information
    else:
        model_information = 'allen_data'
    return {
        'model_id': model.name,
        'model_information': model_information,
        'efel_15': efel_15,
        'efel_30': efel_30,
        'dm': dm_test_features,
        'allen_15': all_allen_features15,
        'allen_30': all_allen_features30
    }
def three_feature_sets_on_static_models(model,
                                        unit_test=False,
                                        challenging=False):
    '''
    Conventions:
        variables ending in 15 refer to the 1.5 current injection protocol.
        variables ending in 30 refer to the 3.0 current injection protocol.
    Inputs: NML-DB models; this method is designed to be called inside an
        iteration loop, where a list of models is iterated over and, on each
        iteration, a new model is supplied to this method.
    Outputs: a dictionary of dataframes, for features sought according to:
        Druckmann, EFEL, AllenSDK
    '''
    ##
    # Wrangle data in preparation for computing Allen features.
    ##
    # import pdb; pdb.set_trace()
    if type(model) is type(DataTC()):
        model = model.dtc_to_model()
    if not hasattr(model, 'vm30'):
        model.inject_square_current(model.rheobase * 3.0)
        model.vm30 = model.get_membrane_potential()
        # The original was garbled here (a stray "['amplitude']" and an
        # argumentless inject_square_current()); injecting 1.5x rheobase is
        # an assumption that matches the vm15 naming convention.
        model.inject_square_current(model.rheobase * 1.5)
        model.vm15 = model.get_membrane_potential()

    times = np.array([float(t) for t in model.vm30.times])
    volts = np.array([float(v) for v in model.vm30])

    try:
        import asciiplotlib as apl
        fig = apl.figure()
        fig.plot(times, volts,
                 label="V_{m} (mV), versus time (ms)",
                 width=100, height=80)
        fig.show()
    except Exception:
        pass

    ##
    # Allen features
    ##
    # frame_shape, frame_dynamics, per_spike_info, meaned_features_overspikes
    all_allen_features30, allen_features30 = allen_format(
        volts, times, optional_vm=model.vm30)
    # if frame30 is not None:
    #     frame30['protocol'] = 3.0

    ##
    # Wrangle data in preparation for computing Allen features.
    ##
    times = np.array([float(t) for t in model.vm15.times])
    volts = np.array([float(v) for v in model.vm15])

    ##
    # Allen features
    ##
    all_allen_features15, allen_features15 = allen_format(
        volts, times, optional_vm=model.vm15)

    ##
    # Get Druckmann features; this is mainly handled in external files.
    ##
    # if model.ir_currents
    DMTNMLO = dm_test_interoperable.DMTNMLO()
    if hasattr(model, 'druckmann2013_input_resistance_currents') \
            and not hasattr(model, 'allen'):
        DMTNMLO.test_setup(None, None, model=model)
    else:
        DMTNMLO.test_setup(None, None, model=model, ir_current_limited=True)
    dm_test_features = DMTNMLO.runTest()

    ##
    # Wrangle data to prepare for EFEL feature calculation.
    ##
    trace3 = {}
    trace3['T'] = [float(t) for t in model.vm30.times.rescale('ms')]
    trace3['V'] = [float(v) for v in model.vm30.magnitude]  # temp_vm
    trace3['stimulus_current'] = [model.druckmann2013_strong_current]
    if not hasattr(model, 'allen'):
        trace3['stim_end'] = [trace3['T'][-1]]
        trace3['stim_start'] = [float(model.protocol['Time_Start'])]
    else:
        trace3['stim_end'] = [float(model.protocol['Time_End']) * 1000.0]
        trace3['stim_start'] = [float(model.protocol['Time_Start']) * 1000.0]
    # Now we pass 'traces3' to eFEL and ask it to calculate the feature values.
    traces3 = [trace3]

    trace15 = {}
    trace15['T'] = [float(t) for t in model.vm15.times.rescale('ms')]
    trace15['V'] = [float(v) for v in model.vm15.magnitude]  # temp_vm
    if not hasattr(model, 'allen'):
        trace15['stim_end'] = [trace15['T'][-1]]
        trace15['stim_start'] = [float(model.protocol['Time_Start'])]
    else:
        trace15['stim_end'] = [float(model.protocol['Time_End']) * 1000.0]
        trace15['stim_start'] = [float(model.protocol['Time_Start']) * 1000.0]
    trace15['stimulus_current'] = [model.druckmann2013_standard_current]
    trace15['stim_end'] = [trace15['T'][-1]]  # note: overrides the branch above
    # Now we pass 'traces15' to eFEL and ask it to calculate the feature values.
    traces15 = [trace15]

    ##
    # Compute EFEL features (HBP).
    ##
    efel.reset()
    if len(threshold_detection(model.vm15, threshold=0)):
        threshold = float(
            np.max(model.vm15.magnitude)
            - 0.5 * np.abs(np.std(model.vm15.magnitude)))
        print(len(threshold_detection(model.vm15, threshold=threshold)))
        print(threshold, 'threshold',
              np.max(model.vm15.magnitude), np.min(model.vm15.magnitude))
        # efel_15 = efel.getMeanFeatureValues(traces15,
        #                                     list(efel.getFeatureNames()))
    else:
        threshold = float(
            np.max(model.vm15.magnitude)
            - 0.2 * np.abs(np.std(model.vm15.magnitude)))
        efel.setThreshold(threshold)
        print(len(threshold_detection(model.vm15, threshold=threshold)))
        print(threshold, 'threshold', np.max(model.vm15.magnitude))

    if np.min(model.vm15.magnitude) < 0:
        efel_15 = efel.getMeanFeatureValues(traces15,
                                            list(efel.getFeatureNames()))
    else:
        efel_15 = None

    efel.reset()
    if len(threshold_detection(model.vm30, threshold=0)):
        threshold = float(
            np.max(model.vm30.magnitude)
            - 0.5 * np.abs(np.std(model.vm30.magnitude)))
        print(len(threshold_detection(model.vm30, threshold=threshold)))
        print(threshold, 'threshold',
              np.max(model.vm30.magnitude), np.min(model.vm30.magnitude))
        # efel_30 = efel.getMeanFeatureValues(traces3,
        #                                     list(efel.getFeatureNames()))
    else:
        threshold = float(
            np.max(model.vm30.magnitude)
            - 0.2 * np.abs(np.std(model.vm30.magnitude)))
        efel.setThreshold(threshold)
        # These two prints referenced vm15 in the original, an apparent
        # copy-paste slip; vm30 is used here.
        print(len(threshold_detection(model.vm30, threshold=threshold)))
        print(threshold, 'threshold', np.max(model.vm30.magnitude))

    if np.min(model.vm30.magnitude) < 0:
        efel_30 = efel.getMeanFeatureValues(traces3,
                                            list(efel.getFeatureNames()))
    else:
        efel_30 = None

    efel.reset()

    if hasattr(model, 'information'):
        model_information = model.information
    else:
        model_information = 'allen_data'
    return {
        'model_id': model.name,
        'model_information': model_information,
        'efel_15': efel_15,
        'efel_30': efel_30,
        'dm': dm_test_features,
        'allen_15': all_allen_features15,
        'allen_30': all_allen_features30
    }
def extract_features(request):
    # If there is no "ctx" in the session, exit the application
    if "ctx" not in request.session:
        return render(request, 'efelg/overview.html')

    selected_traces_rest_json = request.session['selected_traces_rest_json']
    global_parameters_json = request.session['global_parameters_json']
    allfeaturesnames = efel.getFeatureNames()
    username = request.session['username']
    time_info = request.session['time_info']

    # conf_dir = request.session['main_json_dir']
    conf_dir = EfelStorage.getMainJsonDir()
    # traces_files_dir = request.session['user_files_dir']
    traces_files_dir = EfelStorage.getTracesDir()
    # user_files_dir = request.session['user_files_dir']
    user_files_dir = EfelStorage.getUserFilesDir(username, time_info)
    # user_results_dir = request.session['user_results_dir']
    user_results_dir = EfelStorage.getResultsDir(username, time_info)

    selected_features = request.session["selected_features"]

    cell_dict = {}
    for k in selected_traces_rest_json:
        path_to_file = os.path.join(user_files_dir, k + '.json')
        if k + '.json' not in os.listdir(user_files_dir):
            shutil.copy2(os.path.join(traces_files_dir, k + '.json'),
                         path_to_file)
        with open(path_to_file) as f:
            crr_file_dict = json.loads(f.read())

        crr_file_all_stim = list(crr_file_dict['traces'].keys())
        crr_file_sel_stim = selected_traces_rest_json[k]['stim']

        if "stimulus_unit" in crr_file_dict:
            crr_file_amp_unit = crr_file_dict["stimulus_unit"]
        elif "amp_unit" in crr_file_dict:
            crr_file_amp_unit = crr_file_dict["amp_unit"]
        else:
            raise Exception("stimulus_unit not found!")

        if "cell_id" in crr_file_dict:
            crr_cell_name = crr_file_dict["cell_id"]
        elif "name" in crr_file_dict:
            crr_cell_name = crr_file_dict["name"]
        else:
            raise Exception("cell_id not found!")

        new_keys = [("animal_species", "species"),
                    ("brain_structure", "area"),
                    ("cell_soma_location", "region"),
                    ("cell_type", "type"),
                    ("etype", "etype"),
                    ("cell_id", "name")]
        keys = [
            crr_file_dict[t[0]] if t[0] in crr_file_dict
            else crr_file_dict[t[1]]
            for t in new_keys
        ]
        keys2 = []
        for kk2 in keys:
            if not type(kk2) == list:
                keys2.append(kk2)
            else:
                for kkk in kk2:
                    keys2.append(kkk)
        crr_key = '____'.join(keys2)

        """
        if crr_key in cell_dict:
            print("here")
            cell_dict[crr_key]['stim'].append(crr_file_sel_stim)
            cell_dict[crr_key]['files'].append(k)
            cell_dict[crr_key]['v_corr'] = crr_vcorr
        else:
        """
        cell_dict[crr_key] = {}
        cell_dict[crr_key]['stim'] = [crr_file_sel_stim]
        cell_dict[crr_key]['files'] = [k]
        cell_dict[crr_key]['cell_name'] = crr_cell_name
        cell_dict[crr_key]['all_stim'] = crr_file_all_stim
        cell_dict[crr_key]['v_corr'] = [
            int(selected_traces_rest_json[k]['v_corr'])
        ]

    target = []
    final_cell_dict = {}
    final_exclude = []
    for key in cell_dict:
        crr_el = cell_dict[key]
        for c_stim_el in crr_el['stim']:
            for i in c_stim_el:
                if float(i) not in target:
                    target.append(float(i))
        exc_stim_lists = [
            list(set(crr_el['all_stim']) - set(sublist))
            for sublist in crr_el['stim']
        ]
        crr_exc = []
        for crr_list in exc_stim_lists:
            crr_stim_val = [float(i) for i in crr_list]
            crr_exc.append(crr_stim_val)
        final_cell_dict[cell_dict[key]['cell_name']] = {
            'v_corr': crr_el['v_corr'],
            'ljp': 0,
            'experiments': {
                'step': {
                    'location': 'soma',
                    'files': [str(i) for i in crr_el['files']]
                }
            },
            'etype': 'etype',
            'exclude': crr_exc,
            'exclude_unit': [crr_file_amp_unit for i in range(len(crr_exc))]
        }

    # Build the configuration dictionary
    config = {}
    config['features'] = {'step': [str(i) for i in selected_features]}
    config['path'] = user_files_dir
    config['format'] = 'ibf_json'
    config['comment'] = []
    config['cells'] = final_cell_dict
    config['options'] = {
        # 'featconffile': './pt_conf.json',
        # 'featzerotonan': False,
        'zero_to_nan': {
            'flag': bool(global_parameters_json['zero_to_nan']),
            'value': global_parameters_json['value'],
            'mean_features_no_zeros':
                global_parameters_json['mean_features_no_zeros']
        },
        'relative': False,
        'tolerance': 0.02,
        'target': target,
        'target_unit': 'nA',
        'delay': 500,
        'nanmean': True,
        'logging': True,
        'nangrace': 0,
        # 'spike_threshold': 1,
        'amp_min': -1e22,
        'zero_std': bool(global_parameters_json['zero_std']),
        'trace_check': False,
        'strict_stiminterval': {'base': True},
        'print_table': {
            'flag': True,
            'num_events': int(global_parameters_json['num_events']),
        }
    }

    try:
        main_results_folder = os.path.join(user_results_dir,
                                           time_info + "_nfe_results")
        extractor = bpefe.Extractor(main_results_folder, config)
        extractor.create_dataset()
        extractor.plt_traces()
        if global_parameters_json['threshold'] != '':
            extractor.extract_features(
                threshold=int(global_parameters_json['threshold']))
        else:
            extractor.extract_features(threshold=-20)
        extractor.mean_features()
        extractor.plt_features()
        extractor.feature_config_cells(version="legacy")
        extractor.feature_config_all(version="legacy")
    except ValueError as e:
        print('SOME ERROR OCCURRED')
        print(e)

    conf_cit = os.path.join(conf_dir, 'citation_list.json')
    final_cit_file = os.path.join(main_results_folder, 'HOWTOCITE.txt')
    resources.print_citations(selected_traces_rest_json, conf_cit,
                              final_cit_file)

    zip_name = time_info + '_nfe_results.zip'
    zip_path = os.path.join(user_results_dir, zip_name)
    request.session['nfe_result_file_zip'] = zip_path
    request.session['nfe_result_file_zip_name'] = zip_name

    for k in selected_traces_rest_json:
        f = os.path.join(user_results_dir, k + ".json")
        if os.path.exists(f):
            os.remove(f)

    # parent_folder = os.path.dirname(full_crr_result_folder)
    contents = os.walk(main_results_folder)
    try:
        zip_file = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
        for root, folders, files in contents:
            for folder_name in folders:
                absolute_path = os.path.join(root, folder_name)
                relative_path = absolute_path.replace(
                    main_results_folder + os.sep, '')
                zip_file.write(absolute_path, relative_path)
            for file_name in files:
                absolute_path = os.path.join(root, file_name)
                relative_path = absolute_path.replace(
                    main_results_folder + os.sep, '')
                zip_file.write(absolute_path, relative_path)
    except IOError as message:
        print(message)
        sys.exit(1)
    except OSError as message:
        print(message)
        sys.exit(1)
    except zipfile.BadZipfile as message:  # was zip_file.BadZipfile, a bug
        print(message)
        sys.exit(1)
    finally:
        zip_file.close()

    # accesslogger.info(resources.string_for_log('extract_features', request,
    #     page_spec_string='___'.join(check_features)))
    return HttpResponse(json.dumps({"status": "OK"}))
def get_allfeature_values():
    """Get back all the feature names and values"""
    import efel
    efel.reset()
    import numpy

    all_featurenames = efel.getFeatureNames()

    soma_data = numpy.loadtxt(os.path.join(testdata_dir, 'testdata.txt'))
    soma_time = soma_data[:, 0]
    soma_voltage = soma_data[:, 1]

    db_data = numpy.loadtxt(os.path.join(testdata_dir, 'testdbdata.txt'))
    db_time = db_data[:, 0]
    db_voltage = db_data[:, 1]

    bac_data = numpy.loadtxt(os.path.join(testdata_dir, 'testbacdata.txt'))
    bac_time = bac_data[:, 0]
    bac_voltage = bac_data[:, 1]

    bap1_data = numpy.loadtxt(os.path.join(testdata_dir, 'testbap1data.txt'))
    bap1_time = bap1_data[:, 0]
    bap1_voltage = bap1_data[:, 1]

    bap2_data = numpy.loadtxt(os.path.join(testdata_dir, 'testbap2data.txt'))
    bap2_time = bap2_data[:, 0]
    bap2_voltage = bap2_data[:, 1]

    trace = {}
    trace_db = {}

    trace['T'] = soma_time
    trace['V'] = soma_voltage
    trace['stim_start'] = [700]
    trace['stim_end'] = [2700]

    trace['T;location_AIS'] = soma_time
    trace['V;location_AIS'] = soma_voltage
    trace['stim_start;location_AIS'] = [700]
    trace['stim_end;location_AIS'] = [2700]

    trace['T;location_epsp'] = bac_time
    trace['V;location_epsp'] = bac_voltage
    trace['stim_start;location_epsp'] = [295]
    trace['stim_end;location_epsp'] = [600]

    trace['T;location_dend1'] = bap1_time
    trace['V;location_dend1'] = bap1_voltage
    trace['stim_start;location_dend1'] = [295]
    trace['stim_end;location_dend1'] = [500]

    trace['T;location_dend2'] = bap2_time
    trace['V;location_dend2'] = bap2_voltage
    trace['stim_start;location_dend2'] = [295]
    trace['stim_end;location_dend2'] = [500]

    trace_db['T'] = db_time
    trace_db['V'] = db_voltage
    trace_db['stim_start'] = [419.995]
    trace_db['stim_end'] = [1419.995]

    bpap_featurenames = [
        'BPAPHeightLoc1',
        'BPAPHeightLoc2',
        'BPAPAmplitudeLoc1',
        'BPAPAmplitudeLoc2'
    ]
    bac_featurenames = ['BAC_width']
    db_featurenames = ['depol_block']

    soma_featurenames = all_featurenames[:]
    for feature_name in bpap_featurenames:
        soma_featurenames.remove(feature_name)
    for feature_name in bac_featurenames:
        soma_featurenames.remove(feature_name)
    for feature_name in db_featurenames:
        soma_featurenames.remove(feature_name)

    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        feature_values = efel.getFeatureValues([trace], soma_featurenames)[0]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        feature_values = dict(
            list(feature_values.items()) +
            list(efel.getFeatureValues([trace_db],
                                       db_featurenames)[0].items()))

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        efel.setThreshold(-30)
        feature_values = dict(
            list(feature_values.items()) +
            list(efel.getFeatureValues([trace],
                                       bpap_featurenames)[0].items()))

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        efel.setThreshold(-55)
        feature_values = dict(
            list(feature_values.items()) +
            list(efel.getFeatureValues([trace],
                                       bac_featurenames)[0].items()))

    for feature_name in feature_values:
        if feature_values[feature_name] is not None:
            feature_values[feature_name] = list(feature_values[feature_name])

    return feature_values
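A side note on the repeated setThreshold calls above: the spike-detection threshold is global eFEL state, so each feature group is computed under its own threshold. A runnable sketch with a synthetic sub-zero bump, made up purely for illustration:

import efel
import numpy as np

efel.reset()

# Synthetic trace (illustration only): a smooth bump peaking near -40 mV.
time = np.linspace(0.0, 1000.0, 10001)
voltage = -70.0 + 30.0 * np.exp(-((time - 500.0) / 50.0) ** 2)
trace = {'T': time, 'V': voltage, 'stim_start': [300.0], 'stim_end': [700.0]}

# With eFEL's default threshold (-20 mV) the bump is not counted as a spike;
# after lowering the threshold, the same trace yields one.
print(efel.getFeatureValues([trace], ['Spikecount'])[0]['Spikecount'])
efel.setThreshold(-55)
print(efel.getFeatureValues([trace], ['Spikecount'])[0]['Spikecount'])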
def three_feature_sets_on_static_models(model, test_frame=None):
    '''
    Conventions:
        variables ending in 15 refer to the 1.5 current injection protocol.
        variables ending in 30 refer to the 3.0 current injection protocol.
    Inputs: NML-DB models; this method is designed to be called inside an
        iteration loop, where a list of models is iterated over and, on each
        iteration, a new model is supplied to this method.
    Outputs: a dictionary of dataframes, for features sought according to:
        Druckmann, EFEL, AllenSDK
    '''
    ##
    # Wrangle data in preparation for computing Allen features.
    ##
    times = np.array([float(t) for t in model.vm30.times])
    volts = np.array([float(v) for v in model.vm30])

    ##
    # Allen features
    ##
    # frame_shape, frame_dynamics, per_spike_info, meaned_features_overspikes
    frame30 = allen_format(volts, times)
    frame30['protocol'] = 3.0

    ##
    # Wrangle data in preparation for computing Allen features.
    ##
    times = np.array([float(t) for t in model.vm15.times])
    volts = np.array([float(v) for v in model.vm15])

    ##
    # Allen features
    ##
    frame15 = allen_format(volts, times)
    frame15['protocol'] = 1.5
    allen_frame = frame30.append(frame15)
    # allen_frame.set_index('protocol')

    print(len(sf.get_spike_train(model.vm30)) > 1)
    print(len(sf.get_spike_train(model.vm15)) > 1)

    ##
    # Wrangle data to prepare for EFEL feature calculation.
    ##
    trace3 = {}
    trace3['T'] = [float(t) for t in model.vm30.times.rescale('ms')]
    trace3['V'] = [float(v) for v in model.vm30]  # temp_vm
    # trace3['peak_voltage'] = [np.max(model.vm30)]
    trace3['stim_start'] = [float(model.protocol['Time_Start'])]
    trace3['stimulus_current'] = [model.druckmann2013_strong_current]
    trace3['stim_end'] = [trace3['T'][-1]]
    # Now we pass 'traces3' to eFEL and ask it to calculate the feature values.
    traces3 = [trace3]

    trace15 = {}
    trace15['T'] = [float(t) for t in model.vm15.times.rescale('ms')]
    trace15['V'] = [float(v) for v in model.vm15]  # temp_vm
    # trace15['peak_voltage'] = [np.max(model.vm15)]
    trace15['stim_start'] = [float(model.protocol['Time_Start'])]
    trace15['stimulus_current'] = [model.druckmann2013_standard_current]
    trace15['stim_end'] = [trace15['T'][-1]]
    # Now we pass 'traces15' to eFEL and ask it to calculate the feature values.
    traces15 = [trace15]

    ##
    # Compute EFEL features (HBP).
    ##
    efel_results15 = efel.getFeatureValues(traces15,
                                           list(efel.getFeatureNames()))
    efel_results30 = efel.getFeatureValues(traces3,
                                           list(efel.getFeatureNames()))
    # efel_results_inh = more_challenging(model)

    df15 = pd.DataFrame(efel_results15)
    df15['protocol'] = 1.5
    df30 = pd.DataFrame(efel_results30)
    df30['protocol'] = 3.0
    efel_frame = df15.append(df30)
    # efel_frame.set_index('protocol')

    ##
    # Get Druckmann features; this is mainly handled in external files.
    ##
    DMTNMLO = dm_test_interoperable.DMTNMLO()
    DMTNMLO.test_setup(None, None, model=model)
    dm_test_features = DMTNMLO.runTest()
    dm_frame = pd.DataFrame(dm_test_features)
    # nu_preds = standard_nu_tests_two(DMTNMLO.model.nmldb_model)

    ##
    # Sort of like unit testing, but causes a download, which slows
    # everything down:
    ##
    # assert DMTNMLO.model.druckmann2013_standard_current \
    #     != DMTNMLO.model.druckmann2013_strong_current
    # _ = not_necessary_for_program_completion(DMTNMLO)

    return {'efel': efel_frame, 'dm': dm_frame, 'allen': allen_frame}