def test_abf(rootdir):
    """Test loading of abf.

    Runs the full BluePyEfe extraction pipeline on the abf1.json config.
    """
    # Use a context manager so the config file descriptor is closed;
    # the original `json.load(open(...))` leaked it.
    with open(os.path.join(rootdir, 'configs', 'abf1.json')) as config_file:
        config = json.load(config_file)
    # Make the data path absolute relative to the test root directory.
    config['path'] = os.path.join(rootdir, config['path'])

    extractor = bpefe.Extractor('testtype_abf', config, use_git=False)
    extractor.create_dataset()
    extractor.plt_traces()
    extractor.extract_features(threshold=-30)
    extractor.mean_features()
    extractor.plt_features()
    extractor.feature_config_cells()
    extractor.feature_config_all()
def extr_features(self):
    """Run the BluePyEfe extraction pipeline for the config in self.config_file.

    Produces the dataset, metadataset, mean features and the legacy
    feature-config outputs.
    """
    with open(self.config_file, "r") as cfg_fh:
        cfg = json.load(cfg_fh)

    ext = bpefe.Extractor('test_run', cfg)
    ext.create_dataset()
    ext.create_metadataset()
    # Trace plotting, threshold analysis and feature plotting are
    # intentionally skipped in this run.
    ext.extract_features(threshold=-20)
    ext.mean_features()
    ext.feature_config_cells(version='legacy')
    ext.feature_config_all(version='legacy')
def test_config(rootdir, config_path):
    """Test config.

    Loads an arbitrary config file and runs the full extraction pipeline,
    including the feature-distribution plots.
    """
    # Use a context manager so the config file descriptor is closed;
    # the original `json.load(open(...))` leaked it.  A stray trailing
    # ''' in the original source (which opened an unterminated string)
    # was removed.
    with open(config_path) as config_file:
        config = json.load(config_file)
    config['path'] = os.path.join(rootdir, config['path'])

    extractor = bpefe.Extractor('test_run', config, use_git=False)
    extractor.create_dataset()
    extractor.plt_traces()
    extractor.extract_features(threshold=-30)
    extractor.mean_features()
    # extractor.analyse_threshold()
    extractor.plt_features()
    extractor.feature_config_cells()
    extractor.feature_config_all()
    extractor.plt_features_dist()
def test_ibf_json(rootdir):
    """Test ibf json.

    Writes an ibf_json-format configuration to configs/ibf_json1.json and
    runs the legacy extraction pipeline on it.
    """
    # Build the configuration directly as a dict; the original embedded it
    # as a JSON string and immediately json.loads'ed it back, which is a
    # redundant round-trip.  Content is identical to the original JSON.
    config = {
        "cells": {
            "970509hp2": {
                "etype": "etype",
                "exclude": [[-1.8], [-1.8]],
                "experiments": {
                    "step": {
                        "files": [
                            "rattus-norvegicus____hippocampus____ca1____"
                            "interneuron____cac____970509hp2____97509008",
                            "rattus-norvegicus____hippocampus____ca1____"
                            "interneuron____cac____970509hp2____97509009",
                        ],
                        "location": "soma",
                    }
                },
                "ljp": 0,
                "v_corr": 0,
            }
        },
        "comment": [],
        "features": {
            "step": [
                "time_to_last_spike",
                "time_to_second_spike",
                "voltage",
                "voltage_base",
            ]
        },
        "format": "ibf_json",
        "options": {
            "delay": 500,
            "logging": False,
            "nanmean": False,
            "relative": False,
            "target": ["all"],
            "tolerance": 0.02,
        },
        "path": "./data_ibf_json/eg_json_data/traces",
    }

    # Use a context manager so the write handle is closed; the original
    # `json.dump(config, open(..., 'w'))` leaked the descriptor.
    with open(os.path.join(rootdir, 'configs', 'ibf_json1.json'), 'w') as fd:
        json.dump(config, fd, sort_keys=True, indent=4)

    config['path'] = os.path.join(rootdir, config['path'])

    import bluepyefe as bpefe

    extractor = bpefe.Extractor('temptype_ibf', config)
    extractor.create_dataset()
    extractor.plt_traces()
    extractor.extract_features(threshold=-30)
    extractor.mean_features()
    extractor.plt_features()
    extractor.feature_config_cells(version='legacy')
    extractor.feature_config_all(version='legacy')
def test_config(rootdir):
    """Test config.

    Builds an Igor-format configuration (two cells, IDthresh sweeps),
    writes it to configs/ibw1.json, and runs the extraction pipeline in
    both legacy and current output modes.
    """

    def _idthresh_sweep(cell, ordinal):
        # One Igor IDthresh sweep: voltage trace on channel 1, current
        # trace on channel 0, sampled at 0.25 ms.
        return {
            "ordinal": ordinal,
            "i_unit": "A",
            "v_file": "%s/X_IDthresh_ch1_%s.ibw" % (cell, ordinal),
            "t_unit": "s",
            "i_file": "%s/X_IDthresh_ch0_%s.ibw" % (cell, ordinal),
            "v_unit": "V",
            "dt": 0.00025,
        }

    # The original embedded this configuration as one giant JSON string
    # and json.loads'ed it; building the dict directly avoids the
    # round-trip and the 23 hand-written per-sweep entries.  Content is
    # identical to the original JSON.
    cells = {}
    for cell, ordinals in (
        ("C060109A2-SR-C1",
         ["348", "349", "350", "351", "352"]),
        ("C060109A1-SR-C1",
         ["349", "350", "351", "352", "353", "354", "355", "356", "357",
          "362", "363", "364", "365", "366", "367", "368", "369", "370"]),
    ):
        cells[cell] = {
            "ljp": 14,
            "v_corr": False,
            "experiments": {
                "IDthresh": {
                    "files": [_idthresh_sweep(cell, o) for o in ordinals],
                    "location": "soma.v",
                }
            },
        }

    config = {
        "comment": [
            "v_corr: normalize membrane potential to this value",
            "ljp: set so to 14mV",
            "etype: was defined individually by eye from plotted traces",
        ],
        "features": {
            "IDthresh": [
                "adaptation_index2", "mean_frequency", "time_to_first_spike",
                "ISI_log_slope", "ISI_log_slope_skip", "time_to_last_spike",
                "inv_time_to_first_spike", "inv_first_ISI", "inv_second_ISI",
                "inv_third_ISI", "inv_fourth_ISI", "inv_fifth_ISI",
                "inv_last_ISI", "voltage_deflection",
                "voltage_deflection_begin", "steady_state_voltage",
                "decay_time_constant_after_stim", "AP_amplitude",
            ]
        },
        "format": "igor",
        "cells": cells,
        "path": "./data/",
        "options": {
            "expthreshold": ["IDrest", "IDthresh"],
            "relative": True,
            "delay": 0,
            "nanmean": False,
            "target": [-20, "noinput", 50, 100, 120, "all"],
            "tolerance": [5, False, 5, 5, 5, False],
            "nangrace": 0,
            "spike_threshold": 1,
            "amp_min": 0,
            "strict_stiminterval": {"SpikeRec": False, "base": True},
            "onoff": {
                "TesteCode": [100.0, 600.0],
                "APWaveform": [5.0, 55.0],
                "IV": [20.0, 1020.0],
                "IDrest": [700.0, 2700.0],
                "SpontAPs": [100.0, 10100.0],
                "APDrop": [10.0, 15.0],
                "IRhyperpol": [500.0, 700.0],
                "Spontaneous": [100.0, 10100.0],
                "IDthresh": [700.0, 2700.0],
                "APThreshold": [0.0, 2000.0],
                "SpikeRec": [10.0, 13.5],
                "IDdepol": [700.0, 2700.0],
                "Step": [700.0, 2700.0],
                "Delta": [10.0, 60.0],
                "IRdepol": [500.0, 700.0],
            },
            "logging": True,
            "boxcox": False,
        },
    }

    # Use a context manager so the write handle is closed; the original
    # `json.dump(config, open(..., 'w'))` leaked the descriptor.
    with open(os.path.join(rootdir, 'configs', 'ibw1.json'), 'w') as fd:
        json.dump(config, fd, sort_keys=True, indent=4)

    config['path'] = os.path.join(rootdir, config['path'])

    # Legacy output format.
    extractor = bpefe.Extractor('testtype_legacy', config, use_git=False)
    extractor.create_dataset()
    extractor.plt_traces()
    extractor.extract_features(threshold=-30)
    extractor.mean_features()
    extractor.analyse_threshold()
    extractor.plt_features()
    extractor.feature_config_cells(version='legacy')
    extractor.feature_config_all(version='legacy')

    # Current output format, plus feature-distribution plots.
    extractor = bpefe.Extractor('testtype', config, use_git=False)
    extractor.create_dataset()
    extractor.plt_traces()
    extractor.extract_features(threshold=-30)
    extractor.mean_features()
    extractor.analyse_threshold()
    extractor.plt_features()
    extractor.feature_config_cells()
    extractor.feature_config_all()
    extractor.plt_features_dist()
def test_csv(rootdir):
    """Test loading from csv.

    Builds a csv_lccr-format configuration, writes it to configs/csv1.json,
    and runs the extraction pipeline (no explicit spike threshold).
    """
    # The original embedded this configuration as a JSON string and
    # json.loads'ed it; building the dict directly avoids the round-trip.
    # Content is identical to the original JSON.
    config = {
        "features": {
            "step": [
                "ISI_log_slope", "mean_frequency", "adaptation_index2",
                "ISI_CV", "AP_height", "AHP_depth_abs",
                "AHP_depth_abs_slow", "AHP_slow_time", "AP_width",
                "AP_amplitude", "AP1_amp", "AP2_amp", "APlast_amp",
                "AP_duration_half_width", "AHP_depth", "fast_AHP",
                "AHP_time_from_peak", "voltage_deflection",
                "voltage_deflection_begin", "voltage_base",
                "steady_state_voltage", "Spikecount",
                "time_to_last_spike", "time_to_first_spike",
                "inv_time_to_first_spike", "inv_first_ISI",
                "inv_second_ISI", "inv_third_ISI", "inv_fourth_ISI",
                "inv_fifth_ISI", "inv_last_ISI",
                "decay_time_constant_after_stim", "AP_begin_voltage",
                "AP_rise_time", "AP_fall_time", "AP_rise_rate",
                "AP_fall_rate",
            ]
        },
        "path": "./data_csv/",
        "format": "csv_lccr",
        "comment": [
            "cells named using name of first trace file belonging to this cell",
            "v_corr: normalize membrane potential to this value (given in UCL excel sheet)",
            "ljp: set so that RMP (UCL) - 10mV ljp = RMP (Golding 2001) -14mV ljp",
            "etype: was defined individually by eye from plotted traces",
        ],
        "cells": {
            "TEST_CELL": {
                "v_corr": False,
                "ljp": 14.4,
                "experiments": {
                    "step": {
                        "location": "soma",
                        "files": [
                            "s150420-0403_ch1_cols",
                            "s150420-0404_ch1_cols",
                        ],
                        "dt": 0.2,
                        "startstop": [200, 1000],
                        "amplitudes": [
                            0.01, -0.01, 0.02, -0.02, 0.03, -0.03,
                            0.04, -0.04, 0.05, -0.05, 0.06, -0.06,
                            0.07, -0.07, 0.08, -0.08, 0.09, -0.09,
                            0.1, -0.1, 0.15, 0.2, 0.25, 0.3, 0.4,
                            0.5, 0.6,
                        ],
                        "hypamp": 0.0,
                        "ton": 200,
                        "toff": 1000,
                    }
                },
            }
        },
        "options": {
            "relative": False,
            "tolerance": 0.01,
            "target": [
                0.02, -0.02, 0.04, -0.04, 0.06, -0.06, 0.08, -0.08,
                0.1, -0.1, 0.2, 0.3, 0.4, 0.5, 0.6,
            ],
            "delay": 200,
            "nanmean": False,
        },
    }

    # Use a context manager so the write handle is closed; the original
    # `json.dump(config, open(..., 'w'))` leaked the descriptor.
    with open(os.path.join(rootdir, 'configs', 'csv1.json'), 'w') as fd:
        json.dump(config, fd, sort_keys=True, indent=4)

    config['path'] = os.path.join(rootdir, config['path'])

    import bluepyefe as bpefe

    extractor = bpefe.Extractor('testtype_csv', config, use_git=False)
    extractor.create_dataset()
    extractor.plt_traces()
    extractor.extract_features()
    extractor.mean_features()
    # extractor.analyse_threshold()
    extractor.feature_config_cells()
    extractor.feature_config_all()
    extractor.plt_features()
def extract_features(request):
    """Django view: run BluePyEfe feature extraction for the session's
    selected traces, zip the results, and return a JSON status.

    Reads trace selections and parameters from ``request.session``,
    builds an ibf_json configuration, runs the extraction pipeline,
    writes a citation file, and packs the result folder into a zip whose
    path is stored back into the session.
    """
    # If there is no collab context in the session, bail out to the overview.
    if "ctx" not in request.session:
        return render(request, 'efelg/overview.html')

    selected_traces_rest_json = request.session['selected_traces_rest_json']
    global_parameters_json = request.session['global_parameters_json']
    allfeaturesnames = efel.getFeatureNames()
    username = request.session['username']
    time_info = request.session['time_info']
    conf_dir = EfelStorage.getMainJsonDir()
    traces_files_dir = EfelStorage.getTracesDir()
    user_files_dir = EfelStorage.getUserFilesDir(username, time_info)
    user_results_dir = EfelStorage.getResultsDir(username, time_info)
    selected_features = request.session["selected_features"]

    cell_dict = {}
    for k in selected_traces_rest_json:
        path_to_file = os.path.join(user_files_dir, k + '.json')
        # Copy the trace file into the user's working dir if not there yet.
        if k + '.json' not in os.listdir(user_files_dir):
            shutil.copy2(os.path.join(traces_files_dir, k + '.json'),
                         path_to_file)
        with open(path_to_file) as f:
            crr_file_dict = json.loads(f.read())
        crr_file_all_stim = list(crr_file_dict['traces'].keys())
        crr_file_sel_stim = selected_traces_rest_json[k]['stim']

        if "stimulus_unit" in crr_file_dict:
            crr_file_amp_unit = crr_file_dict["stimulus_unit"]
        elif "amp_unit" in crr_file_dict:
            crr_file_amp_unit = crr_file_dict["amp_unit"]
        else:
            raise Exception("stimulus_unit not found!")

        if "cell_id" in crr_file_dict:
            crr_cell_name = crr_file_dict["cell_id"]
        elif "name" in crr_file_dict:
            crr_cell_name = crr_file_dict["name"]
        else:
            raise Exception("cell_id not found!")

        # Metadata keys: prefer the new field name, fall back to the legacy one.
        new_keys = [("animal_species", "species"),
                    ("brain_structure", "area"),
                    ("cell_soma_location", "region"),
                    ("cell_type", "type"),
                    ("etype", "etype"),
                    ("cell_id", "name")]
        keys = [
            crr_file_dict[t[0]] if t[0] in crr_file_dict
            else crr_file_dict[t[1]]
            for t in new_keys
        ]
        # Flatten one level: some metadata values are lists.
        keys2 = []
        for kk2 in keys:
            if isinstance(kk2, list):
                keys2.extend(kk2)
            else:
                keys2.append(kk2)
        crr_key = '____'.join(keys2)

        # NOTE(review): each trace overwrites any previous entry for the
        # same key — the original code that accumulated into an existing
        # entry was commented out, so that behavior is preserved here.
        cell_dict[crr_key] = {
            'stim': [crr_file_sel_stim],
            'files': [k],
            'cell_name': crr_cell_name,
            'all_stim': crr_file_all_stim,
            'v_corr': [int(selected_traces_rest_json[k]['v_corr'])],
        }

    target = []
    final_cell_dict = {}
    for key in cell_dict:
        crr_el = cell_dict[key]
        # Collect all selected stimulus amplitudes (deduplicated).
        for c_stim_el in crr_el['stim']:
            for i in c_stim_el:
                if float(i) not in target:
                    target.append(float(i))
        # Stimuli present in the file but not selected are excluded.
        exc_stim_lists = [
            list(set(crr_el['all_stim']) - set(sublist))
            for sublist in crr_el['stim']
        ]
        crr_exc = [[float(i) for i in crr_list]
                   for crr_list in exc_stim_lists]
        final_cell_dict[crr_el['cell_name']] = {
            'v_corr': crr_el['v_corr'],
            'ljp': 0,
            'experiments': {
                'step': {
                    'location': 'soma',
                    'files': [str(i) for i in crr_el['files']],
                }
            },
            'etype': 'etype',
            'exclude': crr_exc,
            # NOTE(review): crr_file_amp_unit is whatever the last trace
            # in the loop above set — assumed uniform across traces.
            'exclude_unit': [crr_file_amp_unit for _ in range(len(crr_exc))],
        }

    # Build the configuration dictionary for the extractor.
    config = {
        'features': {'step': [str(i) for i in selected_features]},
        'path': user_files_dir,
        'format': 'ibf_json',
        'comment': [],
        'cells': final_cell_dict,
        'options': {
            'zero_to_nan': {
                'flag': bool(global_parameters_json['zero_to_nan']),
                'value': global_parameters_json['value'],
                'mean_features_no_zeros':
                    global_parameters_json['mean_features_no_zeros'],
            },
            'relative': False,
            'tolerance': 0.02,
            'target': target,
            'target_unit': 'nA',
            'delay': 500,
            'nanmean': True,
            'logging': True,
            'nangrace': 0,
            'amp_min': -1e22,
            'zero_std': bool(global_parameters_json['zero_std']),
            'trace_check': False,
            'strict_stiminterval': {'base': True},
            'print_table': {
                'flag': True,
                'num_events': int(global_parameters_json['num_events']),
            },
        },
    }

    try:
        main_results_folder = os.path.join(
            user_results_dir, time_info + "_nfe_results")
        extractor = bpefe.Extractor(main_results_folder, config)
        extractor.create_dataset()
        extractor.plt_traces()
        # Use the user-supplied threshold when given, default to -20 mV.
        if global_parameters_json['threshold'] != '':
            extractor.extract_features(
                threshold=int(global_parameters_json['threshold']))
        else:
            extractor.extract_features(threshold=-20)
        extractor.mean_features()
        extractor.plt_features()
        extractor.feature_config_cells(version="legacy")
        extractor.feature_config_all(version="legacy")
    except ValueError as e:
        # Best-effort: log and continue so the citation/zip steps still run.
        print('SOME ERROR OCCURED')
        print(e)

    conf_cit = os.path.join(conf_dir, 'citation_list.json')
    final_cit_file = os.path.join(main_results_folder, 'HOWTOCITE.txt')
    resources.print_citations(
        selected_traces_rest_json, conf_cit, final_cit_file)

    zip_name = time_info + '_nfe_results.zip'
    zip_path = os.path.join(user_results_dir, zip_name)
    request.session['nfe_result_file_zip'] = zip_path
    request.session['nfe_result_file_zip_name'] = zip_name

    # Clean up the per-trace json copies from the results dir.
    for k in selected_traces_rest_json:
        f = os.path.join(user_results_dir, k + ".json")
        if os.path.exists(f):
            os.remove(f)

    contents = os.walk(main_results_folder)
    try:
        # BUG FIX: the original caught `zip_file.BadZipfile` — an attribute
        # of the ZipFile *instance*, which does not exist; the correct
        # exception is zipfile.BadZipFile.  `with` also guarantees the
        # archive is closed even if the constructor itself fails (the old
        # `finally: zip_file.close()` raised NameError in that case).
        with zipfile.ZipFile(zip_path, 'w',
                             zipfile.ZIP_DEFLATED) as zip_file:
            for root, folders, files in contents:
                for folder_name in folders:
                    absolute_path = os.path.join(root, folder_name)
                    relative_path = absolute_path.replace(
                        main_results_folder + os.sep, '')
                    zip_file.write(absolute_path, relative_path)
                for file_name in files:
                    absolute_path = os.path.join(root, file_name)
                    relative_path = absolute_path.replace(
                        main_results_folder + os.sep, '')
                    zip_file.write(absolute_path, relative_path)
    except (IOError, OSError, zipfile.BadZipFile) as message:
        print(message)
        sys.exit(1)

    return HttpResponse(json.dumps({"status": "OK"}))
def extract_features(request):
    """Django view (legacy storage layout): run BluePyEfe feature
    extraction for the session's selected traces and zip the results.

    Trace/cell identity is parsed from the '____'-separated file names;
    results and the zip path are stored back into the session.
    """
    # If there is no collab context in the session, redirect away.
    if "ctx" not in request.session:
        return render(request, 'efelg/hbp_redirect.html')

    data_dir = request.session['data_dir']
    json_dir = request.session['json_dir']
    selected_traces_rest_json = request.session['selected_traces_rest_json']
    allfeaturesnames = efel.getFeatureNames()
    crr_user_folder = request.session['time_info']
    full_crr_result_folder = request.session['u_crr_res_r_dir']
    full_crr_uploaded_folder = request.session['u_up_dir']
    full_crr_data_folder = request.session['u_crr_res_d_dir']
    full_crr_user_folder = request.session['user_crr_res_dir']
    check_features = request.session["check_features"]
    request.session['selected_features'] = check_features

    cell_dict = {}
    selected_traces_rest = []
    for k in selected_traces_rest_json:
        crr_file_rest_name = k + '.json'
        crr_name_split = k.split('____')
        crr_cell_name = crr_name_split[5]
        crr_sample_name = crr_name_split[6]
        # The first six '____'-separated components identify the cell.
        crr_key = '____'.join(crr_name_split[:6])

        # Look for the trace json in the shared dir, then in uploads.
        if os.path.isfile(os.path.join(json_dir, crr_file_rest_name)):
            crr_json_file = os.path.join(json_dir, crr_file_rest_name)
        elif os.path.isfile(os.path.join(full_crr_uploaded_folder,
                                         crr_file_rest_name)):
            crr_json_file = os.path.join(full_crr_uploaded_folder,
                                         crr_file_rest_name)
        else:
            continue

        with open(crr_json_file) as f:
            crr_file_dict = json.loads(f.read())
        crr_file_all_stim = crr_file_dict['traces'].keys()
        crr_file_sel_stim = selected_traces_rest_json[k]['stim']

        # All cells share one data folder (a per-cell subfolder was
        # computed in the original but immediately overwritten).
        crr_cell_data_folder = full_crr_data_folder
        if not os.path.exists(crr_cell_data_folder):
            os.makedirs(crr_cell_data_folder)
        shutil.copy2(crr_json_file, crr_cell_data_folder)

        if crr_key in cell_dict:
            cell_dict[crr_key]['stim'].append(crr_file_sel_stim)
            cell_dict[crr_key]['files'].append(k)
        else:
            cell_dict[crr_key] = {
                'stim': [crr_file_sel_stim],
                'files': [k],
                'cell_name': crr_cell_name,
                'all_stim': crr_file_all_stim,
            }

    target = []
    final_cell_dict = {}
    for key in cell_dict:
        crr_el = cell_dict[key]
        v_corr = 0
        # Collect all selected stimulus amplitudes (deduplicated).
        for c_stim_el in crr_el['stim']:
            for i in c_stim_el:
                if float(i) not in target:
                    target.append(float(i))
        # Stimuli present in the file but not selected are excluded.
        exc_stim_lists = [
            list(set(crr_el['all_stim']) - set(sublist))
            for sublist in crr_el['stim']
        ]
        crr_exc = [[float(i) for i in crr_list]
                   for crr_list in exc_stim_lists]
        final_cell_dict[crr_el['cell_name']] = {
            'v_corr': v_corr,
            'ljp': 0,
            'experiments': {
                'step': {
                    'location': 'soma',
                    'files': [str(i) for i in crr_el['files']],
                }
            },
            'etype': 'etype',
            'exclude': crr_exc,
        }

    # Build the configuration dictionary for the extractor.
    config = {
        'features': {'step': [str(i) for i in check_features]},
        'path': crr_cell_data_folder,
        'format': 'ibf_json',
        'comment': [],
        'cells': final_cell_dict,
        'options': {
            'relative': False,
            'tolerance': 0.02,
            'target': target,
            'delay': 500,
            'nanmean': False,
            'logging': False,
            'nangrace': 0,
            'spike_threshold': 1,
            'amp_min': 0,
            'strict_stiminterval': {'base': True},
        },
    }

    try:
        extractor = bpefe.Extractor(full_crr_result_folder, config,
                                    use_git=False)
        extractor.create_dataset()
        extractor.plt_traces()
        extractor.extract_features()
        extractor.mean_features()
        extractor.plt_features()
        extractor.feature_config_cells(version="legacy")
        extractor.feature_config_all(version="legacy")
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt); behavior otherwise unchanged.
        return render(request, 'efelg/hbp_redirect.html',
                      {"status": "KO",
                       "message": "An error occured while extracting the "
                                  "features. Either you selected too many "
                                  "data or the traces were corrupted."})

    conf_dir = request.session['conf_dir']
    conf_cit = os.path.join(conf_dir, 'citation_list.json')
    final_cit_file = os.path.join(full_crr_result_folder, 'HOWTOCITE.txt')
    resources.print_citations(selected_traces_rest_json, conf_cit,
                              final_cit_file)

    crr_result_folder = request.session['time_info']
    output_path = os.path.join(full_crr_user_folder,
                               crr_user_folder + '_results.zip')
    request.session['result_file_zip'] = output_path
    request.session['result_file_zip_name'] = crr_user_folder + '_results.zip'

    parent_folder = os.path.dirname(full_crr_result_folder)
    contents = os.walk(full_crr_result_folder)
    try:
        # BUG FIX: the original caught `zip_file.BadZipfile` — an attribute
        # of the ZipFile *instance*, which does not exist; the correct
        # exception is zipfile.BadZipFile.  `with` also guarantees the
        # archive is closed even if the constructor itself fails.
        with zipfile.ZipFile(output_path, 'w',
                             zipfile.ZIP_DEFLATED) as zip_file:
            for root, folders, files in contents:
                for folder_name in folders:
                    absolute_path = os.path.join(root, folder_name)
                    relative_path = absolute_path.replace(
                        parent_folder + os.sep, '')
                    zip_file.write(absolute_path, relative_path)
                for file_name in files:
                    absolute_path = os.path.join(root, file_name)
                    relative_path = absolute_path.replace(
                        parent_folder + os.sep, '')
                    zip_file.write(absolute_path, relative_path)
    except (IOError, OSError, zipfile.BadZipFile) as message:
        print(message)
        sys.exit(1)

    accesslogger.info(resources.string_for_log(
        'extract_features', request,
        page_spec_string='___'.join(check_features)))
    return HttpResponse(json.dumps({"status": "OK"}))