def delete_unit(self, unit_num, shell=False):
    '''Delete a sorted unit from the dataset after user confirmation.

    Parameters
    ----------
    unit_num : int or str
        unit number, or unit name such as 'unit001' (parsed to an int)
    shell : bool, optional
        True to prompt via command-line instead of GUI (default False)
    '''
    # Accept unit names like 'unit001' in addition to plain ints
    if isinstance(unit_num, str):
        unit_num = dio.h5io.parse_unit_number(unit_num)

    if unit_num is None:
        print('No unit deleted')
        return

    q = userIO.ask_user('Are you sure you want to delete unit%03i?'
                        % unit_num, choices=['No', 'Yes'], shell=shell)
    if q == 0:
        print('No unit deleted')
        return
    else:
        tmp = ss.delete_unit(self.root_dir, unit_num)
        if tmp is False:
            userIO.tell_user(
                'Unit %i not found in dataset. No unit deleted'
                % unit_num, shell=shell)
        else:
            # fixed typo in user-facing message: 'sucessfully' -> 'successfully'
            userIO.tell_user('Unit %i successfully deleted.' % unit_num,
                             shell=shell)

    # persist the dataset object after any deletion attempt
    self.save()
def units_similarity(self, similarity_cutoff=50, shell=False):
    '''Compute pairwise similarity of sorted units and report violations.

    Parameters
    ----------
    similarity_cutoff : int, optional
        percent similarity above which a pair of units is flagged (default 50)
    shell : bool, optional
        True for command-line output; forced True over SSH

    Returns
    -------
    violations, sim : pairs of units over the cutoff, and the similarity matrix

    Raises
    ------
    ValueError : if units have not been sorted yet
    '''
    # No display over SSH, so fall back to shell output
    if 'SSH_CONNECTION' in os.environ:
        shell = True

    metrics_dir = os.path.join(self.root_dir, 'sorted_unit_metrics')
    if not os.path.isdir(metrics_dir):
        raise ValueError('No sorted unit metrics found. Must sort units before calculating similarity')

    violation_file = os.path.join(metrics_dir,
                                  'units_similarity_violations.txt')
    violations, sim = ss.calc_units_similarity(self.h5_file,
                                               self.sampling_rate,
                                               similarity_cutoff,
                                               violation_file)
    if len(violations) == 0:
        userIO.tell_user('No similarity violations found!', shell=shell)
        self.process_status['units_similarity'] = True
        return violations, sim

    # Build a readable report of all violating pairs
    report = ['Units Similarity Violations Found:',
              'Unit_1 Unit_2 Similarity']
    for name1, name2 in violations:
        n1 = dio.h5io.parse_unit_number(name1)
        n2 = dio.h5io.parse_unit_number(name2)
        report.append(' {:<10}{:<10}{}\n'.format(name1, name2, sim[n1][n2]))

    report.append('Delete units with dataset.delete_unit(N)')
    userIO.tell_user('\n'.join(report), shell=shell)

    self.process_status['units_similarity'] = True
    self.save()
    return violations, sim
def create_trial_list(self):
    '''Create lists of trials based on digital inputs and outputs and
    store to hdf5 store

    Can only be run after data extraction
    '''
    # Trial tables are derived from extracted data; bail out early otherwise
    if not self.process_status['extract_data']:
        userIO.tell_user('Must extract data before creating trial list',
                         shell=True)
        return

    if self.rec_info.get('dig_in'):
        self.dig_in_trials = dio.h5io.create_trial_data_table(
            self.h5_file, self.dig_in_mapping, self.sampling_rate, 'in')
    else:
        print('No digital input data found')

    if self.rec_info.get('dig_out'):
        self.dig_out_trials = dio.h5io.create_trial_data_table(
            self.h5_file, self.dig_out_mapping, self.sampling_rate, 'out')
    else:
        print('No digital output data found')

    self.process_status['create_trial_list'] = True
    self.save()
def mark_dead_channels(self, dead_channels=None, shell=False):
    '''Plots small piece of raw traces and a metric to help identify dead
    channels. Once user marks channels as dead a new column is added to
    electrode mapping

    Parameters
    ----------
    dead_channels : list of int, optional
        if this is specified then nothing is plotted, those channels are
        simply marked as dead
    shell : bool, optional

    Returns
    -------
    list of int : the channels marked as dead
    '''
    print('Marking dead channels\n----------')
    em = self.electrode_mapping.copy()
    if dead_channels is None:
        # Interactive path: render traces figure, then ask the user
        userIO.tell_user(
            'Making traces figure for dead channel detection...',
            shell=True)
        save_file = os.path.join(self.root_dir, 'Electrode_Traces.png')
        fig, ax = datplt.plot_traces_and_outliers(self.h5_file,
                                                  save_file=save_file)
        if not shell:
            # Better to open figure outside of python since its a lot of
            # data on figure and matplotlib is slow
            subprocess.call(['xdg-open', save_file])
        else:
            userIO.tell_user('Saved figure of traces to %s for reference'
                             % save_file, shell=shell)

        choice = userIO.select_from_list('Select dead channels:',
                                         em.Electrode.to_list(),
                                         'Dead Channel Selection',
                                         multi_select=True,
                                         shell=shell)
        dead_channels = list(map(int, choice))

    # fixed typo in user-facing message: 'eletrodes' -> 'electrodes'
    print('Marking electrodes %s as dead.\n'
          'They will be excluded from common average referencing.'
          % dead_channels)
    em['dead'] = False
    em.loc[dead_channels, 'dead'] = True
    self.electrode_mapping = em
    if os.path.isfile(self.h5_file):
        dio.h5io.write_electrode_map_to_h5(self.h5_file,
                                           self.electrode_mapping)

    self.process_status['mark_dead_channels'] = True
    self.save()
    return dead_channels
def read_rec_info(file_dir, shell=True):
    '''Reads the info.rhd file to get relevant parameters.

    Parameters
    ----------
    file_dir : str, path to recording directory
    shell : bool, optional
        unused; kept for backward compatibility with existing callers

    Returns
    -------
    dict, necessary analysis info from info.rhd
        fields: amplifier_sampling_rate, dig_in_sampling_rate, notch_filter,
        ports (list, corresponds to channels), channels (list), num_channels,
        dig_in / dig_out (if present), file_type

    Throws
    ------
    FileNotFoundError : if info.rhd is not in file_dir
    '''
    info_file = os.path.join(file_dir, 'info.rhd')
    if not os.path.isfile(info_file):
        raise FileNotFoundError('info.rhd file not found in %s' % file_dir)

    print('Reading info.rhd file...')
    try:
        info = load_intan_rhd_format.read_data(info_file)
    except Exception:
        # TODO: Have a way to manually input settings
        userIO.tell_user('%s was unable to be read. May be corrupted or '
                         'recording may have been interrupted' % info_file,
                         shell=True)
        # bare raise preserves the original traceback
        raise

    freq_params = info['frequency_parameters']
    notch_freq = freq_params['notch_filter_frequency']
    amp_fs = freq_params['amplifier_sample_rate']
    dig_in_fs = freq_params['board_dig_in_sample_rate']
    out = {'amplifier_sampling_rate': amp_fs,
           'dig_in_sampling_rate': dig_in_fs,
           'notch_filter': notch_freq}

    amp_ch = info['amplifier_channels']
    ports = [x['port_prefix'] for x in amp_ch]
    channels = [x['native_order'] for x in amp_ch]

    out['ports'] = ports
    out['channels'] = channels
    out['num_channels'] = len(channels)

    # Digital in/out sections only exist if such channels were recorded
    if info.get('board_dig_in_channels'):
        dig_in = info['board_dig_in_channels']
        din = [x['native_order'] for x in dig_in]
        out['dig_in'] = din

    if info.get('board_dig_out_channels'):
        dig_out = info['board_dig_out_channels']
        dout = [x['native_order'] for x in dig_out]
        out['dig_out'] = dout

    out['file_type'] = get_recording_filetype(file_dir)

    print('\nRecording Info\n--------------\n')
    print(pt.print_dict(out))
    return out
def find_held_units(rec_dirs, percent_criterion=95, rec_names=None,
                    raw_waves=False):
    '''Find units held across consecutive recordings using the J3 metric.

    Computes intra-recording J3 values, then for each consecutive pair of
    recordings compares same-electrode, same-type single units in PCA space.
    A pair is marked held when its inter-recording J3 falls below the given
    percentile of the intra-recording J3 distribution.

    Parameters
    ----------
    rec_dirs : list of str
        recording directories, in chronological order
    percent_criterion : float, optional
        percentile of intra-J3 used as the held-unit threshold (default 95)
    rec_names : list of str, optional
        labels for each recording; defaults to the directory basenames
    raw_waves : bool, optional
        True to compare raw (rather than de-noised) unit waveforms

    Returns
    -------
    held_df : pd.DataFrame
        one row per held unit with its name in each recording and J3 values
    intra_J3 : list of float
    inter_J3 : list of float
    '''
    # TODO: if any rec is 'one file per signal type' create tmp_raw.hdf5 and
    # delete after detection is finished
    userIO.tell_user('Computing intra recording J3 values...', shell=True)
    intra_J3 = get_intra_J3(rec_dirs)
    if rec_names is None:
        rec_names = [os.path.basename(x) for x in rec_dirs]

    rec_labels = {x: y for x, y in zip(rec_names, rec_dirs)}

    print('\n----------\nComputing Inter J3s\n----------\n')
    rec_pairs = [(rec_names[i], rec_names[i + 1])
                 for i in range(len(rec_names) - 1)]

    held_df = pd.DataFrame(columns=['unit', 'electrode', 'single_unit',
                                    'unit_type', *rec_names, 'J3'])

    # Go through each pair of directories and compute inter_J3 between
    # units. If the inter_J3 value is below the percentile_criterion of
    # the intra_j3 array then mark units as held. Only compare the same
    # type of single units on the same electrode
    inter_J3 = []
    for rec1, rec2 in rec_pairs:
        rd1 = rec_labels.get(rec1)
        rd2 = rec_labels.get(rec2)
        print('Comparing %s vs %s' % (rec1, rec2))
        found_cells = []
        unit_names1 = h5io.get_unit_names(rd1)
        unit_names2 = h5io.get_unit_names(rd2)
        for unit1 in unit_names1:
            if raw_waves:
                wf1, descrip1, fs1 = h5io.get_raw_unit_waveforms(rd1, unit1)
            else:
                wf1, descrip1, fs1 = h5io.get_unit_waveforms(rd1, unit1)

            single_unit = bool(descrip1['single_unit'])
            unit_type = h5io.read_unit_description(descrip1)

            # Only single units are candidates for being held across days
            if descrip1['single_unit'] == 1:
                for unit2 in unit_names2:
                    # required_descrip restricts to same electrode/type units
                    if raw_waves:
                        wf2, descrip2, fs2 = h5io.get_raw_unit_waveforms(
                            rd2, unit2, required_descrip=descrip1)
                    else:
                        wf2, descrip2, fs2 = h5io.get_unit_waveforms(
                            rd2, unit2, required_descrip=descrip1)

                    if descrip1 == descrip2 and wf2 is not None:
                        print('Comparing %s %s vs %s %s'
                              % (rec1, unit1, rec2, unit2))
                        userIO.tell_user('Comparing %s %s vs %s %s'
                                         % (rec1, unit1, rec2, unit2),
                                         shell=True)

                        # Resample to the lower sampling rate before PCA so
                        # waveforms have matching lengths
                        if fs1 > fs2:
                            wf1 = sas.interpolate_waves(wf1, fs1, fs2)
                        elif fs1 < fs2:
                            wf2 = sas.interpolate_waves(wf2, fs2, fs1)

                        pca = PCA(n_components=3)
                        pca.fit(np.concatenate((wf1, wf2), axis=0))
                        pca_wf1 = pca.transform(wf1)
                        pca_wf2 = pca.transform(wf2)

                        J3 = calc_J3(pca_wf1, pca_wf2)
                        inter_J3.append(J3)

                        if J3 <= np.percentile(intra_J3, percent_criterion):
                            print('Detected held unit:\n %s %s and %s %s'
                                  % (rec1, unit1, rec2, unit2))
                            userIO.tell_user(
                                'Detected held unit:\n %s %s and %s %s'
                                % (rec1, unit1, rec2, unit2), shell=True)
                            found_cells.append(
                                (h5io.parse_unit_number(unit1),
                                 h5io.parse_unit_number(unit2),
                                 J3, single_unit, unit_type))

        # NOTE: np.array coerces every field to string, hence the string
        # comparisons against row values below
        found_cells = np.array(found_cells)
        userIO.tell_user('\n-----\n%s vs %s\n-----' % (rec1, rec2),
                         shell=True)
        userIO.tell_user(str(found_cells) + '\n', shell=True)
        userIO.tell_user('Resolving duplicates...', shell=True)
        found_cells = resolve_duplicate_matches(found_cells)
        userIO.tell_user('Results:\n%s\n' % str(found_cells), shell=True)

        for i, row in enumerate(found_cells):
            # Next sequential letter label for a newly held unit
            if held_df.empty:
                uL = 'A'
            else:
                uL = held_df['unit'].iloc[-1]
                uL = pt.get_next_letter(uL)

            unit1 = 'unit%03d' % int(row[0])
            unit2 = 'unit%03d' % int(row[1])
            j3 = row[2]
            idx1 = np.where(held_df[rec1] == unit1)[0]
            idx2 = np.where(held_df[rec2] == unit2)[0]
            single_unit = (row[3] == 'True')

            if idx1.size == 0 and idx2.size == 0:
                # New held unit: start a fresh row. DataFrame.append was
                # removed in pandas 2.0, so build a one-row frame and concat.
                tmp = {'unit': uL,
                       'single_unit': single_unit,
                       'unit_type': row[4],
                       rec1: unit1,
                       rec2: unit2,
                       'J3': [float(j3)]}
                held_df = pd.concat([held_df, pd.DataFrame([tmp])],
                                    ignore_index=True)
            elif idx1.size != 0 and idx2.size != 0:
                userIO.tell_user('WTF...', shell=True)
                continue
            elif idx1.size != 0:
                # Extend an existing held unit into rec2. Use .at instead of
                # chained indexing, which fails under pandas copy-on-write.
                lbl = held_df.index[idx1[0]]
                held_df.at[lbl, rec2] = unit2
                held_df.at[lbl, 'J3'].append(float(j3))
            else:
                lbl = held_df.index[idx2[0]]
                held_df.at[lbl, rec1] = unit1
                held_df.at[lbl, 'J3'].append(float(j3))

    return held_df, intra_J3, inter_J3