import math

import h5py
import numpy as np
import scipy.io as sio
import scipy.signal as sig

# Project-local classes and helpers (EventList, SpikeObj, hfoObj, DataObj,
# eegfilt, findStartEnd) are assumed to be defined elsewhere in this package.


def loadSPK_waveclus(filename, time_edge=(0, 60)):
    '''
    Load spikes sorted by wave_clus.

    Parameters
    ----------
    filename: str
        Name of the spike (.mat) file
    time_edge: tuple, optional
        (0, 60) (default) - determines the x-axis limits, in seconds.
    '''
    mat = sio.loadmat(filename, struct_as_record=False, squeeze_me=True)
    clusters = mat['cluster_class'][:, 0]
    times = mat['cluster_class'][:, 1] / 1000  # wave_clus stores timestamps in ms
    spikes = mat['spikes']
    features = mat['inspk']
    labels = ['Cluster ' + str(cl) for cl in range(int(max(clusters)) + 1)]
    Spikes = EventList(labels, time_edge)
    for idx, waveform in enumerate(spikes):
        tstamp = times[idx]
        clus = clusters[idx]
        feat = features[idx]
        spk = SpikeObj(waveform, tstamp, clus, feat)
        Spikes.__addEvent__(spk)
    return Spikes
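# Example usage (a minimal sketch): wave_clus saves its sorting results to a
# 'times_<recording>.mat' file containing the 'cluster_class', 'spikes' and
# 'inspk' variables read above; the file name below is hypothetical.
#
#     spk_list = loadSPK_waveclus('times_rec01.mat', time_edge=(0, 60))
#     # spk_list is an EventList holding one SpikeObj per sorted spike.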
def findHFO_filtHilbert(Data, low_cut, high_cut=None, order=None,
                        window=('kaiser', 0.5), ths=5, ths_method='STD',
                        min_dur=3, min_separation=2, energy=False):
    '''
    Find HFO by the filter-Hilbert method.

    Parameters
    ----------
    Data: DataObj
        Data object to filter and search for HFO
    low_cut: int
        Low cutoff frequency.
    high_cut: int
        High cutoff frequency. If None, high_cut = Nyquist frequency.
    order: int, optional
        None (default) - order of the filter, calculated as 1/10 of the
        sample rate.
    window: string or tuple of string and parameter values
        Desired window to use. See `scipy.signal.get_window` for a list of
        windows and required parameters.
    ths: int, optional
        5 (default) - multiplier of the threshold value (e.g. 5*STD).
    ths_method: str, optional
        'STD' - standard deviations above the mean
        'Tukey' - interquartile ranges above the 75th percentile
    min_dur: int, optional
        3 (default) - minimal number of cycles an event should last.
        Converted to points by ceil(min_dur*sample_rate/high_cut).
    min_separation: int, optional
        2 (default) - minimal number of cycles separating events.
        Converted to points by ceil(min_separation*sample_rate/low_cut).
    energy: bool, optional
        False (default) - if True, threshold the squared envelope (energy)
        instead of the amplitude envelope.
    '''
    if low_cut is None and high_cut is None:
        raise Exception('You should determine the cutoff frequencies')
    sample_rate = Data.sample_rate
    # If no high cutoff is given, use the Nyquist frequency
    if high_cut is None:
        high_cut = sample_rate / 2
    cutoff = [low_cut, high_cut]
    # Transform min_dur from cycles to points - minimal duration of an HFO (default is 3 cycles)
    min_dur = math.ceil(min_dur * sample_rate / high_cut)
    # Transform min_separation from cycles to points - minimal separation between events
    min_separation = math.ceil(min_separation * sample_rate / low_cut)
    # Filtering
    filtOBj = eegfilt(Data, low_cut, high_cut, order, window)
    nch = filtOBj.n_channels
    if order is None:
        order = int(sample_rate / 10)
    info = (str(low_cut) + '-' + str(high_cut) + ' Hz filtering; order: ' +
            str(order) + ', window: ' + str(window) + ' ; ' + str(ths) +
            '*' + ths_method + '; min_dur = ' + str(min_dur) +
            '; min_separation = ' + str(min_separation))
    HFOs = EventList(Data.ch_labels, (Data.time_vec[0], Data.time_vec[-1]))
    if nch == 1:
        print('Finding in channel')
        filt = filtOBj.data
        # Amplitude envelope (or its square) of the analytic signal
        if energy:
            env = np.abs(sig.hilbert(filt)) ** 2
        else:
            env = np.abs(sig.hilbert(filt))
        if ths_method == 'STD':
            ths_value = np.mean(env) + ths * np.std(env)
        elif ths_method == 'Tukey':
            ths_value = np.percentile(env, 75) + ths * (
                np.percentile(env, 75) - np.percentile(env, 25))
        start, end = findStartEnd(filt, env, ths_value, min_dur, min_separation)
        for s, e in zip(start, end):
            index = np.arange(s, e)
            HFOwaveform = env[index]
            # Timestamp at the envelope peak of the event
            tstamp_points = s + np.argmax(HFOwaveform)
            tstamp = Data.time_vec[tstamp_points]
            # One-second window (half a second on each side) around the peak
            Lindex = np.arange(tstamp_points - int(sample_rate / 2),
                               tstamp_points + int(sample_rate / 2) + 1)
            tstamp_idx = np.nonzero(Lindex == tstamp_points)[0][0]
            # Column 0: raw data; column 1: filtered data
            waveform = np.empty((Lindex.shape[0], 2))
            waveform[:] = np.nan
            waveform[:, 0] = Data.data[Lindex]
            waveform[:, 1] = filtOBj.data[Lindex]
            start_idx = np.nonzero(Lindex == s)[0][0]
            end_idx = np.nonzero(Lindex == e)[0][0]
            hfo = hfoObj(0, tstamp, tstamp_idx, waveform, start_idx, end_idx,
                         ths_value, sample_rate, cutoff, info)
            HFOs.__addEvent__(hfo)
    else:
        for ch in range(nch):
            if ch not in filtOBj.bad_channels:
                print('Finding in channel ' + filtOBj.ch_labels[ch])
                filt = filtOBj.data[:, ch]
                if energy:
                    env = np.abs(sig.hilbert(filt)) ** 2
                else:
                    env = np.abs(sig.hilbert(filt))
                if ths_method == 'STD':
                    ths_value = np.mean(env) + ths * np.std(env)
                elif ths_method == 'Tukey':
                    ths_value = np.percentile(env, 75) + ths * (
                        np.percentile(env, 75) - np.percentile(env, 25))
                start, end = findStartEnd(filt, env, ths_value, min_dur,
                                          min_separation)
                for s, e in zip(start, end):
                    index = np.arange(s, e)
                    HFOwaveform = env[index]
                    tstamp_points = s + np.argmax(HFOwaveform)
                    tstamp = Data.time_vec[tstamp_points]
                    Lindex = np.arange(tstamp_points - int(sample_rate / 2),
                                       tstamp_points + int(sample_rate / 2) + 1)
                    tstamp_idx = np.nonzero(Lindex == tstamp_points)[0][0]
                    waveform = np.empty((Lindex.shape[0], 2))
                    waveform[:] = np.nan
                    waveform[:, 0] = Data.data[Lindex, ch]
                    waveform[:, 1] = filtOBj.data[Lindex, ch]
                    start_idx = np.nonzero(Lindex == s)[0][0]
                    end_idx = np.nonzero(Lindex == e)[0][0]
                    hfo = hfoObj(ch, tstamp, tstamp_idx, waveform, start_idx,
                                 end_idx, ths_value, sample_rate, cutoff, info)
                    HFOs.__addEvent__(hfo)
    return HFOs
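# Example usage (a minimal sketch; 'data' stands for a previously loaded
# DataObj and the parameter values are only illustrative):
#
#     hfos = findHFO_filtHilbert(data, low_cut=80, high_cut=250,
#                                ths=5, ths_method='STD')
#
# findStartEnd is a project-local helper not shown in this file. A
# hypothetical sketch of the logic it is expected to implement - find
# supra-threshold runs of the envelope lasting at least min_dur points and
# merge runs separated by fewer than min_separation points - could look
# like this (this is an assumption, not the project's implementation):

def _findStartEnd_sketch(filt, env, ths_value, min_dur, min_separation):
    # Boolean mask of supra-threshold envelope samples
    above = env > ths_value
    # +1 in the diff marks a run start, -1 marks a run end (exclusive)
    edges = np.diff(above.astype(int))
    start = np.nonzero(edges == 1)[0] + 1
    end = np.nonzero(edges == -1)[0] + 1
    if above[0]:
        start = np.insert(start, 0, 0)
    if above[-1]:
        end = np.append(end, above.shape[0])
    if start.size == 0:
        return np.array([], dtype=int), np.array([], dtype=int)
    # Merge events separated by fewer than min_separation points
    keep_s, keep_e = [start[0]], [end[0]]
    for s, e in zip(start[1:], end[1:]):
        if s - keep_e[-1] < min_separation:
            keep_e[-1] = e
        else:
            keep_s.append(s)
            keep_e.append(e)
    # Discard events shorter than min_dur points
    start = [s for s, e in zip(keep_s, keep_e) if e - s >= min_dur]
    end = [e for s, e in zip(keep_s, keep_e) if e - s >= min_dur]
    return np.array(start), np.array(end)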
def open_dataset(file_name, dataset_name, htype='auto'):
    '''
    Open a dataset in a specific file_name.

    Parameters
    ----------
    file_name: str
        Name of the HDF5 (.h5) file
    dataset_name: str
        Name of the dataset to open
    htype: str, optional
        'auto' (default) - read htype from the HDF5 file
        'Data' - DataObj type
        'list' - EventList type (of SpikeObj/hfoObj events)
    '''
    # Reading the h5 file ('r+' because a missing attribute may be created below)
    h5 = h5py.File(file_name, 'r+')
    # Loading the dataset
    dataset = h5[dataset_name]
    # Getting htype
    if htype == 'auto':
        htype = dataset.attrs['htype']
    if htype == 'Data':
        # Sample rate attribute
        sample_rate = dataset.attrs['SampleRate[Hz]']
        n_points = dataset.shape[0]
        end_time = n_points / sample_rate
        # Amplitude unit
        if 'amp_unit' in dataset.attrs:
            amp_unit = dataset.attrs['amp_unit']
        else:
            amp_unit = 'AU'
        # Time vector
        if 'Time_vec_edge' in dataset.attrs:
            edge = dataset.attrs['Time_vec_edge']
            time_vec = np.linspace(edge[0], edge[1], n_points, endpoint=False)
        else:
            time_vec = np.linspace(0, end_time, n_points, endpoint=False)
        # Check for a 'Bad_channels' attribute; if absent, create an empty one
        if 'Bad_channels' not in dataset.attrs:
            dataset.attrs.create("Bad_channels", [], dtype=int)
        # Load bad channels
        bad_channels = dataset.attrs["Bad_channels"]
        # Creating the data object
        Data = DataObj(dataset[:], sample_rate, amp_unit,
                       dataset.attrs['Channel_Labels'], time_vec,
                       bad_channels, file_name, dataset_name)
    elif htype == 'list':
        keys = dataset.keys()
        ch_labels = dataset.attrs['ch_labels']
        time_edge = dataset.attrs['time_edge']
        Data = EventList(ch_labels, time_edge, file_name, dataset_name)
        for k in keys:
            waveform = dataset[k][:]
            tstamp = dataset[k].attrs['tstamp']
            evhtype = dataset[k].attrs['htype']
            if evhtype == 'Spike':
                clus = dataset[k].attrs['cluster']
                feat = dataset[k].attrs['features']
                spk = SpikeObj(waveform, tstamp, clus, feat)
                Data.__addEvent__(spk)
            elif evhtype == 'HFO':
                channel = dataset[k].attrs['channel']
                tstamp_idx = dataset[k].attrs['tstamp_idx']
                start_idx = dataset[k].attrs['start_idx']
                end_idx = dataset[k].attrs['end_idx']
                ths_value = dataset[k].attrs['ths_value']
                sample_rate = dataset[k].attrs['sample_rate']
                cutoff = dataset[k].attrs['cutoff']
                info = dataset[k].attrs['info']
                hfo = hfoObj(channel, tstamp, tstamp_idx, waveform, start_idx,
                             end_idx, ths_value, sample_rate, cutoff, info)
                Data.__addEvent__(hfo)
    h5.close()
    return Data
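# Example usage (a minimal sketch; 'rec01.h5', '/raw' and '/hfo' are
# hypothetical file and dataset names):
#
#     data = open_dataset('rec01.h5', '/raw')                  # htype read from file
#     hfo_list = open_dataset('rec01.h5', '/hfo', htype='list')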