def create_piezosensor_events(event_channel, threshold, sampling_freq, filt_cutoff_freq, minduration, pickOutOrIn):
    """
    Detect threshold-crossing events on a piezo sensor trace.

    The channel is optionally low passed, DC-corrected using the mean of its
    first ten samples, and then scanned for samples where it drops below
    `threshold`. Events shorter than `minduration` samples are discarded.

    Parameters
    ----------
    event_channel : 1D array of the raw sensor signal.
    threshold : value the (baseline-corrected) signal must drop below.
    sampling_freq : sampling frequency in Hz, used only for the optional filter.
    filt_cutoff_freq : low pass cutoff in Hz, or None to skip filtering.
    minduration : minimum event length in samples.
    pickOutOrIn : if truthy return event onsets (+1 edges), else offsets (-1 edges).

    Returns
    -------
    2-row array: [event sample indices, edge signs].
    """
    if filt_cutoff_freq is not None:
        event_channel = filt.low_pass_filter(event_channel, sampling_freq, filt_cutoff_freq)
    # Remove the DC offset, estimated from the first ten samples.
    event_channel = event_channel - np.mean(event_channel[0:10])
    # +1 where the signal drops below threshold, -1 where it comes back up.
    transitions = np.diff(np.int32(event_channel < threshold))
    samples = np.squeeze(np.nonzero(transitions))
    signs = transitions[samples]
    # Pair each onset (+1) with the matching offset (-1) to get durations.
    durations = samples[signs < 0] - samples[signs > 0]
    keep = np.squeeze(np.nonzero(np.abs(durations) > minduration))
    mask = signs > 0 if pickOutOrIn else signs < 0
    return np.array([samples[mask][keep], signs[mask][keep]])
Code example #2
def create_piezosensor_events(event_channel, threshold, sampling_freq,
                              filt_cutoff_freq, minduration, pickOutOrIn):
    """
    Detect threshold-crossing events on a piezo sensor trace.

    After optional low pass filtering and DC correction (mean of the first
    ten samples), an event is the span where the signal sits below
    `threshold`. Events lasting `minduration` samples or fewer are dropped.

    Parameters
    ----------
    event_channel : 1D array of the raw sensor signal.
    threshold : level the corrected signal must drop below.
    sampling_freq : sampling frequency in Hz (only used by the filter).
    filt_cutoff_freq : low pass cutoff in Hz, or None to skip filtering.
    minduration : minimum event duration in samples.
    pickOutOrIn : truthy -> return onsets (+1 edges); falsy -> offsets (-1).

    Returns
    -------
    2-row array: [event sample indices, edge signs].
    """
    if filt_cutoff_freq is not None:
        event_channel = filt.low_pass_filter(event_channel, sampling_freq,
                                             filt_cutoff_freq)
    baseline_corrected = event_channel - np.mean(event_channel[0:10])
    # Edge detector: +1 at below-threshold onsets, -1 at offsets.
    edges = np.diff(np.int32(baseline_corrected < threshold))
    edge_samples = np.squeeze(np.nonzero(edges))
    edge_signs = edges[edge_samples]
    onsets = edge_samples[edge_signs > 0]
    offsets = edge_samples[edge_signs < 0]
    long_enough = np.squeeze(np.nonzero(np.abs(offsets - onsets) > minduration))
    if pickOutOrIn:
        picked_samples = onsets[long_enough]
        picked_signs = edge_signs[edge_signs > 0][long_enough]
    else:
        picked_samples = offsets[long_enough]
        picked_signs = edge_signs[edge_signs < 0][long_enough]
    return np.array([picked_samples, picked_signs])
Code example #3
# Split the recording: the first 64 rows are ECoG channels, the rest probe channels.
data_ecog = data[:64, :]
data_probe = data[64:, :]

# High-pass filter every probe channel into a disk-backed int16 memmap so the
# filtered recording never has to fit in RAM.
data_probe_hp = pl.memmap(os.path.join(memap_folder, 'data_probe_hp.dat'),
                          dtype='int16',
                          mode='w+',
                          shape=pl.shape(data_probe))
for channel in range(pl.shape(data_probe)[0]):
    data_probe_hp[channel, :] = filters.high_pass_filter(data_probe[channel, :],
                                                         Fsampling=f_sampling,
                                                         Fcutoff=f_hp_cutoff)
    data_probe_hp.flush()  # persist each channel as soon as it is written
    print(channel)  # progress indicator
pl.save(os.path.join(memap_folder, 'data_probe_hp.npy'), data_probe_hp)

# Low-pass filter and decimate the ECoG channels into a second memmap.
# BUG FIX: the original computed the second shape entry with true division
# (`/`), which yields a float under Python 3; numpy requires memmap shapes
# to be integers. Use a shared integer factor and floor division.
subsample_factor = int(f_sampling / f_subsample)
shape_data_ss = (pl.shape(data_ecog)[0],
                 pl.shape(data_ecog)[1] // subsample_factor)
data_ecog_lp_ss = pl.memmap(os.path.join(memap_folder, 'data_ecog_lp_ss.dat'),
                            dtype='int16',
                            mode='w+',
                            shape=shape_data_ss)
for channel in range(pl.shape(data_ecog)[0]):
    data_ecog_lp_ss[channel, :] = signal.decimate(
        filters.low_pass_filter(data_ecog[channel, :],
                                Fsampling=f_sampling,
                                Fcutoff=f_lp_cutoff),
        subsample_factor)
    data_ecog_lp_ss.flush()
    print(channel)
pl.save(os.path.join(memap_folder, 'data_ecog_lp_ss.npy'), data_ecog_lp_ss)
Code example #4
# NOTE(review): this snippet is truncated — the final `if` has no body (it is
# cut off by the next example separator); it cannot run as-is.

# Split the recording: first 64 rows are ECoG channels, the rest probe channels.
data_ecog = data[:64,:]
data_probe = data[64:,:]


# High-pass filter each probe channel into a disk-backed int16 memmap.
data_probe_hp = pl.memmap(os.path.join(memap_folder,'data_probe_hp.dat'), dtype='int16', mode='w+', shape=pl.shape(data_probe))
for i in pl.arange(0, pl.shape(data_probe)[0]):
    data_probe_hp[i,:] = filters.high_pass_filter(data_probe[i,:], Fsampling=f_sampling, Fcutoff=f_hp_cutoff)
    data_probe_hp.flush()  # persist each channel to disk as it is written
    print(i)  # progress indicator
pl.save(os.path.join(memap_folder, 'data_probe_hp.npy'), data_probe_hp)


# Low-pass filter and decimate the ECoG channels into a second memmap.
# NOTE(review): under Python 3 the `/` below yields a float, and numpy
# rejects float memmap shape entries — presumably this ran on Python 2;
# confirm, or use `//` instead.
shape_data_ss = (pl.shape(data_ecog)[0], pl.shape(data_ecog)[1]/int(f_sampling/f_subsample))
data_ecog_lp_ss = pl.memmap(os.path.join(memap_folder, 'data_ecog_lp_ss.dat'), dtype='int16', mode='w+', shape=shape_data_ss)
for i in pl.arange(0, pl.shape(data_ecog)[0]):
    data_ecog_lp_ss[i,:] = signal.decimate(filters.low_pass_filter(data_ecog[i,:], Fsampling=f_sampling, Fcutoff=f_lp_cutoff), int(f_sampling/f_subsample))
    data_ecog_lp_ss.flush()
    print(i)
pl.save(os.path.join(memap_folder, 'data_ecog_lp_ss.npy'), data_ecog_lp_ss)



# Detect spikes on the high-passed probe data and save the event samples.
spike_samples = tf.spikedetect(data_probe_hp, threshold_multiplier=6.5, bad_channels=probe_bad_channels)
pl.save(os.path.join(memap_folder, 'spike_samples.npy'), spike_samples)


# Artifact rejection: walk backwards so deletions do not shift the indices
# still to be visited.
spike_samples_clean = spike_samples
for i in pl.arange(pl.size(spike_samples_clean)-1,-1,-1):
    # NOTE(review): this rebinds `data`, clobbering the raw recording sliced
    # at the top of the script.
    data = data_probe_hp[:, spike_samples[i]-60:spike_samples[i]+60]
    stdevs = sp.std(data,1)
    # NOTE(review): snippet cut off here — the `if` body is missing
    # (presumably `spike_samples_clean = pl.delete(spike_samples_clean, i)`).
    if np.max(data) > 3000 or pl.any(stdevs>600):
Code example #5
# NOTE(review): this snippet uses `data_ecog`, which is never defined here —
# presumably `data_ecog = data[:64, :]` was dropped; confirm against the source.
data_probe = data[64:, :]  # probe channels: everything after the first 64 rows


# High-pass filter every probe channel into a disk-backed int16 memmap so the
# filtered recording never has to fit in RAM.
data_probe_hp = pl.memmap(os.path.join(memap_folder, 'data_probe_hp.dat'), dtype='int16', mode='w+', shape=pl.shape(data_probe))
for channel in range(pl.shape(data_probe)[0]):
    data_probe_hp[channel, :] = filters.high_pass_filter(data_probe[channel, :], Fsampling=f_sampling, Fcutoff=f_hp_cutoff)
    data_probe_hp.flush()  # persist each channel as soon as it is written
    print(channel)  # progress indicator
pl.save(os.path.join(memap_folder, 'data_probe_hp.npy'), data_probe_hp)


# Low-pass filter and decimate the ECoG channels into a second memmap.
# BUG FIX: the original computed the second shape entry with true division
# (`/`), which yields a float under Python 3; numpy requires integer memmap
# shapes. Use a shared integer factor and floor division.
subsample_factor = int(f_sampling / f_subsample)
shape_data_ss = (pl.shape(data_ecog)[0], pl.shape(data_ecog)[1] // subsample_factor)
data_ecog_lp_ss = pl.memmap(os.path.join(memap_folder, 'data_ecog_lp_ss.dat'), dtype='int16', mode='w+', shape=shape_data_ss)
for channel in range(pl.shape(data_ecog)[0]):
    data_ecog_lp_ss[channel, :] = signal.decimate(
        filters.low_pass_filter(data_ecog[channel, :], Fsampling=f_sampling, Fcutoff=f_lp_cutoff), subsample_factor)
    data_ecog_lp_ss.flush()
    print(channel)
pl.save(os.path.join(memap_folder, 'data_ecog_lp_ss.npy'), data_ecog_lp_ss)


# Detect spikes on the high-passed probe data and save the event samples.
spike_samples = tf.spikedetect(data_probe_hp, threshold_multiplier=6.5, bad_channels=probe_bad_channels)
pl.save(os.path.join(memap_folder, 'spike_samples.npy'), spike_samples)


# Artifact rejection: drop any spike whose +/-60-sample window contains an
# extreme amplitude or an excessively noisy channel. Iterate backwards so
# deletions do not shift the indices still to be visited.
spike_samples_clean = spike_samples
for i in range(pl.size(spike_samples_clean) - 1, -1, -1):
    # BUG FIX: the original rebound `data` here, clobbering the raw recording
    # that the top of this script slices; use a dedicated name instead.
    window = data_probe_hp[:, spike_samples[i] - 60:spike_samples[i] + 60]
    stdevs = sp.std(window, 1)
    if np.max(window) > 3000 or pl.any(stdevs > 600):
        spike_samples_clean = pl.delete(spike_samples_clean, i)
def time_lock_raw_data(data, events, times_to_cut, sampling_freq, baseline_time=None, sub_sample_freq=None,
                       high_pass_cutoff=None, rectify=False, low_pass_cutoff=None, avg_reref=False, keep_trials=False):
    """
    Time locks, baselines, high or low passes, and sub samples the data (in that order).

    Parameters
    ----------
    data : 1D (samples) or 2D (channels x samples) array of the raw recording.
    events : sample indices of the events to time lock to; if 2D, only the
        first row is used.
    times_to_cut : (start, end) window around each event, in seconds.
    sampling_freq : sampling frequency of `data` in Hz.
    baseline_time : optional (start, end) window passed to `baseline_correct`.
    sub_sample_freq : optional target frequency; trials are sub sampled to it.
    high_pass_cutoff, low_pass_cutoff : optional filter cutoffs (Hz) applied
        per trial via `filt`.
    rectify : if True, rectify (abs) the filtered trial and low pass again.
    avg_reref : if True, re-reference each trial to the mean over channels.
    keep_trials : if True, also return the single-trial data.

    Returns
    -------
    [avg_data, time_axis], or [single_trial_data, avg_data, time_axis] when
    keep_trials is True.
    """
    if np.ndim(events) == 2:
        events = events[0, :]  # keep only the row of event samples
    number_of_trials = np.size(events, np.ndim(events) - 1)
    times_to_cut = np.array(times_to_cut)
    samples_to_cut = (times_to_cut * sampling_freq).astype(int)
    if np.size(np.shape(data)) > 1:
        number_of_channels = np.shape(data)[0]
    else:
        number_of_channels = 1
    number_of_samples = samples_to_cut[1] - samples_to_cut[0]
    time_axis = np.arange(times_to_cut[0], times_to_cut[1], (times_to_cut[1] - times_to_cut[0]) / number_of_samples)

    # Pre-allocate outputs. The averaged array is rebound on the first trial,
    # so its allocation here mainly documents the expected shape.
    if sub_sample_freq:
        ss_samples = math.ceil(number_of_samples * (sub_sample_freq / sampling_freq))
        if keep_trials:
            if np.size(np.shape(data)) > 1:
                tl_singletrial_data = np.zeros([number_of_channels, ss_samples, number_of_trials])
            else:
                tl_singletrial_data = np.zeros([ss_samples, number_of_trials])
        tl_avgtrials_data = np.empty([number_of_channels, ss_samples])
    else:
        if keep_trials:
            if np.size(np.shape(data)) > 1:
                tl_singletrial_data = np.zeros([number_of_channels, number_of_samples, number_of_trials])
            else:
                tl_singletrial_data = np.zeros([number_of_samples, number_of_trials])
        tl_avgtrials_data = np.zeros([number_of_channels, number_of_samples])

    for index in range(number_of_trials):
        temp_samples_to_cut = samples_to_cut + events[index]
        out_of_data = False  # renamed from `breakpoint`, which shadowed the builtin

        # If there aren't enough points at the beginning of the data set then
        # disregard the event and move to the next one.
        # NOTE(review): advancing `index` here does not advance the outer for
        # loop, so a skipped event is revisited on the next for iteration;
        # kept as in the original — confirm whether that is intended.
        while np.min(temp_samples_to_cut) < 0:
            if temp_samples_to_cut[0] < 0:
                index = index + 1
                if index >= number_of_trials:
                    # BUG FIX: the original indexed past the end of `events`
                    # (IndexError) when every remaining event started too early.
                    out_of_data = True
                    break
                temp_samples_to_cut = samples_to_cut + events[index]
            elif temp_samples_to_cut[1] < 0:
                # BUG FIX: the original only set the flag, so the while
                # condition never changed and the loop spun forever.
                out_of_data = True
                break
        if out_of_data:
            break

        if np.size(np.shape(data)) > 1:
            temp_data = data[:, int(temp_samples_to_cut[0]): int(temp_samples_to_cut[1])]
        else:
            temp_data = data[int(temp_samples_to_cut[0]): int(temp_samples_to_cut[1])]

        if avg_reref:  # rereference with mean over all channels
            temp_data = temp_data - np.mean(temp_data, 0)

        if high_pass_cutoff:
            temp_data = filt.high_pass_filter(temp_data, sampling_freq, high_pass_cutoff)
        elif low_pass_cutoff:
            temp_data = filt.low_pass_filter(temp_data, sampling_freq, low_pass_cutoff)
        if rectify:
            temp_data = np.abs(temp_data)
            # After rectification smooth with the low pass cutoff if given,
            # otherwise with half the high pass cutoff.
            if low_pass_cutoff:
                temp_data = filt.low_pass_filter(temp_data, sampling_freq, low_pass_cutoff)
            else:
                temp_data = filt.low_pass_filter(temp_data, sampling_freq, high_pass_cutoff / 2)
        elif not rectify and high_pass_cutoff:
            temp_data = filt.low_pass_filter(temp_data, sampling_freq, high_pass_cutoff / 2)

        if sub_sample_freq:
            temp_data, sub_time_axis = subsample_data(temp_data, time_axis, sampling_freq, sub_sample_freq,
                                                      filterType='fir', filterOrder=30)

        if baseline_time is not None:
            if sub_sample_freq:
                temp_data = baseline_correct(temp_data, sub_sample_freq, time_axis, baseline_time[0], baseline_time[1])
            else:
                temp_data = baseline_correct(temp_data, sampling_freq, time_axis, baseline_time[0], baseline_time[1])

        if keep_trials:
            if np.size(np.shape(data)) > 1:
                tl_singletrial_data[:, :, index] = temp_data
            else:
                tl_singletrial_data[:, index] = temp_data

        if index == 0:
            # BUG FIX: copy the first trial. The original kept a reference, so
            # when temp_data was a raw slice of `data` the later += and /=
            # mutated the caller's array in place.
            tl_avgtrials_data = np.array(temp_data)
        elif index > 0:
            tl_avgtrials_data += temp_data

        if index % 10 == 0:
            print(index)  # progress indicator

    tl_avgtrials_data /= number_of_trials
    if sub_sample_freq:
        time_axis = sub_time_axis

    returned_tuple = [tl_avgtrials_data, time_axis]
    if keep_trials:
        returned_tuple = [tl_singletrial_data, tl_avgtrials_data, time_axis]

    return returned_tuple
Code example #7
def time_lock_raw_data(data,
                       events,
                       times_to_cut,
                       sampling_freq,
                       baseline_time=None,
                       sub_sample_freq=None,
                       high_pass_cutoff=None,
                       rectify=False,
                       low_pass_cutoff=None,
                       avg_reref=False,
                       keep_trials=False):
    """
    Time locks, baselines, high or low passes, and sub samples the data (in that order).

    Parameters
    ----------
    data : 1D (samples) or 2D (channels x samples) array of the raw recording.
    events : sample indices of the events to time lock to; if 2D, only the
        first row is used.
    times_to_cut : (start, end) window around each event, in seconds.
    sampling_freq : sampling frequency of `data` in Hz.
    baseline_time : optional (start, end) window passed to `baseline_correct`.
    sub_sample_freq : optional target frequency; trials are sub sampled to it.
    high_pass_cutoff, low_pass_cutoff : optional filter cutoffs (Hz) applied
        per trial via `filt`.
    rectify : if True, rectify (abs) the filtered trial and low pass again.
    avg_reref : if True, re-reference each trial to the mean over channels.
    keep_trials : if True, also return the single-trial data.

    Returns
    -------
    [avg_data, time_axis], or [single_trial_data, avg_data, time_axis] when
    keep_trials is True.
    """
    if np.ndim(events) == 2:
        events = events[0, :]  # keep only the row of event samples
    number_of_trials = np.size(events, np.ndim(events) - 1)
    times_to_cut = np.array(times_to_cut)
    samples_to_cut = (times_to_cut * sampling_freq).astype(int)
    if np.size(np.shape(data)) > 1:
        number_of_channels = np.shape(data)[0]
    else:
        number_of_channels = 1
    number_of_samples = samples_to_cut[1] - samples_to_cut[0]
    time_axis = np.arange(times_to_cut[0], times_to_cut[1],
                          (times_to_cut[1] - times_to_cut[0]) /
                          number_of_samples)

    # Pre-allocate outputs. The averaged array is rebound on the first trial,
    # so its allocation here mainly documents the expected shape.
    if sub_sample_freq:
        ss_samples = math.ceil(number_of_samples *
                               (sub_sample_freq / sampling_freq))
        if keep_trials:
            if np.size(np.shape(data)) > 1:
                tl_singletrial_data = np.zeros(
                    [number_of_channels, ss_samples, number_of_trials])
            else:
                tl_singletrial_data = np.zeros(
                    [ss_samples, number_of_trials])
        tl_avgtrials_data = np.empty([number_of_channels, ss_samples])
    else:
        if keep_trials:
            if np.size(np.shape(data)) > 1:
                tl_singletrial_data = np.zeros(
                    [number_of_channels, number_of_samples, number_of_trials])
            else:
                tl_singletrial_data = np.zeros(
                    [number_of_samples, number_of_trials])
        tl_avgtrials_data = np.zeros([number_of_channels, number_of_samples])

    for index in range(number_of_trials):
        temp_samples_to_cut = samples_to_cut + events[index]
        out_of_data = False  # renamed from `breakpoint`, which shadowed the builtin

        # If there aren't enough points at the beginning of the data set then
        # disregard the event and move to the next one.
        # NOTE(review): advancing `index` here does not advance the outer for
        # loop, so a skipped event is revisited on the next for iteration;
        # kept as in the original — confirm whether that is intended.
        while np.min(temp_samples_to_cut) < 0:
            if temp_samples_to_cut[0] < 0:
                index = index + 1
                if index >= number_of_trials:
                    # BUG FIX: the original indexed past the end of `events`
                    # (IndexError) when every remaining event started too early.
                    out_of_data = True
                    break
                temp_samples_to_cut = samples_to_cut + events[index]
            elif temp_samples_to_cut[1] < 0:
                # BUG FIX: the original only set the flag, so the while
                # condition never changed and the loop spun forever.
                out_of_data = True
                break
        if out_of_data:
            break

        if np.size(np.shape(data)) > 1:
            temp_data = data[:,
                             int(temp_samples_to_cut[0]
                                 ):int(temp_samples_to_cut[1])]
        else:
            temp_data = data[int(temp_samples_to_cut[0]
                                 ):int(temp_samples_to_cut[1])]

        if avg_reref:  # rereference with mean over all channels
            temp_data = temp_data - np.mean(temp_data, 0)

        if high_pass_cutoff:
            temp_data = filt.high_pass_filter(temp_data, sampling_freq,
                                              high_pass_cutoff)
        elif low_pass_cutoff:
            temp_data = filt.low_pass_filter(temp_data, sampling_freq,
                                             low_pass_cutoff)
        if rectify:
            temp_data = np.abs(temp_data)
            # After rectification smooth with the low pass cutoff if given,
            # otherwise with half the high pass cutoff.
            if low_pass_cutoff:
                temp_data = filt.low_pass_filter(temp_data, sampling_freq,
                                                 low_pass_cutoff)
            else:
                temp_data = filt.low_pass_filter(temp_data, sampling_freq,
                                                 high_pass_cutoff / 2)
        elif not rectify and high_pass_cutoff:
            temp_data = filt.low_pass_filter(temp_data, sampling_freq,
                                             high_pass_cutoff / 2)

        if sub_sample_freq:
            temp_data, sub_time_axis = subsample_data(temp_data,
                                                      time_axis,
                                                      sampling_freq,
                                                      sub_sample_freq,
                                                      filterType='fir',
                                                      filterOrder=30)

        if baseline_time is not None:
            if sub_sample_freq:
                temp_data = baseline_correct(temp_data, sub_sample_freq,
                                             time_axis, baseline_time[0],
                                             baseline_time[1])
            else:
                temp_data = baseline_correct(temp_data, sampling_freq,
                                             time_axis, baseline_time[0],
                                             baseline_time[1])

        if keep_trials:
            if np.size(np.shape(data)) > 1:
                tl_singletrial_data[:, :, index] = temp_data
            else:
                tl_singletrial_data[:, index] = temp_data

        if index == 0:
            # BUG FIX: copy the first trial. The original kept a reference, so
            # when temp_data was a raw slice of `data` the later += and /=
            # mutated the caller's array in place.
            tl_avgtrials_data = np.array(temp_data)
        elif index > 0:
            tl_avgtrials_data += temp_data

        if index % 10 == 0:
            print(index)  # progress indicator

    tl_avgtrials_data /= number_of_trials
    if sub_sample_freq:
        time_axis = sub_time_axis

    returned_tuple = [tl_avgtrials_data, time_axis]
    if keep_trials:
        returned_tuple = [tl_singletrial_data, tl_avgtrials_data, time_axis]

    return returned_tuple