Example #1
# ---------- Data generation ----------
data = lio.read_all_csc(folder,
                        assume_same_fs=False,
                        memmap=True,
                        memmap_folder=memap_folder,
                        save_for_spikedetekt=False,
                        channels_to_save=None,
                        return_sliced_data=False)
pl.save(os.path.join(memap_folder, 'B14R9_raw.npy'), data)

data_ecog = data[:64, :]
data_probe = data[64:, :]

data_probe_hp = pl.memmap(os.path.join(memap_folder, 'data_probe_hp.dat'),
                          dtype='int16',
                          mode='w+',
                          shape=pl.shape(data_probe))
for i in pl.arange(0, pl.shape(data_probe)[0]):
    data_probe_hp[i, :] = filters.high_pass_filter(data_probe[i, :],
                                                   Fsampling=f_sampling,
                                                   Fcutoff=f_hp_cutoff)
    data_probe_hp.flush()
    print(i)
pl.save(os.path.join(memap_folder, 'data_probe_hp.npy'), data_probe_hp)

shape_data_ss = (pl.shape(data_ecog)[0],
                 pl.shape(data_ecog)[1] // int(f_sampling / f_subsample))  # floor division keeps the memmap shape integral
data_ecog_lp_ss = pl.memmap(os.path.join(memap_folder, 'data_ecog_lp_ss.dat'),
                            dtype='int16',
                            mode='w+',
                            shape=shape_data_ss)
for i in pl.arange(0, pl.shape(data_ecog)[0]):
    data_ecog_lp_ss[i, :] = signal.decimate(
        filters.low_pass_filter(data_ecog[i, :],
                                Fsampling=f_sampling,
                                Fcutoff=f_lp_cutoff),
        int(f_sampling / f_subsample))
    data_ecog_lp_ss.flush()
    print(i)
pl.save(os.path.join(memap_folder, 'data_ecog_lp_ss.npy'), data_ecog_lp_ss)
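
# The pl.save calls above leave .npy copies of the filtered arrays, so a later
# session can reopen them lazily instead of re-reading the raw CSC files.
# A minimal sketch, assuming pl is the usual pylab namespace (so pl.load is
# numpy.load); mmap_mode='r' gives a read-only, on-demand view:
data_probe_hp = pl.load(os.path.join(memap_folder, 'data_probe_hp.npy'),
                        mmap_mode='r')
data_ecog_lp_ss = pl.load(os.path.join(memap_folder, 'data_ecog_lp_ss.npy'),
                          mmap_mode='r')
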
num_of_points_for_baseline = 10

selected_spikes_data_cube = tsne_cluster.create_data_cube_from_raw_extra_data(
    raw_data_ivm, data_cube_filename, num_ivm_channels,
    num_of_points_in_spike_trig, cube_type, spike_times_of_indices_all,
    num_of_points_in_spike_trig - 1)

selected_spikes_concatenated = np.reshape(
    selected_spikes_data_cube,
    ((num_ivm_channels * num_of_points_in_spike_trig),
     len(spike_times_of_indices_all))).transpose()

selected_spikes_data_cube_filtered = np.zeros(
    (num_ivm_channels, num_of_points_in_spike_trig,
     len(spike_times_of_indices_all)))
for spike in np.arange(len(spike_times_of_indices_all)):
    selected_spikes_data_cube_filtered[:, :, spike] = filters.high_pass_filter(
        selected_spikes_data_cube[:, :, spike], 30000,
        30000 / 128)  # removes the slant in each trial

selected_spikes_concatenated_filtered = np.reshape(
    selected_spikes_data_cube_filtered,
    ((num_ivm_channels * num_of_points_in_spike_trig),
     len(spike_times_of_indices_all))).transpose()

from sklearn.neural_network import BernoulliRBM as RBM

X = np.zeros((selected_spikes_concatenated_filtered.shape[0],
              selected_spikes_concatenated_filtered.shape[1]))
for i in np.arange(selected_spikes_concatenated_filtered.shape[0]):
    X[i, :] = (selected_spikes_concatenated_filtered[i, :] -
               np.min(selected_spikes_concatenated_filtered[i, :], 0)) / (
                   np.max(selected_spikes_concatenated_filtered[i, :], 0) -
                   np.min(selected_spikes_concatenated_filtered[i, :], 0))  # 0-1 scaling

rbm = RBM(n_components=200, learning_rate=0.05, batch_size=100, n_iter=500)
rbm.fit(X.transpose())
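
# BernoulliRBM models visible units in [0, 1], which is why each waveform is
# min-max scaled first. The snippet fits the RBM on X.transpose() (one column
# per spike) and later embeds the learned components. A hypothetical sketch of
# the more conventional orientation, one spike per sample, would be:
hidden_features = RBM(n_components=200, learning_rate=0.05,
                      batch_size=100, n_iter=500).fit_transform(X)
print(hidden_features.shape)  # (number of spikes, 200)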

rbm_features_tsne_selected_indices = TSNE.t_sne(rbm.components_.transpose())  # the remaining t_sne arguments are cut off in the source
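
# Since the t_sne arguments above are cut off, scikit-learn's t-SNE works as a
# stand-in for the custom TSNE module (a sketch under that substitution, with
# illustrative parameters, not the author's settings):
from sklearn.manifold import TSNE as SklearnTSNE

rbm_embedding = SklearnTSNE(n_components=2, perplexity=30.0).fit_transform(
    rbm.components_.transpose())  # one 2-D point per spike
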
phases_all_shaftC = pl.load(os.path.join(memap_folder, 'phases_all_shaftC.npy'), mmap_mode=None)

data = pl.load(os.path.join(memap_folder, 'B14R9_raw.npy'), mmap_mode='r+')
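
# mmap_mode=None loads phases_all_shaftC fully into memory, while
# mmap_mode='r+' memory-maps B14R9_raw.npy so slices are read on demand
# (and in-place edits reach the file). E.g., pulling one channel touches
# only that part of the file:
first_probe_channel = data[64, :]  # channel 64: first probe channel per the ecog/probe split above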


Example #6
def time_lock_raw_data(data,
                       events,
                       times_to_cut,
                       sampling_freq,
                       baseline_time=None,
                       sub_sample_freq=None,
                       high_pass_cutoff=None,
                       rectify=False,
                       low_pass_cutoff=None,
                       avg_reref=False,
                       keep_trials=False):
    """
    Time-locks, baseline-corrects, high- or low-pass filters, and subsamples the data (in that order).
    """
    if np.ndim(events) == 2:
        events = events[0, :]
    number_of_trials = np.size(events, np.ndim(events) - 1)
    times_to_cut = np.array(times_to_cut)
    samples_to_cut = (times_to_cut * sampling_freq).astype(int)
    if np.size(np.shape(data)) > 1:
        number_of_channels = np.shape(data)[0]
    else:
        number_of_channels = 1
    number_of_samples = samples_to_cut[1] - samples_to_cut[0]
    time_axis = np.arange(times_to_cut[0], times_to_cut[1],
                          (times_to_cut[1] - times_to_cut[0]) /
                          number_of_samples)

    if sub_sample_freq:
        if keep_trials:
            if np.size(np.shape(data)) > 1:
                tl_singletrial_data = np.zeros([
                    number_of_channels,
                    math.ceil(number_of_samples *
                              (sub_sample_freq / sampling_freq)),
                    number_of_trials
                ])
            else:
                tl_singletrial_data = np.zeros([
                    math.ceil(number_of_samples *
                              (sub_sample_freq / sampling_freq)),
                    number_of_trials
                ])
        # zeros rather than empty: this array is summed into directly when the
        # first event is skipped inside the trial loop below
        tl_avgtrials_data = np.zeros([
            number_of_channels,
            math.ceil(number_of_samples * (sub_sample_freq / sampling_freq))
        ])
    else:
        if keep_trials:
            if np.size(np.shape(data)) > 1:
                tl_singletrial_data = np.zeros(
                    [number_of_channels, number_of_samples, number_of_trials])
            else:
                tl_singletrial_data = np.zeros(
                    [number_of_samples, number_of_trials])
        tl_avgtrials_data = np.zeros([number_of_channels, number_of_samples])

    for index in range(number_of_trials):
        temp_samples_to_cut = samples_to_cut + events[index]
        stop_early = False  # set when an event's whole window falls before the start of the recording

        # If there aren't enough points at the beginning of the recording,
        # disregard the event and move on to the next one.
        while np.min(temp_samples_to_cut) < 0:
            if temp_samples_to_cut[0] < 0:
                index = index + 1  # assumes a later event still fits; raises IndexError past the last event
                temp_samples_to_cut = samples_to_cut + events[index]
            elif temp_samples_to_cut[1] < 0:
                stop_early = True
                break  # without this break the while loop never terminates
        if stop_early:
            break
        if np.size(np.shape(data)) > 1:
            temp_data = data[:, int(temp_samples_to_cut[0]):int(temp_samples_to_cut[1])]
        else:
            temp_data = data[int(temp_samples_to_cut[0]):int(temp_samples_to_cut[1])]

        if avg_reref:  # rereference with mean over all channels
            temp_data = temp_data - np.mean(temp_data, 0)

        if high_pass_cutoff:
            temp_data = filt.high_pass_filter(temp_data, sampling_freq,
                                              high_pass_cutoff)
        elif low_pass_cutoff:
            temp_data = filt.low_pass_filter(temp_data, sampling_freq,
                                             low_pass_cutoff)
        if rectify:
            temp_data = np.abs(temp_data)
            if low_pass_cutoff:
                temp_data = filt.low_pass_filter(temp_data, sampling_freq,
                                                 low_pass_cutoff)
            else:
                temp_data = filt.low_pass_filter(temp_data, sampling_freq,
                                                 high_pass_cutoff / 2)
        elif not rectify and high_pass_cutoff:
            temp_data = filt.low_pass_filter(temp_data, sampling_freq,
                                             high_pass_cutoff / 2)

        if sub_sample_freq:
            temp_data, sub_time_axis = subsample_data(temp_data,
                                                      time_axis,
                                                      sampling_freq,
                                                      sub_sample_freq,
                                                      filterType='fir',
                                                      filterOrder=30)

        if baseline_time is not None:
            if sub_sample_freq:
                temp_data = baseline_correct(temp_data, sub_sample_freq,
                                             time_axis, baseline_time[0],
                                             baseline_time[1])
            else:
                temp_data = baseline_correct(temp_data, sampling_freq,
                                             time_axis, baseline_time[0],
                                             baseline_time[1])

        if keep_trials:
            if np.size(np.shape(data)) > 1:
                tl_singletrial_data[:, :, index] = temp_data
            else:
                tl_singletrial_data[:, index] = temp_data

        if index == 0:
            # copy as float so the running sum never mutates a view of the
            # raw data and the final division is not integer arithmetic
            tl_avgtrials_data = temp_data.astype(float)
        elif index > 0:
            tl_avgtrials_data += temp_data

        if index % 10 == 0:
            print(index)

    tl_avgtrials_data /= number_of_trials
    if sub_sample_freq:
        time_axis = sub_time_axis

    returned_tuple = [tl_avgtrials_data, time_axis]
    if keep_trials:
        returned_tuple = [tl_singletrial_data, tl_avgtrials_data, time_axis]

    return returned_tuple
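
# A hypothetical usage sketch (synthetic data and event times, illustrative
# only, not part of the original example): cut 0.2 s before to 0.5 s after
# each trigger and keep the single trials as well as the average.
_f_sampling = 30000  # Hz, matching the rate used elsewhere in these snippets
_fake_data = np.random.randn(64, 10 * _f_sampling)  # 64 channels, 10 s
_triggers = np.array([1, 3, 6]) * _f_sampling  # event onsets in samples
_trials, _avg, _t = time_lock_raw_data(_fake_data, _triggers,
                                       times_to_cut=[-0.2, 0.5],
                                       sampling_freq=_f_sampling,
                                       keep_trials=True)
print(_trials.shape, _avg.shape)  # (64, 21000, 3) (64, 21000)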