Code example #1
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np

# `ut` (utilities providing find_closest) and `plot_topoplot` are project-local
# helpers assumed to be importable alongside this function.


def plot_video_topoplot(data, time_axis, channel_positions, times_to_plot=(-0.1, 0.2), time_window=0.002,
                        time_step=0.002, sampling_freq=1000, zlimits=None, filename=None):
    """Animate a topoplot of `data` over `times_to_plot` and optionally save it as a video."""
    fig = plt.figure()
    # Convert the time step and the requested time span into sample indices.
    sample_step = int(time_step * sampling_freq)
    sub_time_indices = np.arange(ut.find_closest(time_axis, times_to_plot[0]),
                                 ut.find_closest(time_axis, times_to_plot[1]))
    sub_time_indices = sub_time_indices[0::sample_step]
    # Position of the time-stamp text depends on the probe layout size.
    if np.shape(channel_positions)[0] <= 64:
        text_y = 8.3
    elif np.shape(channel_positions)[0] <= 128:
        text_y = 16.5
    else:
        text_y = 16.5  # fallback so text_y is always defined (assumed value)
    text_x = 2
    images = []
    for t in sub_time_indices:
        # Average the data over a small window starting at each frame's sample.
        samples = [t, t + (time_window * sampling_freq)]
        data_to_plot = np.mean(data[:, int(samples[0]):int(samples[1])], 1)
        image, scat = plot_topoplot(channel_positions, data_to_plot, show=False, interpmethod="quadric",
                                    gridscale=5, zlimits=zlimits)
        txt = plt.text(x=text_x, y=text_y, s=str(time_axis[t]) + ' secs')
        images.append([image, scat, txt])
    FFwriter = animation.FFMpegWriter()
    ani = animation.ArtistAnimation(fig, images, interval=500, blit=True, repeat_delay=1000)
    plt.colorbar(mappable=image)
    if filename is not None:
        # Machine-specific path to the ffmpeg binary; adjust for your system.
        plt.rcParams['animation.ffmpeg_path'] = r"C:\George\Development\PythonProjects\AnalysisDevelopment\Code\ExtraRequirements\ffmpeg-20140618-git-7f52960-win64-static\bin\ffmpeg.exe"
        ani.save(filename, writer=FFwriter, fps=1, bitrate=5000, dpi=300, extra_args=['-vcodec', 'h264'])
    plt.show()
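

# A hypothetical smoke test for the function above (not from the original
# source): synthetic data and random channel positions, assuming plot_topoplot
# and ut.find_closest are importable.
n_channels, n_samples = 32, 1000
time_axis = np.arange(n_samples) / 1000.0 - 0.5        # -0.5 s .. 0.5 s
data = np.random.randn(n_channels, n_samples)
channel_positions = np.random.rand(n_channels, 2) * 8  # x/y probe positions
plot_video_topoplot(data, time_axis, channel_positions, times_to_plot=(-0.1, 0.2))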
Code example #2
import h5py as h5
import numpy as np
from os.path import join

# `ut`, `basic_dir`, `geometry_dir`, `channel_number`, `geometry_descriptions`,
# `clusters` and `extra_spike_times_128ch` are module-level names defined in
# the surrounding script.


def gen_clusters_subselection(clusters_subselection_all, code):
    # Read the extracellular spike times of this sub-selected geometry from its .kwik file.
    kwik_file = join(basic_dir, geometry_dir.format(channel_number[code], geometry_descriptions[code]),
                     'threshold_6_5std.kwik')
    h5file = h5.File(kwik_file, mode='r')
    extra_spike_times_subselection = np.array(list(h5file['channel_groups/0/spikes/time_samples']))
    h5file.close()

    clusters_subselection = {}
    for c in np.arange(len(clusters)):
        # For each 128-channel cluster, find which of its spikes also appear
        # (within a 7-sample jitter) in the sub-selected geometry's spike train.
        common_spikes, indices_of_common_spikes, not_common_spikes = \
            ut.find_points_in_array_with_jitter(extra_spike_times_128ch[clusters[c]],
                                                extra_spike_times_subselection, 7)
        clusters_subselection[c] = indices_of_common_spikes

    clusters_subselection_all[code] = clusters_subselection

    return clusters_subselection_all
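
# A hypothetical driver for the function above (not from the original source):
# iterate over the available geometry codes, accumulating the sub-selections.
clusters_subselection_all = {}
for code in range(len(geometry_descriptions)):
    clusters_subselection_all = gen_clusters_subselection(clusters_subselection_all, code)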
Code example #3
import h5py as h5
import numpy as np

# `ephys`, `tf` (spike-event helpers), `ut`, `split_juxta_spikes_into_groups_by_size`
# and `select_spikes_in_certain_channels` are project-local modules and helpers
# assumed to be importable.


def create_juxta_label(kwik_file,
                       spike_thresholds,
                       num_of_spike_groups=1,
                       adc_channel_used=0,
                       adc_dtype=np.uint16,
                       inter_spike_time_distance=0.002,
                       amp_gain=100,
                       num_of_raw_data_channels=None,
                       spike_channels=None,
                       verbose=True):
    """
    Find the juxta spikes in the extra spike train and label them according to size splitting them into
    num_of_spike_groups groups

    Parameters
    ----------
    kwik_file
    spike_thresholds
    num_of_spike_groups
    adc_channel_used
    adc_dtype
    inter_spike_time_distance
    amp_gain
    num_of_raw_data_channels
    spike_channels
    verbose

    Returns
    -------

    """

    h5file = h5.File(kwik_file, mode='r')
    extra_spike_times = np.array(
        list(h5file['channel_groups/0/spikes/time_samples']))
    h5file.close()

    spikes_used = len(extra_spike_times)
    if verbose:
        print("Total spikes in extra = " + str(len(extra_spike_times)))

    # Get the juxta spikes and generate labels
    # 1) Generate the juxta spike time triggers (and the adc traces in Volts).
    # Note: the raw juxta file path is hard-coded and machine-specific.
    raw_juxta_data_file = r'D:\Data\George\Projects\SpikeSorting\Joana_Paired_128ch\2015-09-03\Data' + \
                          r'\adc2015-09-03T21_18_47.bin'
    raw_data_patch = ephys.load_raw_event_trace(raw_juxta_data_file,
                                                number_of_channels=8,
                                                channel_used=adc_channel_used,
                                                dtype=adc_dtype)
    juxta_spike_triggers, juxta_spike_peaks, juxta_spike_data_in_V = tf.create_spike_triggered_events(
        raw_data_patch.dataMatrix,
        threshold=spike_thresholds,
        inter_spike_time_distance=inter_spike_time_distance,
        amp_gain=amp_gain)
    num_of_spikes = len(juxta_spike_triggers)
    if verbose:
        print('Total spikes in Juxta = ' + str(num_of_spikes))

    # 2) Separate the juxta spikes into a number of groups according to their size
    juxta_spikes_grouped, juxta_spike_peaks_grouped, juxta_spike_triggers_grouped_withnans,\
            juxta_spike_peaks_grouped_withnans, spike_thresholds_groups = \
        split_juxta_spikes_into_groups_by_size(num_of_spike_groups=num_of_spike_groups,
                                                   juxta_spike_peaks=juxta_spike_peaks,
                                                   juxta_spike_triggers=juxta_spike_triggers)

    # 3) Find the common spikes between the extra spikes and the juxta spikes, for all the juxta spikes and for
    # all the sub-groups of juxta spikes, and group the good spikes
    common_spikes_grouped = {}
    juxta_spikes_not_found_grouped = {}
    indices_of_common_extra_spikes_grouped = {}
    for g in range(1, num_of_spike_groups + 1):
        common_spikes_grouped[g], indices_of_common_extra_spikes_grouped[g], juxta_spikes_not_found_grouped[g] = \
            ut.find_points_in_array_with_jitter(array_of_points_to_be_found=juxta_spikes_grouped[g],
                                                array_to_search=extra_spike_times[:spikes_used],
                                                jitter_around_each_point=7)
        if spike_channels is not None and num_of_raw_data_channels is not None:
            # Keep only the spikes that appear on the requested channels (cleaning).
            common_spikes_grouped[g], indices_of_common_extra_spikes_grouped[g] \
                = select_spikes_in_certain_channels(spike_thresholds, common_spikes_grouped[g],
                                                    indices_of_common_extra_spikes_grouped[g],
                                                    spike_channels, num_of_raw_data_channels)

    # 4) Get the t-sne indices of the grouped juxta spikes
    indices_of_data_for_tsne = range(spikes_used)
    juxta_cluster_indices_grouped = {}
    for g in range(0, num_of_spike_groups):
        juxta_cluster_indices_temp = np.intersect1d(
            indices_of_data_for_tsne,
            indices_of_common_extra_spikes_grouped[g + 1])
        juxta_cluster_indices_grouped[g] = \
            [i for i in np.arange(0, len(indices_of_data_for_tsne))
             if len(np.where(juxta_cluster_indices_temp == indices_of_data_for_tsne[i])[0])]
        if verbose and spike_channels is not None:
            print('Labeled after cleaning = ' +
                  str(len(juxta_cluster_indices_grouped[g])))

    return juxta_cluster_indices_grouped, spike_thresholds_groups
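
# A hypothetical call of the function above (the .kwik path and threshold value
# are placeholders, not from the original source).
juxta_cluster_indices_grouped, spike_thresholds_groups = \
    create_juxta_label(kwik_file=r'D:\path\to\threshold_6_5std.kwik',
                       spike_thresholds=0.0002, num_of_spike_groups=4)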
Code example #4
import os
import pickle

import mne
import numpy as np
from mne.time_frequency import tfr_morlet

# `raw_memmaped`, `events`, `path`, `ut` and `cp` (a table of channel numbers,
# names and positions) come from the surrounding script.

# mne.io.Raw.plot(raw=raw_memmaped, duration=2, start=20, n_channels=20, scalings={'eeg': 8000}, remove_dc=True)

# Build the (sample, previous value, event id) array that mne.Epochs expects.
event_code = 1
events_mne = np.c_[np.array(events), np.zeros(len(events), dtype=int),
                   event_code * np.ones(len(events), dtype=int)]
baseline = (-2.5, -2.3)
event_id = dict(left_paw=event_code)
epochs = mne.Epochs(raw_memmaped, events_mne, event_id, -3, 3, proj=True, picks=None, baseline=baseline,
                    preload=True, reject=None)
averaged = epochs.average()

# Either load a previously computed time-frequency decomposition ...
with open(os.path.join(path, "Analysis\\tfr_power.p"), "rb") as f:
    power = pickle.load(f)

# ... or recompute it with Morlet wavelets (power and inter-trial phase locking).
n_cycles = 3
frequencies = np.arange(5, 60, 3)
power, phase_lock = tfr_morlet(epochs, freqs=frequencies, n_cycles=n_cycles, decim=3000, n_jobs=10)

# Browse the result in the project's TFR viewer.
import gui_tfr_viewer
gui_tfr_viewer.TFR_Viewer(power)

# Build a custom 128-channel MNE layout from the probe's channel positions.
box = (0, 0.8, 0, 1.1)
w, h = .09, .05
channels_sorted = cp.sort_index(0, by='Numbers', ascending=True)
pos = [[ut.normList([x, y], normalizeTo=0.8, vMin=1, vMax=8)[0],
        ut.normList([x, y], vMin=1, vMax=16)[1], w, h]
       for [n, s, (x, y)] in channels_sorted.values]
layout = mne.layouts.Layout(box, pos, channels_sorted.Strings, channels_sorted.Numbers, '128ch')

power.plot_topo(picks=None, tmin=-3, tmax=3, fmin=5, fmax=60, vmin=-3e10, vmax=3e10, layout=layout, layout_scale=None)
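
# For reference, a self-contained sketch of the Epochs pattern used above on
# synthetic data (all names and values here are illustrative).
sfreq = 1000.0
info = mne.create_info(ch_names=['ch%02d' % i for i in range(4)], sfreq=sfreq, ch_types='eeg')
synthetic_raw = mne.io.RawArray(np.random.randn(4, 60 * int(sfreq)), info)
event_samples = np.arange(5, 55, 10) * int(sfreq)   # one event every 10 s
synthetic_events = np.c_[event_samples, np.zeros(5, dtype=int), np.ones(5, dtype=int)]
synthetic_epochs = mne.Epochs(synthetic_raw, synthetic_events, event_id=dict(left_paw=1),
                              tmin=-3, tmax=3, baseline=(-2.5, -2.3), preload=True)
print(synthetic_epochs)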
Code example #5
import h5py as h5
import numpy as np
import scipy.io as sio

# `ut`, `kwik_file_path`, `spike_mat_file` and `tsne_video_path` come from the
# surrounding script.

h5file = h5.File(kwik_file_path, mode='r')
spike_times = np.array(list(h5file['channel_groups/1/spikes/time_samples']))
h5file.close()

spikes_used = 400000  # len(spike_times) # 130000

# Get clusters
mat_dict = sio.loadmat(spike_mat_file)
labeled_spike_times = mat_dict['gtTimes'][0]

# 1) Get indices of labeled spikes
spikes_labeled_dict = dict()
number_of_labels = len(labeled_spike_times)
for i in range(number_of_labels):
    # Match each ground-truth spike train against the detected spike times
    # (6-sample jitter) and keep the indices of the matching detected spikes.
    common_spikes, spikes_labeled_dict[i], labeled_spikes_not_found = \
        ut.find_points_in_array_with_jitter(labeled_spike_times[i][:, 0], spike_times[:spikes_used], 6)

# 2) Generate a labels array (each spike is represented by its label number,
# or -1 if it doesn't have a label)
labels = np.full(spikes_used, -1.0)  # every spike starts unlabeled
for l in range(number_of_labels):
    labels[spikes_labeled_dict[l]] = l
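
# Hypothetical sanity check (not in the original): fraction of detected spikes
# that received a ground-truth label.
num_labeled = int((labels >= 0).sum())
print('labeled %d of %d spikes (%.1f%%)' % (num_labeled, spikes_used, 100.0 * num_labeled / spikes_used))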


# 3) Find how many spikes are labeled
number_of_labeled_spikes = 0
for i in range(number_of_labels):
    number_of_labeled_spikes += labeled_spike_times[i][:, 0].shape[0]

# Run t-sne
path_to_save_tmp_data = tsne_video_path
perplexity = 200
Code example #6
import os

import h5py as h5
import numpy as np

# `util` is the project's utilities module. The .kwik file holding the phy
# spike times is assumed to have been opened as `h5file` just above this
# excerpt (the snippet starts mid-script).
spike_times_phy = np.array(list(h5file['channel_groups/0/spikes/time_samples']))
h5file.close()

t_tsne = np.load(r'D:\Data\George\Projects\SpikeSorting\Joana_Paired_128ch\2015-09-03\Analysis\klustakwik'
                 r'\threshold_6_5std\t_sne_results_final_allspikes.npy')

# thres4_10_10_Fe16_Pc12 OR thres4_10_10_Fe256_Pc128 OR thres6_12_12_Fe256_Pc128
kilosort_experiment_folder = r'thres4_10_10_Fe16_Pc12'
kilosort_path = os.path.join(r'D:\Data\George\Projects\SpikeSorting\Joana_Paired_128ch\2015-09-03\Analysis\kilosort',
                             kilosort_experiment_folder)

# Load kilosort's output: template assignments, spike times and features.
kilosort_output = os.path.join(kilosort_path, 'kilosort output')
spike_clusters_kilosort = np.load(os.path.join(kilosort_output, 'spike_templates.npy'))
spike_times_kilosort = np.load(os.path.join(kilosort_output, 'spike_times.npy'))
template_features = np.load(os.path.join(kilosort_output, 'template_features.npy'))
template_features_ind = np.load(os.path.join(kilosort_output, 'template_feature_ind.npy'))
pc_features = np.load(os.path.join(kilosort_output, 'pc_features.npy'))
pc_features_ind = np.load(os.path.join(kilosort_output, 'pc_feature_ind.npy'))

# Match the phy spike train against the kilosort spike train (6-sample jitter).
common_spikes, indices_of_common_spikes_in_phy, indices_of_common_spikes_in_kilosort, small_train_spikes_not_found = \
    util.find_points_in_array_with_jitter(spike_times_phy, spike_times_kilosort, 6)

tsne_dir = os.path.join(kilosort_path, 'tsne')
np.save(os.path.join(tsne_dir, 'small_train_spikes_not_found.npy'), small_train_spikes_not_found)
np.save(os.path.join(tsne_dir, 'indices_of_common_spikes.npy'), indices_of_common_spikes_in_phy)
np.save(os.path.join(tsne_dir, 'common_spikes_in_tsne_train.npy'), common_spikes)
np.save(os.path.join(tsne_dir, 'indices_of_common_spikes_in_kilosort_train.npy'), indices_of_common_spikes_in_kilosort)
# OR load the previously saved results
common_spikes = np.load(os.path.join(tsne_dir, 'common_spikes_in_tsne_train.npy'))
indices_of_common_spikes_in_phy = np.load(os.path.join(tsne_dir, 'indices_of_common_spikes.npy'))
indices_of_common_spikes_in_kilosort = np.load(os.path.join(tsne_dir, 'indices_of_common_spikes_in_kilosort_train.npy'))
small_train_spikes_not_found = np.load(os.path.join(tsne_dir, 'small_train_spikes_not_found.npy'))


# Plot the phy t-sne using the clusters defined by kilosort
kilosort_units = {}  # dict holding the phy spike indices of each kilosort unit
for i in np.arange(len(indices_of_common_spikes_in_kilosort)):
    # (loop body truncated in the original excerpt; it presumably appends each
    # common phy spike to the entry of its kilosort template in kilosort_units)
    pass
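
# The truncated loop above presumably collects, for each kilosort template, the
# phy spikes assigned to it; below is a self-contained sketch of that grouping
# pattern on synthetic data.
toy_spike_clusters = np.array([3, 1, 3, 2, 1, 3])  # cluster id of each spike
toy_units = {}
for index, cluster in enumerate(toy_spike_clusters):
    toy_units.setdefault(int(cluster), []).append(index)
print(toy_units)  # {3: [0, 2, 5], 1: [1, 4], 2: [3]}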
Code example #7
import numpy as np

# `ut`, `raw_data`, `good_channels`, `juxta_spike_triggers`, `extra_spike_times`,
# `spikes_used`, `juxta_spikes_grouped` and `num_of_spike_groups` come from the
# surrounding script. The start of this function is truncated in the original
# excerpt; the signature below is reconstructed from the call site further down,
# and `t` is presumably a channels x spikes array of values around each spike.
def select_spikes_in_certain_channels(common_spikes, indices_of_common_spikes_in_extra,
                                      raw_data, good_channels):
    ...
    spike_channels = np.argmin(t, axis=0)  # channel of the minimum (most negative) value per spike
    good_spike_indices = [i for i, x in enumerate(spike_channels)
                          if np.in1d(x, good_channels)]
    common_spikes = common_spikes[good_spike_indices]
    indices_of_common_spikes_in_extra = indices_of_common_spikes_in_extra[good_spike_indices]
    return common_spikes, indices_of_common_spikes_in_extra
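
# The per-spike membership test above can also be done in one vectorized call;
# an equivalent sketch on synthetic data (not from the original source).
toy_spike_channels = np.array([5, 2, 9, 2, 7])
toy_good_channels = [2, 7]
toy_good_spike_indices = np.where(np.in1d(toy_spike_channels, toy_good_channels))[0]
print(toy_good_spike_indices)  # [1 3 4]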


# 3) Find the common spikes between the extra spikes and the juxta spikes, for all the juxta spikes and for
# all the sub-groups of juxta spikes
common_spikes, indices_of_common_spikes_in_extra, juxta_spikes_not_found = \
    ut.find_points_in_array_with_jitter(array_of_points_to_be_found=juxta_spike_triggers,
                                        array_to_search=extra_spike_times[:spikes_used],
                                        jitter_around_each_point=7)
common_spikes_chan_selected = select_spikes_in_certain_channels(
    common_spikes, indices_of_common_spikes_in_extra, raw_data, good_channels)

# 4) Group the good spikes
common_spikes_grouped = {}
common_spikes_grouped_chan_selected = {}
juxta_spikes_not_found_grouped = {}
indices_of_common_extra_spikes_grouped = {}
indices_of_common_extra_spikes_grouped_chan_selected = {}
for g in range(1, num_of_spike_groups + 1):
    common_spikes_grouped[g], indices_of_common_extra_spikes_grouped[g], juxta_spikes_not_found_grouped[g] = \
        ut.find_points_in_array_with_jitter(array_of_points_to_be_found=juxta_spikes_grouped[g],
                                            array_to_search=extra_spike_times[:spikes_used],
                                            jitter_around_each_point=7)