Example #1
# These snippets assume `import numpy as np` and `import nept`; the longer
# examples further down also use `scipy.interpolate` and project-local helpers
# such as get_data and get_bin_centers.
import numpy as np
import nept


def test_bin_spikes_gaussian():
    spikes = [nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1])]
    time = np.array([0., 10.])

    counts = nept.bin_spikes(spikes, 0.0, 10.0, dt=0.5, window=2.,
                             gaussian_std=0.51, normalized=True)

    assert np.allclose(counts.data, np.array([[0.40347865],
                                              [0.77042907],
                                              [1.00980573],
                                              [1.06273102],
                                              [0.90701256],
                                              [0.65089043],
                                              [0.45510984],
                                              [0.31307944],
                                              [0.18950878],
                                              [0.07738638],
                                              [0.01560105],
                                              [0.00129411],
                                              [0.0],
                                              [0.0],
                                              [0.0],
                                              [0.0],
                                              [0.0],
                                              [0.0],
                                              [0.0]]))
Example #2
def test_bin_spikes_no_window():
    spikes = np.hstack((np.arange(0, 10, 1.4), np.arange(0.2, 5, 0.3)))
    spikes = [nept.SpikeTrain(np.sort(spikes), 'test')]

    counts = nept.bin_spikes(spikes, 0.0, 10.0, dt=4., gaussian_std=None, normalized=False)

    assert np.allclose(counts.data, np.array([[16.], [6.]]))
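With no smoothing and normalized=False, the expected output is just a plain histogram over full dt-wide bins; the partial [8, 10) bin is apparently dropped, so the trailing spikes at 8.4 and 9.8 are not counted. A quick sanity check of that reading (mine, not part of the test suite):

import numpy as np

spikes = np.sort(np.hstack((np.arange(0, 10, 1.4), np.arange(0.2, 5, 0.3))))
counts, _ = np.histogram(spikes, bins=[0.0, 4.0, 8.0])
print(counts)  # [16  6] -- matches counts.data above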
Example #3
def test_bin_spikes_gaussian_even():
    spikes = [nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1])]

    counts = nept.bin_spikes(spikes, 0.0, 10.0, dt=0.5, window=2.,
                             gaussian_std=0.5, normalized=True)

    assert np.allclose(counts.data, np.array([[0.40134569],
                                              [0.77353559],
                                              [1.0133553 ],
                                              [1.06721847],
                                              [0.90916337],
                                              [0.64912917],
                                              [0.45410060],
                                              [0.31272558],
                                              [0.18949094],
                                              [0.07622698],
                                              [0.01460966],
                                              [0.00110826],
                                              [0.0],
                                              [0.0],
                                              [0.0],
                                              [0.0],
                                              [0.0],
                                              [0.0],
                                              [0.0]]))
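This test differs from Example #1 only in gaussian_std (0.5 vs 0.51), presumably exercising the even- vs odd-length smoothing-kernel branch. As a rough illustration of the idea (not nept's implementation), convolving the raw dt-wide counts with a Gaussian of standard deviation gaussian_std/dt bins gives a curve of the same general shape as the expected values:

import numpy as np
from scipy.ndimage import gaussian_filter1d

dt, gaussian_std = 0.5, 0.5
raw, _ = np.histogram([0.8, 1.1, 1.2, 1.2, 2.1, 3.1], bins=np.arange(0., 10.5, dt))
smoothed = gaussian_filter1d(raw.astype(float), sigma=gaussian_std / dt)
print(smoothed.round(3))  # a smooth bump peaking near t = 1.25, as above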
Example #4
def test_bin_spikes_actual():
    spikes = [nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1])]

    counts = nept.bin_spikes(spikes, 0.0, 4.0, dt=0.5,
                             window=2., gaussian_std=None, normalized=False)

    assert np.allclose(counts.data, np.array([[1.], [4.], [4.], [5.], [4.], [2.], [2.]]))
Example #5
def test_bin_spikes_mult_neurons_adjust_window():
    spikes = [nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1]),
              nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1])]

    counts = nept.bin_spikes(spikes, 0.0, 4.0, dt=0.5, window=2.5, gaussian_std=None)

    assert np.allclose(counts.data, np.array([[0.8, 0.8],
                                              [0.8, 0.8],
                                              [1.0, 1.0],
                                              [1.0, 1.0],
                                              [1.0, 1.0],
                                              [0.4, 0.4],
                                              [0.4, 0.4]]))
Example #6
def test_bin_spikes_normalized():
    spikes = [nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1])]

    counts = nept.bin_spikes(spikes,
                             0.0,
                             4.0,
                             dt=0.5,
                             window=2.,
                             gaussian_std=None,
                             normalized=True)

    assert np.allclose(
        counts.data, np.array([[0.25], [1.], [1.], [1.25], [1.], [0.5],
                               [0.5]]))
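Comparing this expected output with Example #4's raw counts shows what normalized=True does here: each windowed count is scaled by dt/window (0.5 / 2. = 0.25). Example #5 omits the argument and still returns fractional values, so normalized appears to default to True. A quick check of that relationship (an observation from the two expected arrays, not from nept's docs):

import numpy as np

raw = np.array([1., 4., 4., 5., 4., 2., 2.])               # Example #4
normalized = np.array([0.25, 1., 1., 1.25, 1., 0.5, 0.5])  # this test
dt, window = 0.5, 2.
assert np.allclose(raw * dt / window, normalized)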
Example #7
def get_likelihoods(info, swr_params, task_labels, n_shuffles=0, save_path=None):
    _, position, spikes, lfp, _ = get_data(info)

    zones = dict()
    zones["u"], zones["shortcut"], zones["novel"] = get_zones(info, position, subset=True)
    combined_zones = zones["u"] + zones["shortcut"] + zones["novel"]
    zones["other"] = ~combined_zones

    if n_shuffles > 0:
        n_passes = n_shuffles
    else:
        n_passes = 1

    session = Session(position, task_labels, zones)

    tuning_curves_fromdata = get_only_tuning_curves(info, position, spikes, info.task_times["phase3"])

    tc_shape = tuning_curves_fromdata.shape

    phase_for_zthresh = "pauseB"

    swrs = nept.detect_swr_hilbert(lfp,
                                   fs=info.fs,
                                   thresh=swr_params["swr_thresh"],
                                   z_thresh=info.lfp_z_thresh,
                                   merge_thresh=swr_params["merge_thresh"],
                                   min_length=swr_params["min_length"],
                                   times_for_z=nept.Epoch(info.task_times[phase_for_zthresh].start,
                                                          info.task_times[phase_for_zthresh].stop))

    swrs = nept.find_multi_in_epochs(spikes, swrs, min_involved=swr_params["min_involved"])

    rest_epochs = nept.rest_threshold(position, thresh=12., t_smooth=0.8)

    for task_label in task_labels:
        epochs_of_interest = info.task_times[task_label].intersect(rest_epochs)

        phase_swrs = epochs_of_interest.overlaps(swrs)
        phase_swrs = phase_swrs[phase_swrs.durations >= 0.05]

        phase_likelihoods = np.zeros((n_passes, phase_swrs.n_epochs, tc_shape[1], tc_shape[2]))
        phase_tuningcurves = np.zeros((n_passes, tc_shape[0], tc_shape[1], tc_shape[2]))
        for n_pass in range(n_passes):

            if n_shuffles > 0:
                tuning_curves = np.random.permutation(tuning_curves_fromdata)
            else:
                tuning_curves = tuning_curves_fromdata

            phase_tuningcurves[n_pass] = tuning_curves
            tuning_curves = tuning_curves.reshape(tc_shape[0], tc_shape[1] * tc_shape[2])

            if phase_swrs.n_epochs == 0:
                phase_likelihoods = np.ones((n_passes, 1, tc_shape[1], tc_shape[2])) * np.nan
            else:
                counts_data = []
                counts_time = []
                t_windows = []

                for n_timebin, (start, stop) in enumerate(zip(phase_swrs.starts,
                                                              phase_swrs.stops)):
                    t_window = stop - start  # 0.1 for running, 0.025 for swr

                    sliced_spikes = [spiketrain.time_slice(start, stop) for spiketrain in spikes]

                    these_counts = nept.bin_spikes(sliced_spikes,
                                                   start,
                                                   stop,
                                                   dt=t_window,
                                                   gaussian_std=0.0075,
                                                   normalized=False,
                                                   lastbin=True)

                    counts_data.append(these_counts.data)
                    counts_time.append(these_counts.time)
                    t_windows.append(t_window)

                counts = nept.AnalogSignal(np.vstack(counts_data), np.hstack(counts_time))
                likelihood = nept.bayesian_prob(counts,
                                                tuning_curves,
                                                binsize=t_windows,
                                                min_neurons=3,
                                                min_spikes=1)

                phase_likelihoods[n_pass] = likelihood.reshape(phase_swrs.n_epochs, tc_shape[1], tc_shape[2])

        tasktime = getattr(session, task_label)
        tasktime.likelihoods = phase_likelihoods
        tasktime.tuning_curves = phase_tuningcurves
        tasktime.swrs = phase_swrs

    if save_path is not None:
        session.pickle(save_path)

    return session
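A hypothetical call, for orientation only. The swr_params keys below are exactly the ones get_likelihoods reads; the values are borrowed from the hard-coded ones in Example #8, and info and task_labels are assumed to come from the project's session-info modules:

swr_params = {
    "swr_thresh": (140.0, 250.0),  # ripple band in Hz (assumed)
    "merge_thresh": 0.02,
    "min_length": 0.05,
    "min_involved": 4,
}
task_labels = ["pauseA", "pauseB"]  # hypothetical; must be keys of info.task_times
session = get_likelihoods(info, swr_params, task_labels, n_shuffles=100,
                          save_path="likelihoods.pkl")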
Example #8
def get_decoded(info, dt, gaussian_std, min_neurons, min_spikes, min_swr, neurons, normalized, run_time,
                speed_limit, t_smooth, shuffle_id, window, decoding_times, min_proportion_decoded,
                decode_sequences, sequence_len=3, sequence_speed=5., min_epochs=3, random_shuffle=False):
    """Finds decoded for each session.

    Parameters
    ----------
    info: module
    dt: float
    gaussian_std: float
    min_epochs: int
    min_neurons: int
    min_spikes: int
    min_swr: int
    neurons: nept.Neurons
    normalized: boolean
    run_time: boolean
    speed_limit: float
    sequence_speed: float
    shuffle_id: boolean
    window: float
    min_proportion_decoded: float
    decoding_times: nept.Epoch
    decode_sequences: bool
    sequence_len: int
    sequence_speed: float
    random_shuffle: bool

    Returns
    -------
    decoded: nept.Position
    decoded_epochs: nept.Epoch
    errors: np.array

    """
    if decoding_times.n_epochs != 1:
        raise AssertionError("decoding_times must only contain one epoch (start, stop)")

    events, position, all_spikes, lfp, lfp_theta = get_data(info)
    xedges, yedges = nept.get_xyedges(position, binsize=8)

    sliced_spikes = neurons.time_slice(decoding_times.start, decoding_times.stop)
    position = position.time_slice(decoding_times.start, decoding_times.stop)

    sliced_spikes = sliced_spikes.spikes

    if shuffle_id:
        tuning_curves = np.random.permutation(neurons.tuning_curves)
    else:
        tuning_curves = neurons.tuning_curves

    if run_time:
        # limit position to only times when the subject is moving faster than a certain threshold
        run_epoch = nept.run_threshold(position, thresh=speed_limit, t_smooth=t_smooth)
        position = position[run_epoch]

        epochs_interest = nept.Epoch(position.time[0], position.time[-1])
    else:
        sliced_lfp = lfp.time_slice(decoding_times.start, decoding_times.stop)

        z_thresh = 3.0
        power_thresh = 5.0
        merge_thresh = 0.02
        min_length = 0.05
        swrs = nept.detect_swr_hilbert(sliced_lfp, fs=info.fs, thresh=(140.0, 250.0), z_thresh=z_thresh,
                                       merge_thresh=merge_thresh, min_length=min_length)

        min_involved = 4
        multi_swr = nept.find_multi_in_epochs(sliced_spikes, swrs, min_involved=min_involved)

        if multi_swr.n_epochs < min_swr:
            epochs_interest = nept.Epoch([], [])
        else:
            epochs_interest = multi_swr

        # print('sharp-wave ripples, total:', swrs.n_epochs)
        # print('sharp-wave ripples, min', min_involved, 'neurons :', multi_swr.n_epochs)
        # print('sharp-wave ripples, used :', epochs_interest.n_epochs)
        # print('sharp-wave ripples, mean durations: ', np.mean(epochs_interest.durations))

    counts = nept.bin_spikes(sliced_spikes, position.time, dt=dt, window=window,
                             gaussian_std=gaussian_std, normalized=normalized)

    tc_shape = tuning_curves.shape
    decoding_tc = tuning_curves.reshape(tc_shape[0], tc_shape[1] * tc_shape[2])

    likelihood = nept.bayesian_prob(counts, decoding_tc, window, min_neurons=min_neurons, min_spikes=min_spikes)

    xcenters, ycenters = get_bin_centers(info)
    xy_centers = nept.cartesian(xcenters, ycenters)

    decoded = nept.decode_location(likelihood, xy_centers, counts.time)
    nan_idx = np.logical_and(np.isnan(decoded.x), np.isnan(decoded.y))
    decoded = decoded[~nan_idx]

    if random_shuffle:
        random_x = [np.random.choice(decoded.x) for val in decoded.x]
        random_y = [np.random.choice(decoded.y) for val in decoded.y]

        decoded = nept.Position(np.array([random_x, random_y]).T, decoded.time)

    if decode_sequences:
        print('decoded n_samples before sequence:', decoded.n_samples)
        sequences = nept.remove_teleports(decoded, speed_thresh=sequence_speed, min_length=sequence_len)
        decoded_epochs = epochs_interest.intersect(sequences)
        decoded_epochs = decoded_epochs.expand(0.002)

        if decoded_epochs.n_epochs < min_epochs:
            decoded = nept.Position(np.array([]), np.array([]))
        else:
            decoded = decoded[decoded_epochs]

        print('decoded n_samples after sequence:', decoded.n_samples)
    else:
        decoded_epochs = epochs_interest

    f_xy = scipy.interpolate.interp1d(position.time, position.data.T, kind="nearest")
    decoded_xy = f_xy(decoded.time)
    actual_position = nept.Position(np.hstack((decoded_xy[0][..., np.newaxis],
                                               decoded_xy[1][..., np.newaxis])),
                                    decoded.time)

    if decoded.n_samples > 0:
        errors = actual_position.distance(decoded)
    else:
        errors = np.array([])

    percent_decoded = (decoded.n_samples/counts.n_samples)*100

    if (decoded.n_samples/counts.n_samples) < min_proportion_decoded:
        print("Decoded bins make up %d%% of possible bins ..."
              "removing due to too few bins" % percent_decoded)

        decoded = nept.Position([np.array([]), np.array([])], np.array([]))
        decoded_epochs = nept.Epoch([np.array([]), np.array([])])
        errors = np.array([])
        actual_position = nept.Position([np.array([]), np.array([])], np.array([]))

    return decoded, decoded_epochs, errors, actual_position, likelihood, percent_decoded
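Distilled to its core, the chain above is bin_spikes -> bayesian_prob -> decode_location. A minimal sketch of that chain, assuming the (start, stop) bin_spikes signature from the tests above and tuning curves shaped (n_neurons, n_ybins, n_xbins) as the reshape calls imply:

import nept

def decode_span(spikes, t_start, t_stop, tuning_curves, xy_centers,
                dt=0.025, window=0.025):
    # Bin spikes, flatten the 2D tuning curves, and take the
    # maximum-likelihood spatial bin for each valid time bin.
    counts = nept.bin_spikes(spikes, t_start, t_stop, dt=dt, window=window,
                             gaussian_std=0.0075, normalized=False)
    n_neurons, n_ybins, n_xbins = tuning_curves.shape
    likelihood = nept.bayesian_prob(counts,
                                    tuning_curves.reshape(n_neurons, n_ybins * n_xbins),
                                    binsize=dt, min_neurons=3, min_spikes=1)
    return nept.decode_location(likelihood, xy_centers, counts.time)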
Example #9
def get_decoded(info, position, spikes, shuffled_id):

    phase = info.task_times["phase3"]
    sliced_position = position.time_slice(phase.start, phase.stop)

    # trials = get_trials(events, phase)
    trials = get_matched_trials(info, sliced_position)

    error_byactual_position = np.zeros((len(info.yedges), len(info.xedges)))
    n_byactual_position = np.ones((len(info.yedges), len(info.xedges)))

    session_n_active = []
    session_likelihoods = []
    session_decoded = []
    session_actual = []
    session_errors = []
    n_timebins = []

    for trial in trials:
        epoch_of_interest = phase.excludes(trial)

        tuning_curves = get_only_tuning_curves(info,
                                               position,
                                               spikes,
                                               epoch_of_interest)

        if shuffled_id:
            tuning_curves = np.random.permutation(tuning_curves)

        sliced_position = position.time_slice(trial.start, trial.stop)

        sliced_spikes = [spiketrain.time_slice(trial.start,
                                               trial.stop) for spiketrain in spikes]

        # limit position to only times when the subject is moving faster than a certain threshold
        run_epoch = nept.run_threshold(sliced_position, thresh=10., t_smooth=0.8)
        sliced_position = sliced_position[run_epoch]

        sliced_spikes = [spiketrain.time_slice(run_epoch.start,
                                               run_epoch.stop) for spiketrain in sliced_spikes]

        # epochs_interest = nept.Epoch(sliced_position.time[0], sliced_position.time[-1])

        t_window = 0.025  # 0.1 for running, 0.025 for swr

        counts = nept.bin_spikes(sliced_spikes, sliced_position.time, dt=t_window, window=t_window,
                                 gaussian_std=0.0075, normalized=False)

        n_timebins.append(len(counts.time))

        min_neurons = 3

        tc_shape = tuning_curves.shape
        decoding_tc = tuning_curves.reshape(tc_shape[0], tc_shape[1] * tc_shape[2])

        likelihood = nept.bayesian_prob(counts, decoding_tc, binsize=t_window, min_neurons=min_neurons, min_spikes=1)

        # Find decoded location based on max likelihood for each valid timestep
        xcenters, ycenters = get_bin_centers(info)
        xy_centers = nept.cartesian(xcenters, ycenters)
        decoded = nept.decode_location(likelihood, xy_centers, counts.time)

        session_decoded.append(decoded)

        # Remove nans from likelihood and reshape for plotting
        keep_idx = np.sum(np.isnan(likelihood), axis=1) < likelihood.shape[1]
        likelihood = likelihood[keep_idx]
        likelihood = likelihood.reshape(np.shape(likelihood)[0], tc_shape[1], tc_shape[2])

        session_likelihoods.append(likelihood)

        n_active_neurons = np.asarray([n_active if n_active >= min_neurons else 0
                                       for n_active in np.sum(counts.data >= 1, axis=1)])
        n_active_neurons = n_active_neurons[keep_idx]
        session_n_active.append(n_active_neurons)

        f_xy = scipy.interpolate.interp1d(sliced_position.time, sliced_position.data.T, kind="nearest")
        counts_xy = f_xy(decoded.time)
        true_position = nept.Position(np.hstack((counts_xy[0][..., np.newaxis],
                                                 counts_xy[1][..., np.newaxis])),
                                      decoded.time)

        session_actual.append(true_position)

        trial_errors = true_position.distance(decoded)
        session_errors.append(trial_errors)

    return session_decoded, session_actual, session_likelihoods, session_errors, session_n_active, n_timebins
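This variant does leave-one-trial-out decoding: tuning curves are estimated from the rest of phase3 (phase.excludes(trial)) and the held-out trial is then decoded. A hypothetical call, with info, position, and spikes as returned by get_data, and shuffled_id=True as the identity-shuffle control:

(session_decoded, session_actual, session_likelihoods,
 session_errors, session_n_active, n_timebins) = get_decoded(info, position, spikes,
                                                             shuffled_id=False)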