from os import mkdir
from os.path import join, isdir
import alf
import numpy as np
import matplotlib.pyplot as plt
from ephys_functions import paths, check_trials
import brainbox.io.one as bbone
from oneibl.one import ONE
one = ONE()

# Settings
REGION = 'BLA'
TEST_PRE_TIME = 0.6
TEST_POST_TIME = -0.1
PLOT_PRE_TIME = 0.5
PLOT_POST_TIME = 1
ALPHA = 0.1
FIG_PATH = paths()[1]

# Query sessions with at least one channel in the region of interest
ses = one.alyx.rest('sessions',
                    'list',
                    atlas_acronym=REGION,
                    task_protocol='_iblrig_tasks_ephysChoiceWorld',
                    project='ibl_neuropixel_brainwide')

# Make directory for region
if not isdir(join(FIG_PATH, 'PSTH', 'Block', REGION)):
    mkdir(join(FIG_PATH, 'PSTH', 'Block', REGION))

# Loop over sessions
for i, eid in enumerate([j['url'][-36:] for j in ses]):
    print('Processing session %d of %d' % (i + 1, len(ses)))
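    # Hedged sketch of how the loop body might continue (not part of the original
    # excerpt): load the trials and spike sorting for this session with the modules
    # imported above. check_trials is assumed here to return False for sessions
    # with incomplete trial data.
    trials = one.load_object(eid, 'trials')
    if not check_trials(trials):
        continue
    spikes, clusters, channels = bbone.load_spike_sorting_with_channel(eid, one=one)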
Example #2
from os.path import join
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.optimize import curve_fit
from scipy.stats import pearsonr
from ephys_functions import paths, figure_style
import brainbox.io.one as bbone
from oneibl.one import ONE
one = ONE()

# Settings
BIN_SIZE = 50  # in ms
BIN_START = np.arange(-1050, -50, 50)  # ms relative to go cue
MIN_NEURONS = 10
DATA_PATH, FIG_PATH, SAVE_PATH = paths()
FIG_PATH = join(FIG_PATH, 'WholeBrain')


def exponential_decay(x, A, tau, B):
    y = (A * np.exp(-(x / tau))) + B
    return y
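

def _demo_fit_exponential_decay():
    # Hedged sketch, not part of the original script: fit exponential_decay to a
    # synthetic autocorrelation curve with scipy's curve_fit (imported above) to
    # recover the time constant tau. The lag grid, synthetic values and initial
    # guesses are assumptions made purely for illustration.
    lags = np.arange(0, 1000, BIN_SIZE).astype(float)   # lags in ms
    acorr = 0.8 * np.exp(-lags / 200) + 0.1             # fake data with tau = 200 ms
    (A, tau, B), _ = curve_fit(exponential_decay, lags, acorr, p0=[0.5, 150, 0])
    return tau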


def _get_spike_counts_in_bins(spike_times, spike_clusters, intervals):
    """
    Return the number of spikes in a sequence of time intervals, for each neuron.

    Parameters
    ----------
    spike_times : 1D array
Example #3
    # The next two lines are restored from the full helper (this excerpt starts
    # mid-function): map interval edges to spike indices and list the cluster ids.
    intervals_idx = np.searchsorted(spike_times, intervals)
    cluster_ids = np.unique(spike_clusters)
    n_neurons = len(cluster_ids)
    n_intervals = intervals.shape[0]
    counts = np.zeros((n_neurons, n_intervals), dtype=np.uint32)
    for j in range(n_intervals):
        t0, t1 = intervals[j, :]
        # Count the number of spikes in the window, for each neuron.
        x = np.bincount(spike_clusters[intervals_idx[j, 0]:intervals_idx[j, 1]],
                        minlength=cluster_ids.max() + 1)
        counts[:, j] = x[cluster_ids]
    return counts, cluster_ids
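

def _demo_spike_counts(spikes, trials, probe):
    # Hedged sketch, not part of the original script: build one counting window
    # per trial and per entry of BIN_START (both defined in ms further up in this
    # listing, converted to seconds here) and count spikes per neuron with the
    # _get_spike_counts_in_bins helper above. The argument names mirror the
    # objects loaded below but are assumptions in this sketch.
    go_cue = trials.goCue_times[~np.isnan(trials.goCue_times)]  # drop trials without a go cue
    bin_starts = go_cue[:, None] + BIN_START / 1000              # (n_trials, n_bins), in seconds
    bin_ends = bin_starts + BIN_SIZE / 1000
    intervals = np.c_[bin_starts.ravel(), bin_ends.ravel()]      # one row per counting window
    counts, cluster_ids = _get_spike_counts_in_bins(
        spikes[probe].times, spikes[probe].clusters, intervals)
    return counts, cluster_ids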


# %%
# Set path to save plots
DATA_PATH, FIG_PATH, _ = paths()
FIG_PATH = join(FIG_PATH, 'TimeConstant')

# Load in data (SUBJECT, DATE and PROBE are set in the settings block of the full script,
# not shown in this excerpt)
eids = one.search(subject=SUBJECT, date_range=DATE)
spikes, clusters, channels = bbone.load_spike_sorting_with_channel(eids[0],
                                                                   one=one)
trials = one.load_object(eids[0], 'trials')

# Only use single units
probe = 'probe' + PROBE
spikes[probe].times = spikes[probe].times[np.isin(
    spikes[probe].clusters, clusters[probe].metrics.cluster_id[
        clusters[probe].metrics.ks2_label == 'good'])]
spikes[probe].clusters = spikes[probe].clusters[np.isin(
    spikes[probe].clusters, clusters[probe].metrics.cluster_id[