Code Example #1
def get_tau_R_and_R_tot(T_0, setup, regularization_method, recorded_system, rec_length, neuron_index, CODE_DIR, use_settings_path):
    # Load the estimated history dependence R(T) and reduce it to the
    # information timescale tau_R and the total history dependence R_tot.
    ANALYSIS_DIR, analysis_num_str, R_tot, T_D, T, R, R_CI_lo, R_CI_hi = plots.load_analysis_results(
        recorded_system, rec_length, neuron_index, setup, CODE_DIR,
        regularization_method=regularization_method, use_settings_path=use_settings_path)
    # recompute R_tot from the R(T) curve and its lower confidence bound
    R_tot = plots.get_R_tot(T, R, R_CI_lo)[0]
    dR = plots.get_dR(T, R, R_tot)
    tau_R = plots.get_T_avg(T, dR, T_0)
    return tau_R, R_tot
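A minimal usage sketch; the argument values below are placeholders taken from the later examples, and the surrounding imports (numpy as np and the project's plots module) are assumed:

# Hypothetical invocation; paths and indices depend on the local analysis layout.
tau_R, R_tot = get_tau_R_and_R_tot(
    T_0=0.0997, setup='full_bbc', regularization_method='bbc',
    recorded_system='glif_1s_kernel', rec_length='90min',
    neuron_index=4, CODE_DIR='.', use_settings_path=False)
print('tau_R: %.4f s, R_tot: %.3f' % (tau_R, R_tot))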
Code Example #2
def get_tau_R(spikes, past, l, R_tot):
    T = np.arange(1, l + 1)
    R_arr = []
    for t in T:
        R_plugin = get_R_plugin(spikes, past, t)[0]
        R_arr += [R_plugin]
    dR_arr = plots.get_dR(T, R_arr, R_tot)
    # add 0.5 because get_T_avg averages points at the centers of bins; here the smallest time step is one bin, so we want to average at the bin edges
    tau_R = plots.get_T_avg(T, dR_arr, 0) + 0.5
    return tau_R, R_arr, dR_arr, T
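This and the next example both rely on plots.get_T_avg. A minimal sketch of what it plausibly computes, namely the dR-weighted average timescale above T_0 (an assumption; the packaged version additionally handles bin centering, which is why the +0.5 correction appears above):

import numpy as np

def get_T_avg_sketch(T, dR, T_0):
    # Weighted average of the timescales T with weights dR, restricted to T > T_0.
    # Hypothetical re-implementation for illustration only.
    T, dR = np.asarray(T, dtype=float), np.asarray(dR, dtype=float)
    mask = T > T_0
    return np.sum(T[mask] * dR[mask]) / np.sum(dR[mask])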
Code Example #3
def get_tau_lagged_MI(spikes, max_steps):
    T_lagged_MI = np.arange(1, max_steps)
    lagged_MI_arr = []
    for t in T_lagged_MI:
        lagged_MI = get_lagged_MI(spikes, t)
        lagged_MI_arr += [lagged_MI]
    # add 0.5 because get_T_avg averages points at the centers of bins; here the smallest time step is one bin, so we want to average at the bin edges
    tau_lagged_MI = plots.get_T_avg(T_lagged_MI, np.array(lagged_MI_arr), 0) + 0.5
    return tau_lagged_MI, lagged_MI_arr, T_lagged_MI
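get_lagged_MI is not defined in these snippets. For binary activity, a plugin estimate of the mutual information between x_t and x_{t+lag} could look like this (hypothetical re-implementation):

import numpy as np

def get_lagged_MI_sketch(spikes, lag):
    # Plugin MI (in bits) between the binary activity x_t and x_{t+lag}.
    x, y = spikes[:-lag], spikes[lag:]
    p_joint = np.histogram2d(x, y, bins=2)[0] / len(x)
    p_x, p_y = p_joint.sum(axis=1), p_joint.sum(axis=0)
    mask = p_joint > 0
    return np.sum(p_joint[mask] * np.log2(
        p_joint[mask] / np.outer(p_x, p_y)[mask]))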
Code Example #4
def get_auto_correlation_time(spikes, min_steps, max_steps, bin_size_ms):
    rk = mre.coefficients(spikes, dt=bin_size_ms, steps=(min_steps, max_steps))
    T = (rk.steps - rk.steps[0] + 1) * bin_size_ms
    fit = mre.fit(rk, steps=(min_steps, max_steps), fitfunc=mre.f_exponential)
    tau_C = fit.tau
    C_raw = rk.coefficients
    n_lags = int(6 * tau_C)  # integration window derived from the fitted tau_C
    tau_C_int = plots.get_T_avg(T[:n_lags + 1], C_raw[:n_lags + 1], 0)
    print('tau_C (exp. fit):', tau_C, 'tau_C (integrated):', tau_C_int)
    return tau_C_int, rk, T, fit
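A usage sketch on synthetic data; mre.simulate_branching ships with the mrestimator package, and the parameter values here are arbitrary:

import mrestimator as mre

# Activity of a branching process; m close to 1 gives a long autocorrelation time.
activity = mre.simulate_branching(m=0.98, a=100, length=20000)[0]
tau_C_int, rk, T, fit = get_auto_correlation_time(
    activity, min_steps=1, max_steps=150, bin_size_ms=5.0)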
Code Example #5
recorded_system = 'glif_1s_kernel'
rec_length = '90min'
sample_index = 4
use_settings_path = False
T_0 = 0.0997  # lower bound T_0 for the timescale average (in seconds)
"""Load data"""
# load estimate of ground truth
R_tot_true = np.load('{}/analysis/{}/R_tot_900min.npy'.format(
    CODE_DIR, recorded_system))
T_true, R_true = plots.load_analysis_results_glm_Simulation(
    CODE_DIR, recorded_system, use_settings_path)
# extend the ground-truth curve to long timescales where R has saturated at R_tot
T_true = np.append(T_true, [1.0, 3.0])
R_true = np.append(R_true, [R_tot_true, R_tot_true])
dR_true = plots.get_dR(T_true, R_true, R_tot_true)
tau_R_true = plots.get_T_avg(T_true, dR_true, T_0)

# Load settings from yaml file
setup = 'full_bbc'

ANALYSIS_DIR, analysis_num_str, R_tot_bbc, T_D_bbc, T, R_bbc, R_bbc_CI_lo, R_bbc_CI_hi = plots.load_analysis_results(
    recorded_system,
    rec_length,
    sample_index,
    setup,
    CODE_DIR,
    regularization_method='bbc',
    use_settings_path=use_settings_path)

R_tot_bbc, T_D_index_bbc, max_valid_index_bbc = plots.get_R_tot(
    T, R_bbc, R_bbc_CI_lo)
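To compare against the ground truth loaded above, the BBC estimate can be reduced to the same two quantities; this sketch mirrors what Code Example #6 does:

dR_bbc = plots.get_dR(T, R_bbc, R_tot_bbc)
tau_R_bbc = plots.get_T_avg(T, dR_bbc, T_0)
print('tau_R true: %.4f s, bbc: %.4f s' % (tau_R_true, tau_R_bbc))
print('R_tot true: %.3f, bbc: %.3f' % (R_tot_true, R_tot_bbc))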
Code Example #6
"""Load data full"""
setup = 'full_bbc'
ANALYSIS_DIR, analysis_num_str, R_tot_bbc, T_D_bbc, T, R_bbc, R_bbc_CI_lo, R_bbc_CI_hi = plots.load_analysis_results(
    recorded_system,
    rec_length,
    neuron_index,
    setup,
    CODE_DIR,
    regularization_method='bbc',
    use_settings_path=use_settings_path)
R_tot_bbc, T_D_index_bbc, max_valid_index_bbc = plots.get_R_tot(
    T, R_bbc, R_bbc_CI_lo)
dR_bbc = plots.get_dR(T, R_bbc, R_tot_bbc)
T = T * 1000  # transform measures to ms
T_D_bbc = T_D_bbc * 1000
tau_R_bbc = plots.get_T_avg(T, dR_bbc, T_0)

# Get R_tot_glm for T_D
R_tot_glm = plots.load_analysis_results_glm(ANALYSIS_DIR, analysis_num_str)

setup = 'full_shuffling'
ANALYSIS_DIR, analysis_num_str, R_tot_shuffling, T_D_shuffling, T, R_shuffling, R_shuffling_CI_lo, R_shuffling_CI_hi = plots.load_analysis_results(
    recorded_system,
    rec_length,
    neuron_index,
    setup,
    CODE_DIR,
    regularization_method='shuffling',
    use_settings_path=use_settings_path)
R_tot_shuffling, T_D_index_shuffling, max_valid_index_shuffling = plots.get_R_tot(
    T, R_shuffling, R_shuffling_CI_lo)
Code Example #7

# NOTE: this snippet begins mid-loop in the source; the loop header below and
# the dropped first `if` branch are a hypothetical reconstruction.
for tau in taus:
    sample_index = tau
    m = np.exp(-bin_size * 1000 / tau)
    ANALYSIS_DIR, analysis_num_str, R_tot, T_D, T_R, R, R_CI_lo, R_CI_hi = plots.load_analysis_results(
        recorded_system,
        rec_length,
        sample_index,
        setup,
        CODE_DIR,
        regularization_method='shuffling',
        use_settings_path=use_settings_path)
    R_tot, T_D_index, max_valid_index = plots.get_R_tot(T_R, R, R_CI_lo)
    dR_arr = plots.get_dR(T_R, R, R_tot)
    # transform T to ms
    T_R_ms = T_R * 1000
    tau_R = plots.get_T_avg(T_R_ms, dR_arr, T_0_ms)
    m_list += [m]
    R_tot_list += [R_tot]
    tau_R_list += [tau_R]
"""Plotting"""
rc('text', usetex=True)
matplotlib.rcParams['font.size'] = '15.0'
matplotlib.rcParams['xtick.labelsize'] = '15'
matplotlib.rcParams['ytick.labelsize'] = '15'
matplotlib.rcParams['legend.fontsize'] = '15'
matplotlib.rcParams['axes.linewidth'] = 0.6

fig, (ax1, ax2) = plt.subplots(nrows=2,
                               ncols=1,
                               figsize=(2.8, 3.5),
                               sharex=True)
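The snippet ends after creating the axes. A hypothetical continuation plotting the quantities collected in the loop above (axis labels are guesses):

ax1.plot(m_list, R_tot_list, 'o-', color='0.2')
ax1.set_ylabel(r'$R_{\mathrm{tot}}$')
ax2.plot(m_list, tau_R_list, 'o-', color='0.2')
ax2.set_xlabel(r'$m$')
ax2.set_ylabel(r'$\tau_R$ (ms)')
fig.tight_layout()
plt.show()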
Code Example #8
spiketimes = spiketimes - spiketimes[0]
Trec = spiketimes[-1] - spiketimes[0]
counts_from_sptimes = utl.get_binned_neuron_activity(spiketimes, bin_size)
"""Compute measures"""
# Corr
rk = mre.coefficients(counts_from_sptimes,
                      dt=bin_size_ms,
                      steps=(min_step_autocorrelation,
                             max_step_autocorrelation))
T_C_ms = rk.steps * bin_size_ms
fit = mre.fit(rk, steps=(5, 500), fitfunc=mre.f_exponential_offset)
tau_est = fit.tau
rk_offset = fit.popt[2]
# computing integrated timescale on raw data
C_raw = rk.coefficients - rk_offset
tau_C_raw = plots.get_T_avg(T_C_ms, C_raw, T_0_ms)
# computing integrated timescale on fitted curve
# fit.tau is returned in ms (dt units); convert it to steps for the fit function
C_fit = mre.f_exponential_offset(
    rk.steps, fit.tau / bin_size_ms, *fit.popt[1:]) - rk_offset
tau_C_fit = plots.get_T_avg(T_C_ms, C_fit, T_0_ms)
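# For reference: mrestimator's f_exponential_offset is, up to parameter order,
# C(k) = A * exp(-k / tau) + O with tau in units of steps here, so subtracting
# rk_offset leaves only the exponential part of the fitted curve.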

# R and Delta R
ANALYSIS_DIR, analysis_num_str, R_tot, T_D, T_R, R, R_CI_lo, R_CI_hi = plots.load_analysis_results(
    recorded_system,
    rec_length,
    sample_index,
    setup,
    CODE_DIR,
    regularization_method='shuffling',
    use_settings_path=use_settings_path)
T_R_plotting = np.append([0], T_R) * 1000
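A sketch that completes the comparison this snippet sets up, reducing R(T) to a single timescale with the same helpers as the earlier examples (hypothetical):

R_tot, T_D_index, max_valid_index = plots.get_R_tot(T_R, R, R_CI_lo)
dR_arr = plots.get_dR(T_R, R, R_tot)
tau_R = plots.get_T_avg(T_R * 1000, dR_arr, T_0_ms)
print('tau_C raw: %.1f ms, tau_C fit: %.1f ms, tau_R: %.1f ms'
      % (tau_C_raw, tau_C_fit, tau_R))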
Code Example #9
    """Load data"""
    # Load settings from yaml file

    ANALYSIS_DIR, analysis_num_str, R_tot_bbc, T_D_bbc, T_bbc, R_bbc, R_bbc_CI_lo, R_bbc_CI_hi = plots.load_analysis_results(
        recorded_system,
        rec_length,
        sample_index,
        bbc_setup,
        CODE_DIR,
        regularization_method='bbc',
        use_settings_path=use_settings_path)

    R_tot_bbc, T_D_index_bbc, max_valid_index_bbc = plots.get_R_tot(
        T_bbc, R_bbc, R_bbc_CI_lo)
    dR_bbc = plots.get_dR(T_bbc, R_bbc, R_tot_bbc)
    tau_R_bbc = plots.get_T_avg(T_bbc, dR_bbc, T_0)

    glm_bbc_csv_file_name = '{}/ANALYSIS{}/glm_benchmark_bbc.csv'.format(
        ANALYSIS_DIR, analysis_num_str)
    glm_bbc_pd = pd.read_csv(glm_bbc_csv_file_name)
    R_glm_bbc = np.array(glm_bbc_pd['R_GLM'])
    T_glm_bbc = np.array(glm_bbc_pd['T'])

    ANALYSIS_DIR, analysis_num_str, R_tot_shuffling, T_D_shuffling, T_shuffling, R_shuffling, R_shuffling_CI_lo, R_shuffling_CI_hi = plots.load_analysis_results(
        recorded_system,
        rec_length,
        sample_index,
        shuffling_setup,
        CODE_DIR,
        regularization_method='shuffling',
        use_settings_path=use_settings_path)
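The shuffling-regularized GLM benchmark is presumably read in the same way as the BBC one above; a sketch assuming a parallel csv file exists:

    glm_shuffling_csv_file_name = '{}/ANALYSIS{}/glm_benchmark_shuffling.csv'.format(
        ANALYSIS_DIR, analysis_num_str)  # hypothetical file name
    glm_shuffling_pd = pd.read_csv(glm_shuffling_csv_file_name)
    R_glm_shuffling = np.array(glm_shuffling_pd['R_GLM'])
    T_glm_shuffling = np.array(glm_shuffling_pd['T'])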
Code Example #10
    # '2-303': long-range (in the new validNeurons script index 1)
    # '2-357' : bursty (in the new validNeurons script index 30)

    ANALYSIS_DIR, analysis_num_str, R_tot, T_D, T, R, R_CI_lo, R_CI_hi = plots.load_analysis_results(
        recorded_system,
        rec_length,
        neuron_index,
        setup,
        CODE_DIR,
        regularization_method=regularization_method,
        use_settings_path=use_settings_path)
    R_tot, T_D_index, max_valid_index = plots.get_R_tot(T, R, R_CI_lo)
    dR = plots.get_dR(T, R, R_tot)
    T = T * 1000  # transform to ms
    T_D = T_D * 1000
    tau_R = plots.get_T_avg(T, dR, T_0)
    """Plot"""

    rc('text', usetex=True)
    matplotlib.rcParams['font.size'] = '16.0'
    matplotlib.rcParams['xtick.labelsize'] = '16'
    matplotlib.rcParams['ytick.labelsize'] = '16'
    matplotlib.rcParams['legend.fontsize'] = '16'
    matplotlib.rcParams['axes.linewidth'] = 0.6
    # Colors
    main_red = sns.color_palette("RdBu_r", 15)[12]
    main_blue = sns.color_palette("RdBu_r", 15)[1]
    soft_red = sns.color_palette("RdBu_r", 15)[10]
    soft_blue = sns.color_palette("RdBu_r", 15)[4]
    violet = sns.cubehelix_palette(8)[4]
    green = sns.cubehelix_palette(8, start=.5, rot=-.75)[3]
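A hypothetical plot using the colors defined above, marking the temporal depth T_D on the R(T) curve:

    fig, ax = plt.subplots(figsize=(3.5, 2.5))
    ax.plot(T, R, color=main_blue, label=r'$R(T)$')
    ax.axvline(T_D, color=main_red, linestyle='--', label=r'$T_D$')
    ax.set_xscale('log')
    ax.set_xlabel(r'$T$ (ms)')
    ax.legend(frameon=False)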
Code Example #11
spiketimes = spiketimes - spiketimes[0]
Trec = spiketimes[-1] - spiketimes[0]
counts_from_sptimes = utl.get_binned_neuron_activity(spiketimes, bin_size)

"""Compute measures"""
# Corr
rk = mre.coefficients(counts_from_sptimes, dt=bin_size_ms,
                      steps=(min_step_autocorrelation, max_step_autocorrelation))
T_C_ms = rk.steps * bin_size_ms

# R and Delta R
R_tot = np.load('{}/analysis/{}/R_tot_simulation.npy'.format(CODE_DIR, recorded_system))
T_R, R = plots.load_analysis_results_glm_Simulation(
    CODE_DIR, recorded_system, use_settings_path=use_settings_path)
dR_arr = plots.get_dR(T_R, R, R_tot)
# transform T to ms
T_R_ms = T_R * 1000
tau_R = plots.get_T_avg(T_R_ms, dR_arr, T_0_ms)
T_R_plotting = np.append([0], T_R_ms)
R_plotting = np.append([0], R)

# lagged mutual information
lagged_MI = np.load('{}/analysis/{}/lagged_MI.npy'.format(CODE_DIR, recorded_system))
tau_L = plots.get_T_avg(T_R_ms, lagged_MI, T_0_ms)


"""Plotting"""
rc('text', usetex=True)
matplotlib.rcParams['font.size'] = '15.0'
matplotlib.rcParams['xtick.labelsize'] = '15'
matplotlib.rcParams['ytick.labelsize'] = '15'
matplotlib.rcParams['legend.fontsize'] = '15'
matplotlib.rcParams['axes.linewidth'] = 0.6
Code Example #12
# Neuron '2-338'
neuron_index = 20
"""Load data R"""

ANALYSIS_DIR, analysis_num_str, R_tot, T_D, T_R, R, R_CI_lo, R_CI_hi = plots.load_analysis_results(
    recorded_system,
    rec_length,
    neuron_index,
    setup,
    CODE_DIR,
    regularization_method=regularization_method,
    use_settings_path=use_settings_path)
R_tot, T_D_index, max_valid_index = plots.get_R_tot(T_R, R, R_CI_lo)
dR = plots.get_dR(T_R, R, R_tot)
T_R_ms = T_R * 1000  # transform measures to ms
tau_R = plots.get_T_avg(T_R_ms, dR, T_0_ms)
T_R_plotting = np.append(np.append([0], T_R_ms), [2000])
dR_plotting = np.append(dR, [0])
R_plotting = np.append(np.append([0], R), [R_tot])
print('R_tot:', R_tot)
"""Load and preprocess data"""
DATA_DIR = '{}/data/neuropixels/Waksman'.format(CODE_DIR)
validNeurons = np.load('{}/validNeurons.npy'.format(DATA_DIR)).astype(int)
neuron = validNeurons[neuron_index]
print(neuron)
spiketimes = np.load('{}/V1/spks/spiketimes-{}-{}.npy'.format(
    DATA_DIR, neuron[0], neuron[1]))

# Add 5 seconds to make sure that only spikes with sufficient spiking history are considered
t_0 = spiketimes[0] + 5.
spiketimes = spiketimes - t_0
Code Example #13
mean_tau_R = {}
mean_CI_tau_R = {}

if recorded_system == 'simulation':
    R_tot_true = np.load(
        '{}/analysis/simulation/R_tot_simulation.npy'.format(CODE_DIR))
else:
    R_tot_true = np.load('{}/analysis/{}/R_tot_900min.npy'.format(
        CODE_DIR, recorded_system))
T_true, R_true = plots.load_analysis_results_glm_Simulation(
    CODE_DIR, recorded_system, use_settings_path=use_settings_path)
R_true_running_avg = plots.get_running_avg(R_true)
# dR_true = plots.get_dR(T_true,R_true_running_avg,R_tot_true)[0]
# tau_R_true = plots.get_T_avg(T_true, dR_true, T_0)
dR_true = plots.get_dR(T_true, R_true, R_tot_true)
tau_R_true = plots.get_T_avg(T_true, dR_true, T_0)
print('tau_R (true):', tau_R_true)

setup = 'full_shuffling'
regularization_method = setup.split("_")[1]
for rec_length in rec_lengths:
    R_tot_arr = []
    tau_R_arr = []
    number_samples = 30
    if rec_length in ('45min', '90min'):
        number_samples = 10
    for sample_index in np.arange(1, number_samples):
        # The source snippet breaks off inside this call; the remaining
        # arguments are filled in from the identical calls in the other examples.
        ANALYSIS_DIR, analysis_num_str, R_tot, T_D, T, R, R_CI_lo, R_CI_hi = plots.load_analysis_results(
            recorded_system,
            rec_length,
            sample_index,
            setup,
            CODE_DIR,
            regularization_method=regularization_method,
            use_settings_path=use_settings_path)
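        # Hypothetical continuation, mirroring the per-sample pattern of the
        # other examples:
        R_tot, T_D_index, max_valid_index = plots.get_R_tot(T, R, R_CI_lo)
        dR = plots.get_dR(T, R, R_tot)
        tau_R = plots.get_T_avg(T, dR, T_0)
        R_tot_arr += [R_tot]
        tau_R_arr += [tau_R]
    mean_tau_R[rec_length] = np.mean(tau_R_arr)
    mean_CI_tau_R[rec_length] = plots.get_CI_median(tau_R_arr)  # assumed helper; cf. Example #14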
Code Example #14
def median_relative_mean_R_tot_and_T_avg(recorded_system, setup, N_neurons, rec_lengths, rec_lengths_Nsamples, CODE_DIR):
    # NOTE: T_0 and use_settings_path are taken from the enclosing module scope.
    if recorded_system == 'CA1':
        DATA_DIR = '{}/data/CA1/'.format(CODE_DIR)
    if recorded_system == 'retina':
        DATA_DIR = '{}/data/retina/'.format(CODE_DIR)
    if recorded_system == 'culture':
        DATA_DIR = '{}/data/culture/'.format(CODE_DIR)
    validNeurons = np.load(
        '{}validNeurons.npy'.format(DATA_DIR)).astype(int)
    R_tot_relative_mean = {}
    T_avg_relative_mean = {}
    np.random.seed(41)
    neuron_selection = np.random.choice(len(validNeurons), N_neurons, replace=False)
    for rec_length in rec_lengths:
        # arrays containing R_tot and mean T_avg for different neurons
        R_tot_mean_arr = []
        T_avg_mean_arr = []
        N_samples = rec_lengths_Nsamples[rec_length]
        for j in range(N_neurons):
            neuron_index = neuron_selection[j]
            R_tot_arr = []
            T_avg_arr = []
            for sample_index in range(N_samples):
                # Get run index
                run_index = j * N_samples + sample_index
                """Load data five bins"""
                if rec_length != '90min':
                    setup_subsampled = '{}_subsampled'.format(setup)
                else:
                    run_index = neuron_index
                    setup_subsampled = setup
                if setup == 'full_bbc':
                    analysis_results = plots.load_analysis_results(
                        recorded_system, rec_length, run_index, setup_subsampled, CODE_DIR, regularization_method='bbc', use_settings_path=use_settings_path)
                else:
                    analysis_results = plots.load_analysis_results(
                        recorded_system, rec_length, run_index, setup_subsampled, CODE_DIR, regularization_method='shuffling', use_settings_path=use_settings_path)
                if analysis_results is not None:
                    ANALYSIS_DIR, analysis_num_str, R_tot, T_D, T, R, R_CI_lo, R_CI_hi = analysis_results
                    if len(R) > 0:
                        R_tot_analysis_results = plots.get_R_tot(T, R, R_CI_lo)
                        if R_tot_analysis_results is not None:
                            R_tot, T_D_index, max_valid_index = R_tot_analysis_results
                            # R_running_avg = plots.get_running_avg(R)
                            dR = plots.get_dR(T, R, R_tot)
                            T_avg = plots.get_T_avg(T, dR, T_0)
                            T_avg_arr += [T_avg]
                            R_tot_arr += [R_tot]
                        else:
                            print('CI_fail', recorded_system, setup, rec_length, run_index, neuron_index, sample_index)
                    else:
                        print('no valid embeddings', recorded_system, rec_length,  setup, analysis_num_str)
                else:
                    print('analysis_fail', recorded_system, rec_length, setup, run_index, neuron_index, sample_index)
            R_tot_mean_arr += [np.mean(R_tot_arr)]
            T_avg_mean_arr += [np.mean(T_avg_arr)]
        R_tot_relative_mean[rec_length] = np.array(R_tot_mean_arr)
        T_avg_relative_mean[rec_length] = np.array(T_avg_mean_arr)

    median_R_tot_relative_mean = []
    median_CI_R_tot_relative_mean = []
    median_T_avg_relative_mean = []
    median_CI_T_avg_relative_mean = []
    for rec_length in rec_lengths:
        R_tot_relative_mean_arr = R_tot_relative_mean[rec_length] / R_tot_relative_mean['90min'] * 100
        T_avg_relative_mean_arr = T_avg_relative_mean[rec_length] / T_avg_relative_mean['90min'] * 100
        # If no valid embeddings were found for any sample (which can happen for BBC),
        # the mean is NaN, so the neuron is excluded from the median.
        R_tot_relative_mean_arr = R_tot_relative_mean_arr[~np.isnan(R_tot_relative_mean_arr)]
        T_avg_relative_mean_arr = T_avg_relative_mean_arr[~np.isnan(T_avg_relative_mean_arr)]
        # Compute the median and 95% CIs over the selected neurons
        median_R_tot_relative_mean += [np.median(R_tot_relative_mean_arr)]
        median_CI_R_tot_relative_mean += [plots.get_CI_median(R_tot_relative_mean_arr)]
        median_T_avg_relative_mean += [np.median(T_avg_relative_mean_arr)]
        median_CI_T_avg_relative_mean += [plots.get_CI_median(T_avg_relative_mean_arr)]
    return np.array(median_R_tot_relative_mean), np.array(median_CI_R_tot_relative_mean), np.array(median_T_avg_relative_mean), np.array(median_CI_T_avg_relative_mean)
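A hypothetical invocation; the recording lengths and samples-per-length mapping below are placeholders, and T_0 and use_settings_path must be defined at module scope (see the note in the function):

rec_lengths = ['3min', '10min', '45min', '90min']
rec_lengths_Nsamples = {'3min': 10, '10min': 10, '45min': 10, '90min': 1}
medians = median_relative_mean_R_tot_and_T_avg(
    'CA1', 'full_shuffling', N_neurons=10,
    rec_lengths=rec_lengths, rec_lengths_Nsamples=rec_lengths_Nsamples,
    CODE_DIR='.')
median_R, median_CI_R, median_T, median_CI_T = medians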