Example #1
def burstiness_factor_g_BK_statistics(A, dt):
    """
    Calculate the mean and standard deviation of the burstiness factor
    for a given area and timestep for three values of g_BK.

    Parameters
    ----------
    A : {int, float}
        Area to use in the model, in cm^2.
    dt : {int, float}
        Timestep used in the model, in ms.

    Returns
    -------
    results : tuple
        A tuple of the results, of the form:
        ((mean for G_BK = 0, std for G_BK = 0),
         (mean for G_BK = 0.5, std for G_BK = 0.5),
         (mean for G_BK = 1, std for G_BK = 1)).
    """
    # G_BK = 0
    g_BK = scale_conductance(0, A)
    mean_0, std_0 = burstiness_factors(g_BK, A, dt)

    # G_BK = 0.5
    g_BK = scale_conductance(0.5, A)
    mean_05, std_05 = burstiness_factors(g_BK, A, dt)

    # G_BK = 1
    g_BK = scale_conductance(1, A)
    mean_1, std_1 = burstiness_factors(g_BK, A, dt)

    results = ((mean_0, std_0), (mean_05, std_05), (mean_1, std_1))

    return results
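A minimal usage sketch for the function above. The module name `burstiness` is hypothetical (the original file name is not shown here); the area and timestep reuse the defaults from the `robustness` example further down.

# Hypothetical usage sketch; the module name "burstiness" is an assumption.
from burstiness import burstiness_factor_g_BK_statistics

A = 3.1415927e-6   # cm^2, same default area as robustness() below
dt = 0.01          # ms, same default timestep as robustness() below

results = burstiness_factor_g_BK_statistics(A, dt)
(mean_0, std_0), (mean_05, std_05), (mean_1, std_1) = results

print("G_BK = 0:   mean = {:.2f}, std = {:.2f}".format(mean_0, std_0))
print("G_BK = 0.5: mean = {:.2f}, std = {:.2f}".format(mean_05, std_05))
print("G_BK = 1:   mean = {:.2f}, std = {:.2f}".format(mean_1, std_1))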
Example #2
def change_tau_BK():
    """
    Change tau_BK values and calculate the burstiness factor for each.

    Returns
    -------
    tau_BKs : numpy.array
        The tau_BK values in ms.
    burstiness_factors : list
        The burstiness factor for each tau_BK, where the burstiness factor is
        the fraction of events with a duration above `burst_threshold`.

    Notes
    -----
    Uses original G_BK = 1.
    """
    tau_BKs = np.array([2, 4, 5, 6, 7, 8, 10])

    burstiness_factors = []

    # Original values (scaled to the new model)
    parameters = scale_tabak(A)
    parameters["g_BK"] = scale_conductance(1, A)

    for tau_BK in tau_BKs:
        bins, frequency, burstiness_factor = calculate_frequency_bf(tau_BK=tau_BK,
                                                                    A=A,
                                                                    dt=dt,
                                                                    **parameters)
        burstiness_factors.append(burstiness_factor)

    return tau_BKs, burstiness_factors
Example #3
def change_g_BK():
    """
    Change g_BK values and calculate the burstiness factor for each.

    Returns
    -------
    G_BKs : numpy.array
        The g_BK values in nS.
    burstiness_factors : list
        The burstiness factor for each G_BK, where the burstiness factor is
        the fraction of events with a duration above `burst_threshold`.
    """
    G_BKs = np.array([0, 0.2, 0.4, 0.5, 0.6, 0.8, 1])

    g_BKs = scale_conductance(G_BKs, A)

    parameters = scale_tabak(A)
    del parameters["g_BK"]

    burstiness_factors = []

    for g_BK in g_BKs:
        bins, frequency, burstiness_factor = calculate_frequency_bf(g_BK=g_BK,
                                                                    A=A,
                                                                    dt=dt,
                                                                    **parameters)
        burstiness_factors.append(burstiness_factor)

    return G_BKs, burstiness_factors
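The `del parameters["g_BK"]` line above exists because `g_BK` is also passed explicitly to `calculate_frequency_bf`; supplying the same keyword twice raises a `TypeError`. A small standalone sketch of the pattern (the function `run_model` is hypothetical and only illustrates the error):

def run_model(g_BK=0.0, g_K=0.0, g_Ca=0.0):
    """Stand-in for calculate_frequency_bf; only the signature matters here."""
    return g_BK + g_K + g_Ca

parameters = {"g_BK": 1.0, "g_K": 2.0, "g_Ca": 3.0}

# run_model(g_BK=0.5, **parameters) would raise:
# TypeError: run_model() got multiple values for keyword argument 'g_BK'
del parameters["g_BK"]
print(run_model(g_BK=0.5, **parameters))  # 5.5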
Example #4
def perform_uncertainty_analysis():
    """
    Perform an uncertainty quantification and sensitivity analysis of the model.

    Returns
    -------
    data : uncertainpy.Data object
        The results from the uncertainty quantification.
    """
    parameters = {
        "g_K": scale_conductance(G_K),
        "g_Ca": scale_conductance(G_Ca),
        "g_SK": scale_conductance(G_SK),
        "g_l": scale_conductance(G_l),
        "g_BK": scale_conductance(0.67)  # temporary value; the g_BK distribution is set explicitly below
    }

    parameters = un.Parameters(parameters)

    # Set all conductances to have a uniform distribution
    # within a +/- 50% interval around their original value
    parameters.set_all_distributions(un.uniform(1))

    parameters["g_BK"].distribution = cp.Uniform(scale_conductance(0),
                                                 scale_conductance(1))

    # Initialize features with the correct algorithm
    features = un.SpikingFeatures(new_features=burstiness_factor,
                                  strict=False,
                                  logger_level="error",
                                  features_to_run=features_to_run,
                                  threshold=0.55,
                                  end_threshold=-0.1,
                                  normalize=True,
                                  trim=False,
                                  min_amplitude=min_spike_amplitude)

    # Initialize the model and define default options
    model = un.NeuronModel(file="tabak.py",
                           name="tabak",
                           discard=discard,
                           noise_amplitude=noise_amplitude,
                           simulation_time=simulation_time,
                           ignore=True)

    # Perform the uncertainty quantification
    UQ = un.UncertaintyQuantification(model,
                                      parameters=parameters,
                                      features=features)

    # We set the seed to be able to reproduce the result
    data = UQ.quantify(seed=10,
                       plot=None,
                       data_folder=data_folder,
                       polynomial_order=polynomial_order)

    return data
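For reference, the +/- 50% intervals described in the comment above can be written out directly with chaospy; as that comment notes, `un.uniform(1)` sets this up in uncertainpy, so the sketch below is purely illustrative:

import numpy as np
import chaospy as cp

def uniform_pm_50_percent(value):
    """Uniform distribution spanning +/- 50% around the original value."""
    return cp.Uniform(0.5 * value, 1.5 * value)

# Example: a conductance of 0.2 (in S/cm^2) gets a Uniform(0.1, 0.3) distribution.
dist = uniform_pm_50_percent(0.2)
np.random.seed(10)
print(dist.sample(5))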
Example #5
def generate_data_area(areas):
    """
    Calculate the burstiness factor for the model for various areas.

    Parameters
    ----------
    areas : list
        Areas to use in the model, in cm^2.

    Returns
    -------
    bins : list
        Bins for the binned burstiness factors for each area.
    binned_burstiness_factors : list
        Binned burstiness factors for each area.
    """
    bins = []
    binned_burstiness_factors = []

    with open(os.path.join(data_folder, output_file_area), "w") as output:
        for i, A in enumerate(areas):
            print("Running for A = {}".format(A))

            g_BK = scale_conductance(1, A)

            tmp_bins, tmp_binned_burstiness_factors, bursters, spikers \
                = robustness(g_BK=g_BK,
                             A=A,
                             dt=dt_default)

            bins.append(tmp_bins)
            binned_burstiness_factors.append(tmp_binned_burstiness_factors)

            output.write("A = {}\n".format(A))
            output.write("Spikers = {}\n".format(spikers))
            output.write("Bursters = {}\n\n".format(bursters))

            np.save(os.path.join(data_folder, "bins_{}_area".format(i)), tmp_bins)
            np.save(os.path.join(data_folder, "binned_burstiness_factors_{}_area".format(i)), tmp_binned_burstiness_factors)


    return bins, binned_burstiness_factors
Example #6
def generate_data_dt(dts):
    """
    Calculate the burstiness factor for the model for various timesteps.

    Parameters
    ----------
    dts : list
        Timesteps to use in the model, in ms.

    Returns
    -------
    bins : list
        Bins for the binned burstiness factors for each timestep.
    binned_burstiness_factors : list
        Binned burstiness factors for each timestep.
    """
    bins = []
    binned_burstiness_factors = []

    with open(os.path.join(data_folder, output_file_dt), "w") as output:
        for i, dt in enumerate(dts):
            print("Running for dt = {}".format(dt))
            g_BK = scale_conductance(1, A_default)
            tmp_bins, tmp_binned_burstiness_factors, bursters, spikers \
                = robustness(g_BK=g_BK, A=A_default, dt=dt)

            bins.append(tmp_bins)
            binned_burstiness_factors.append(tmp_binned_burstiness_factors)

            output.write("dt = {}\n".format(dt))
            output.write("Spikers = {}\n".format(spikers))
            output.write("Bursters = {}\n\n".format(bursters))

            np.save(os.path.join(data_folder, "bins_{}_dt".format(i)), tmp_bins)
            np.save(os.path.join(data_folder, "binned_burstiness_factors_{}_dt".format(i)), tmp_binned_burstiness_factors)


    return bins, binned_burstiness_factors
Example #7
def generate_data_figure_2():
    """
    Reproduce the data for figure 2 in Tabak et al. (2011).

    Returns
    -------
    bins_0 : array
        Bins for the binned burstiness factors when G_BK = 0.
    bins_05 : array
        Bins for the binned burstiness factors when G_BK = 0.5.
    bins_1 : array
        Bins for the binned burstiness factors when G_BK = 1.
    binned_burstiness_factors_0 : array
        Binned burstiness factors when G_BK = 0.
    binned_burstiness_factors_05 : array
        Binned burstiness factors when G_BK = 0.5.
    binned_burstiness_factors_1 : array
        Binned burstiness factors when G_BK = 1.

    Notes
    -----
    http://www.jneurosci.org/content/31/46/16855/tab-article-info
    """
    # Run model for various G_BK values
    # G_BK = 0
    print("Running for G_BK = 0")

    g_BK = scale_conductance(0, A)
    bins_0, binned_burstiness_factors_0, bursters_0, spikers_0 = robustness(g_BK=g_BK, dt=dt)

    # G_BK = 0.5
    print("Running for G_BK = 0.5")

    g_BK = scale_conductance(0.5, A)
    bins_05, binned_burstiness_factors_05, bursters_05, spikers_05 = robustness(g_BK=g_BK, dt=dt)

    # G_BK = 1
    print("Running for G_BK = 1")

    g_BK = scale_conductance(1, A)
    bins_1, binned_burstiness_factors_1, bursters_1, spikers_1 = robustness(g_BK=g_BK, dt=dt)


    # Write percentage of events as spikers and bursters to file
    with open(os.path.join(data_folder, output_file), "w") as output:
        output.write("G_BK = 0\n")
        output.write("Spikers = {}\n".format(spikers_0))
        output.write("Bursters = {}\n\n".format(bursters_0))

        output.write("G_BK = 0.5\n")
        output.write("Spikers = {}\n".format(spikers_05))
        output.write("Bursters = {}\n\n".format(bursters_05))

        output.write("G_BK = 1\n")
        output.write("Spikers = {}\n".format(spikers_1))
        output.write("Bursters = {}\n".format(bursters_1))


    np.save(os.path.join(data_folder, "bins_0"), bins_0)
    np.save(os.path.join(data_folder, "bins_05"), bins_05)
    np.save(os.path.join(data_folder, "bins_1"), bins_1)
    np.save(os.path.join(data_folder, "binned_burstiness_factors_0"), binned_burstiness_factors_0)
    np.save(os.path.join(data_folder, "binned_burstiness_factors_05"), binned_burstiness_factors_05)
    np.save(os.path.join(data_folder, "binned_burstiness_factors_1"), binned_burstiness_factors_1)

    return bins_0, bins_05, bins_1, binned_burstiness_factors_0, binned_burstiness_factors_05, binned_burstiness_factors_1
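Because `np.save` appends the `.npy` extension, the arrays written above can be reloaded later without rerunning the robustness sweep. A sketch, where the `data_folder` value is a placeholder (in the original code it is a module-level setting):

import os
import numpy as np

data_folder = "../data"  # placeholder; defined at module level in the original code

bins_0 = np.load(os.path.join(data_folder, "bins_0.npy"))
binned_burstiness_factors_0 = np.load(
    os.path.join(data_folder, "binned_burstiness_factors_0.npy"))

print(bins_0.shape, binned_burstiness_factors_0.shape)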
Example #8
def figure_1():
    """
    Reproduce figure 1 in Tabak et al. (2011). The figure is saved as figure_1.

    http://www.jneurosci.org/content/31/46/16855/tab-article-info
    """
    print("Reproducing figure 1 in Tabak et. al. 2011")

    parameters = scale_tabak(A)

    # G_BK = 0
    parameters["g_BK"] = scale_conductance(0, A)

    time_0, V_0 = tabak(noise_amplitude=noise_amplitude,
                        discard=discard,
                        simulation_time=simulation_time,
                        A=A,
                        dt=dt,
                        **parameters)

    bins_0, frequency_0, burstiness_factor_0 = calculate_frequency_bf(A=A,
                                                                      dt=dt,
                                                                      **parameters)

    # G_BK = 0.5
    parameters["g_BK"] = scale_conductance(0.5, A)

    time_05, V_05 = tabak(noise_amplitude=noise_amplitude,
                          discard=discard,
                          simulation_time=simulation_time,
                          A=A,
                          dt=dt,
                          **parameters)

    bins_05, frequency_05, burstiness_factor_05 = calculate_frequency_bf(A=A,
                                                                         dt=dt,
                                                                         **parameters)


    # G_BK = 1
    parameters["g_BK"] = scale_conductance(1, A)
    time_1, V_1 = tabak(noise_amplitude=noise_amplitude,
                        discard=discard,
                        simulation_time=simulation_time,
                        A=A,
                        dt=dt,
                        **parameters)

    bins_1, frequency_1, burstiness_factor_1 = calculate_frequency_bf(A=A,
                                                                      dt=dt,
                                                                      **parameters)


    # Calculate results for figure 1D
    scaled_g_BKs, burstiness_factors_g_BK = change_g_BK()

    # Calculate results for figure 1E
    scaled_tau_BK, burstiness_factors_tau_BK = change_tau_BK()

    # "Remove" the discarded time to plot from 0
    time_0 -= discard
    time_05 -= discard
    time_1 -= discard

    # Rescale from ms to s
    time_0 /= 1000
    time_05 /= 1000
    time_1 /= 1000
    bins_0 /= 1000
    bins_05 /= 1000
    bins_1 /= 1000
    burst_threshold_scaled = burst_threshold/1000
    simulation_time_plot_scaled = simulation_time_plot/1000
    discard_scaled = discard/1000


    # Plot the data
    plt.rcParams.update(params)

    plt.figure(figsize=(figure_width, figure_width))
    gs = plt.GridSpec(4, 6)
    ax1 = plt.subplot(gs[0, :-2])
    ax2 = plt.subplot(gs[1, :-2])
    ax3 = plt.subplot(gs[2, :-2])

    ax4 = plt.subplot(gs[0, -2:])
    ax5 = plt.subplot(gs[1, -2:])
    ax6 = plt.subplot(gs[2, -2:])

    ax7 = plt.subplot(gs[3, :3])
    ax8 = plt.subplot(gs[3, 3:])


    voltage_axes = [ax1, ax2, ax3]
    burst_axes = [ax4, ax5, ax6]

    ax1.plot(time_0, V_0)
    title = r"$G_{\mathrm{BK}} = 0 $ nS"
    ax1.set_title(title,  fontweight=fontweight)
    ax1.text(label_x,
             label_y,
             "A",
             transform=ax1.transAxes,
             fontsize=titlesize,
             fontweight=plot_label_weight)

    ax2.plot(time_05, V_05)
    title = r"$G_{\mathrm{BK}} = 0.5 $ nS"
    ax2.set_title(title, fontweight=fontweight)
    ax2.text(label_x,
             label_y,
             "B",
             transform=ax2.transAxes,
             fontsize=titlesize,
             fontweight=plot_label_weight)

    ax3.plot(time_1, V_1)
    title = r"$G_{\mathrm{BK}} = 1$ nS"
    ax3.set_title(title, fontweight=fontweight)
    ax3.set_xlabel("Time (s)", fontsize=labelsize, fontweight=fontweight)
    ax3.text(label_x, label_y, "C", transform=ax3.transAxes, fontsize=titlesize, fontweight=plot_label_weight)


    yticks = [-60, -40, -20, 0]
    xticks = [0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5]

    for ax in voltage_axes:
        ax.set_ylabel("V (mV)", fontweight=fontweight)
        ax.set_ylim([-70, 10])
        ax.set_xlim([0, simulation_time_plot_scaled - discard_scaled])
        ax.set_yticks(yticks)
        ax.set_xticks(xticks)
        ax.tick_params(axis="both", which="major", labelsize=fontsize, labelcolor="black")


    ax4.bar(bins_0[:-1], frequency_0, width=(bins_0[1] - bins_0[0]), align="edge")
    ax4.text(0.1, 0.8, "BF = {}".format(burstiness_factor_0), fontsize=labelsize)

    ax5.bar(bins_05[:-1], frequency_05, width=(bins_05[1] - bins_05[0]), align="edge")
    ax5.text(0.1, 0.8, "BF = {:.2f}".format(burstiness_factor_05), fontsize=labelsize)
    ax5.text(0.002, 0.4, "Spikes", fontsize=8)
    ax5.text(0.1, 0.4, "Bursts", fontsize=8)

    ax6.bar(bins_1[:-1], frequency_1, width=(bins_1[1] - bins_1[0]), align="edge")
    ax6.text(0.1, 0.8, "BF = {:.2f}".format(burstiness_factor_1), fontsize=labelsize)

    yticks = [0, 0.2, 0.4, 0.6, 0.8, 1]
    xticks = [0, 0.05, 0.1, 0.15, 0.2]


    for ax in burst_axes:
        ax.axvline(burst_threshold_scaled, color=axis_grey)
        ax.set_ylim([0, 1])
        ax.set_xlim([0, .23])
        ax.set_yticks(yticks)
        ax.set_xticks(xticks)
        ax.set_ylabel("Frequency", fontweight=fontweight)
        ax.tick_params(axis="both", which="major", labelsize=fontsize, labelcolor="black")

    ax6.set_xlabel("Event duration (s)", fontweight=fontweight)


    ax7.plot(scaled_g_BKs, burstiness_factors_g_BK, marker=".")
    ax7.set_xlabel(r"$G_{\mathrm{BK}}$ (nS)", fontweight=fontweight)
    ax7.set_ylabel("Burstiness", fontweight=fontweight)
    ax7.tick_params(axis="both", which="major", labelsize=fontsize, labelcolor="black")
    ax7.set_yticks(yticks)
    ax7.set_ylim([-0.05, 1.05])


    ax8.plot(scaled_tau_BK, burstiness_factors_tau_BK, marker=".")
    ax8.set_xlabel(r"$\tau_{\mathrm{BK}}$ (ms)", fontweight=fontweight)
    ax8.set_ylabel("Burstiness", fontweight=fontweight)
    ax8.set_yticks(yticks)
    ax8.tick_params(axis="both", which="major", labelsize=fontsize, labelcolor="black")
    ax8.set_ylim([-0.05, 1.05])
    ax8.set_xlim([2, 10])

    ax7.text(label_x,
             label_y,
             "D",
             transform=ax7.transAxes,
             fontsize=titlesize,
             fontweight=plot_label_weight)
    ax8.text(label_x,
             label_y,
             "E",
             transform=ax8.transAxes,
             fontsize=titlesize,
             fontweight=plot_label_weight)

    plt.tight_layout()

    plt.savefig(os.path.join(figure_folder, "figure_1" + figure_format))
Example #9
def robustness(g_BK=0, A=3.1415927e-6, dt=0.01):
    """
    Calculate a histogram (binned counts) of the burstiness factor over several
    model runs with randomly varied conductances (all except g_BK, which is fixed).

    Parameters
    ----------
    g_BK : float
        The value of the g_BK conductance in S/cm^2.
    A : float, optional
        Area of the neuron cell, in cm^2. Default is 3.1415927e-6.
    dt : float, optional
        Time step of the simulation, in ms. Only used when there is noise;
        otherwise adaptive time steps are used. Default is 0.01.

    Returns
    -------
    bins : numpy.array
        The bins for the burstiness.
    binned_burstiness_factors : numpy.array
        The number of model evaluations with burstiness factor corresponding to
        each bin.
    bursters : float
        Fraction of model evaluations with results that have
        burstiness factor > 0.5.
    spikers : float
        Fraction of model evaluations with results that have
        burstiness factor < 0.3.
    """
    bins = 10
    hist_range = (0, 1)

    # Original values (scaled to the new model)
    g_l_scaled = scale_conductance(G_l, A)
    g_K_scaled = scale_conductance(G_K, A)
    g_Ca_scaled = scale_conductance(G_Ca, A)
    g_SK_scaled = scale_conductance(G_SK, A)

    # Draw conductances from uniform distributions +/- 50% of their original values
    g_K = np.random.uniform(g_K_scaled*0.5, g_K_scaled*1.5, robustness_reruns)
    g_Ca = np.random.uniform(g_Ca_scaled*0.5, g_Ca_scaled*1.5, robustness_reruns)
    g_SK = np.random.uniform(g_SK_scaled*0.5, g_SK_scaled*1.5, robustness_reruns)
    g_l = np.random.uniform(g_l_scaled*0.5, g_l_scaled*1.5, robustness_reruns)

    g_BKs = np.ones(robustness_reruns)*g_BK
    As = np.ones(robustness_reruns)*A
    dts = np.ones(robustness_reruns)*dt
    parameters = np.array([g_BKs, g_K, g_Ca, g_SK, g_l, As, dts]).T

    # Run the model for each of the selected conductances
    # and calculate the burstiness factor of each evaluation
    pool = mp.Pool(processes=mp.cpu_count() - 2)

    burstiness_factors = []
    for burstiness_factor in tqdm(pool.imap(tabak_parallel, parameters),
                                  desc="Running model",
                                  total=robustness_reruns):

        if burstiness_factor is not None:
            burstiness_factors.append(burstiness_factor)

    pool.close()

    burstiness_factors = np.array(burstiness_factors)

    binned_burstiness_factors, bins = np.histogram(burstiness_factors, bins=bins, range=hist_range)

    bursters = len(np.where(burstiness_factors > 0.5)[0])/len(burstiness_factors)
    spikers = len(np.where(burstiness_factors < 0.3)[0])/len(burstiness_factors)

    return bins, binned_burstiness_factors, bursters, spikers
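The binning and classification at the end of `robustness` can be checked in isolation with a small synthetic set of burstiness factors; note that `np.histogram` returns the counts first and the bin edges second, which is why the function unpacks them in that order:

import numpy as np

# Synthetic burstiness factors standing in for the parallel model evaluations.
burstiness_factors = np.array([0.05, 0.1, 0.2, 0.45, 0.6, 0.8, 0.9, 1.0])

counts, edges = np.histogram(burstiness_factors, bins=10, range=(0, 1))
print(edges)   # 11 bin edges from 0 to 1
print(counts)  # number of evaluations falling in each bin

bursters = len(np.where(burstiness_factors > 0.5)[0])/len(burstiness_factors)
spikers = len(np.where(burstiness_factors < 0.3)[0])/len(burstiness_factors)
print(bursters, spikers)  # 0.5 and 0.375 for this synthetic set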
Example #10
def perform_exploration():
    """
    Perform a limited parameter exploration of the average duration and
    burstiness of the model, varying g_SK and g_BK, as well as g_K and g_BK.

    Returns
    -------
    original_g_BKs : array
        The g_BK values used in the exploration, not scaled.
    original_g_SKs : array
        The g_SK values used in the exploration, not scaled.
    original_g_Ks : array
        The g_K values used in the exploration, not scaled.
    event_durations_SK : array
        The mean event durations when g_SK and g_BK are varied.
    event_durations_K : array
        The mean event durations when g_K and g_BK are varied.
    """
    model = un.NeuronModel(file="tabak.py", name="tabak")

    original_g_BKs = np.linspace(0, 1, nr_points_exploration)
    g_BKs = scale_conductance(original_g_BKs)

    # g_SK
    original_g_SKs = np.linspace(1, 3, nr_points_exploration)
    g_SKs = scale_conductance(original_g_SKs)

    event_durations_SK = np.zeros((len(original_g_BKs), len(original_g_SKs)))
    for i, g_BK in enumerate(tqdm(g_BKs, desc="Varying g_BK")):
        for j, g_SK in enumerate(tqdm(g_SKs, desc="Varying g_SK")):
            time, voltage, info = model.run(g_BK=g_BK,
                                            g_SK=g_SK,
                                            discard=discard,
                                            noise_amplitude=noise_amplitude,
                                            simulation_time=simulation_time)

            tmp_duration = np.mean(duration(time, voltage))

            if np.isnan(tmp_duration):
                tmp_duration = -1

            event_durations_SK[i, j] = tmp_duration

    original_g_Ks = np.linspace(1.5, 4.5, nr_points_exploration)
    g_Ks = scale_conductance(original_g_Ks)

    event_durations_K = np.zeros((len(original_g_BKs), len(original_g_Ks)))
    for i, g_BK in enumerate(tqdm(g_BKs, desc="Varying g_BK")):
        for j, g_K in enumerate(tqdm(g_Ks, desc="Varying g_K")):
            time, voltage, info = model.run(g_BK=g_BK,
                                            g_K=g_K,
                                            discard=discard,
                                            noise_amplitude=noise_amplitude,
                                            simulation_time=simulation_time)

            tmp_duration = np.mean(duration(time, voltage))

            if np.isnan(tmp_duration):
                tmp_duration = -1

            event_durations_K[i, j] = tmp_duration

    np.save(os.path.join(data_folder, "original_g_BKs"), original_g_BKs)
    np.save(os.path.join(data_folder, "original_g_SKs"), original_g_SKs)
    np.save(os.path.join(data_folder, "original_g_Ks"), original_g_Ks)
    np.save(os.path.join(data_folder, "event_durations_SK"),
            event_durations_SK)
    np.save(os.path.join(data_folder, "event_durations_K"), event_durations_K)

    return original_g_BKs, original_g_SKs, original_g_Ks, event_durations_SK, event_durations_K
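A sketch of one way to inspect the saved exploration results, plotting the mean event duration over the (G_BK, G_SK) grid. The `data_folder` value is again a placeholder; the transpose is needed because rows of `event_durations_SK` index g_BK and columns index g_SK:

import os
import numpy as np
import matplotlib.pyplot as plt

data_folder = "../data"  # placeholder; defined at module level in the original code

original_g_BKs = np.load(os.path.join(data_folder, "original_g_BKs.npy"))
original_g_SKs = np.load(os.path.join(data_folder, "original_g_SKs.npy"))
event_durations_SK = np.load(os.path.join(data_folder, "event_durations_SK.npy"))

# pcolormesh expects C with shape (len(y), len(x)), hence the transpose.
# Runs without detected events were stored as -1 in perform_exploration().
plt.pcolormesh(original_g_BKs, original_g_SKs, event_durations_SK.T, shading="auto")
plt.xlabel(r"$G_{\mathrm{BK}}$ (nS)")
plt.ylabel(r"$G_{\mathrm{SK}}$ (nS)")
plt.colorbar(label="Mean event duration (ms)")
plt.savefig("exploration_SK_sketch.png")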