Code Example #1
def optimal_gauss_kernel_size(train, optimize_steps, progress=None):
    """ Return the optimal kernel size for a spike density estimation
    of a spike train for a gaussian kernel. This function takes a single
    spike train, which can be a superposition of multiple spike trains
    (created with :func:`collapsed_spike_trains`) that should be included
    in a spike density estimation.

    Implements the algorithm from
    (Shimazaki, Shinomoto. Journal of Computational Neuroscience. 2010).

    :param train: The spike train for which the kernel
        size should be optimized.
    :type train: :class:`neo.core.SpikeTrain`
    :param optimize_steps: Array of kernel sizes to try (the best of
        these sizes will be returned).
    :type optimize_steps: Quantity 1D
    :param progress: Set this parameter to report progress. Will be
        advanced by len(`optimize_steps`) steps.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Best of the given kernel sizes
    :rtype: Quantity scalar
    """
    if not progress:
        progress = ProgressIndicator()

    x = train.rescale(optimize_steps.units)

    N = len(train)
    C = {}

    sampling_rate = 1024.0 / (x.t_stop - x.t_start)
    dt = float(1.0 / sampling_rate)
    y_hist = tools.bin_spike_trains({0: [x]}, sampling_rate)[0][0][0]
    y_hist = sp.asfarray(y_hist) / N / dt
    for step in optimize_steps:
        s = float(step)
        yh = sigproc.smooth(y_hist,
                            sigproc.GaussianKernel(2 * step),
                            sampling_rate,
                            num_bins=2048,
                            ensure_unit_area=True) * optimize_steps.units

        # Equation from Matlab code, 7/2012
        c = (sp.sum(yh**2) * dt - 2 * sp.sum(yh * y_hist) * dt +
             2 * 1 / sp.sqrt(2 * sp.pi) / s / N)
        C[s] = c * N * N
        progress.step()

    # Return kernel size with smallest cost
    return min(C, key=C.get) * optimize_steps.units
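
# Usage sketch (not part of the original example): a rough illustration of how
# optimal_gauss_kernel_size might be called, assuming neo and quantities are
# installed. The spike times and candidate kernel sizes are invented values.
import numpy as np
import quantities as pq
import neo

# A single spike train over a 2 s recording window.
example_train = neo.SpikeTrain([0.1, 0.3, 0.35, 0.8, 1.2, 1.25, 1.9] * pq.s,
                               t_start=0 * pq.s, t_stop=2 * pq.s)

# Candidate kernel sizes to evaluate; the best of these is returned.
candidate_sizes = np.arange(10, 210, 10) * pq.ms

best_size = optimal_gauss_kernel_size(example_train, candidate_sizes)
print(best_size)  # a scalar Quantity in ms
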
Code Example #2
File: rate_estimation.py  Project: rproepp/spykeutils
def optimal_gauss_kernel_size(train, optimize_steps, progress=None):
    """ Return the optimal kernel size for a spike density estimation
    of a spike train for a gaussian kernel. This function takes a single
    spike train, which can be a superposition of multiple spike trains
    (created with :func:`collapsed_spike_trains`) that should be included
    in a spike density estimation.

    Implements the algorithm from
    (Shimazaki, Shinomoto. Journal of Computational Neuroscience. 2010).

    :param train: The spike train for which the kernel
        size should be optimized.
    :type train: :class:`neo.core.SpikeTrain`
    :param optimize_steps: Array of kernel sizes to try (the best of
        these sizes will be returned).
    :type optimize_steps: Quantity 1D
    :param progress: Set this parameter to report progress. Will be
        advanced by len(`optimize_steps`) steps.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Best of the given kernel sizes
    :rtype: Quantity scalar
    """
    if not progress:
        progress = ProgressIndicator()

    x = train.rescale(optimize_steps.units)

    N = len(train)
    C = {}

    sampling_rate = 1024.0 / (x.t_stop - x.t_start)
    dt = float(1.0 / sampling_rate)
    y_hist = tools.bin_spike_trains({0: [x]}, sampling_rate)[0][0][0]
    y_hist = sp.asfarray(y_hist) / N / dt
    for step in optimize_steps:
        s = float(step)
        yh = sigproc.smooth(
            y_hist, sigproc.GaussianKernel(2 * step), sampling_rate, num_bins=2048,
            ensure_unit_area=True) * optimize_steps.units

        # Equation from Matlab code, 7/2012
        c = (sp.sum(yh ** 2) * dt -
             2 * sp.sum(yh * y_hist) * dt +
             2 * 1 / sp.sqrt(2 * sp.pi) / s / N)
        C[s] = c * N * N
        progress.step()

    # Return kernel size with smallest cost
    return min(C, key=C.get) * optimize_steps.units
Code Example #3
def _victor_purpura_multiunit_dist_for_trial_pair(
        a, b, reassignment_cost, kernel):
    # The algorithm used is based on the one given in
    #
    # Victor, J. D., & Purpura, K. P. (1996). Nature and precision of temporal
    # coding in visual cortex: a metric-space analysis. Journal of
    # Neurophysiology.
    #
    # It constructs a matrix cost[i, j_1, ... j_L] containing the minimal cost
    # when only considering the first i spikes of the merged spikes of a and
    # j_w spikes of the spike trains of b (the reference given above denotes
    # this matrix with G). In this implementation, only the submatrix for one
    # specific i is stored, as in each step only i-1 and i will be accessed.
    # That saves some memory.

    # Initialization of various variables needed by the algorithm. Also swap
    # a and b if it will save time as the algorithm is not symmetric.
    a_num_spikes = [st.size for st in a]
    b_num_spikes = [st.size for st in b]
    a_num_total_spikes = sp.sum(a_num_spikes)

    complexity_same = a_num_total_spikes * sp.prod(b_num_spikes)
    complexity_swapped = sp.prod(a_num_spikes) * sp.sum(b_num_spikes)
    if complexity_swapped < complexity_same:
        a, b = b, a
        a_num_spikes, b_num_spikes = b_num_spikes, a_num_spikes
        a_num_total_spikes = sp.sum(a_num_spikes)

    if a_num_total_spikes <= 0:
        return sp.sum(b_num_spikes)

    b_dims = tuple(sp.asarray(b_num_spikes) + 1)

    cost = sp.asfarray(sp.sum(sp.indices(b_dims), axis=0))

    a_merged = _merge_trains_and_label_spikes(a)
    b_strides = sp.cumprod((b_dims + (1,))[::-1])[:-1]
    flat_b_indices = sp.arange(cost.size)
    b_indices = sp.vstack(sp.unravel_index(flat_b_indices, b_dims))
    flat_neighbor_indices = sp.maximum(
        0, sp.atleast_2d(flat_b_indices).T - b_strides[::-1])
    invalid_neighbors = b_indices.T == 0

    b_train_mat = sp.empty((len(b), sp.amax(b_num_spikes))) * b[0].units
    for i, st in enumerate(b):
        b_train_mat[i, :st.size] = st.rescale(b[0].units)
        b_train_mat[i, st.size:] = sp.nan * b[0].units

    reassignment_costs = sp.empty((a_merged[0].size,) + b_train_mat.shape)
    reassignment_costs.fill(reassignment_cost)
    reassignment_costs[sp.arange(a_merged[1].size), a_merged[1], :] = 0.0
    k = 1 - 2 * kernel(sp.atleast_2d(
        a_merged[0]).T - b_train_mat.flatten()).simplified.reshape(
            (a_merged[0].size,) + b_train_mat.shape) + reassignment_costs

    decreasing_sequence = flat_b_indices[::-1]

    # Do the actual calculations.
    for a_idx in xrange(1, a_num_total_spikes + 1):
        base_costs = cost.flat[flat_neighbor_indices]
        base_costs[invalid_neighbors] = sp.inf
        min_base_cost_labels = sp.argmin(base_costs, axis=1)
        cost_all_possible_shifts = k[a_idx - 1, min_base_cost_labels, :] + \
            sp.atleast_2d(base_costs[flat_b_indices, min_base_cost_labels]).T
        cost_shift = cost_all_possible_shifts[
            sp.arange(cost_all_possible_shifts.shape[0]),
            b_indices[min_base_cost_labels, flat_b_indices] - 1]

        cost_delete_in_a = cost.flat[flat_b_indices]

        # cost_shift is dimensionless, but there is a bug in quantities with
        # the minimum function:
        # <https://github.com/python-quantities/python-quantities/issues/52>
        # The explicit request for the magnitude circumvents this problem.
        cost.flat = sp.minimum(cost_delete_in_a, cost_shift.magnitude) + 1
        cost.flat[0] = sp.inf

        # Minimum with cost for deleting in b.
        # The calculation order is somewhat different from the order one would
        # expect from the naive algorithm. This implementation, however,
        # optimizes the use of the CPU cache, giving a considerable speed
        # improvement.
        # Basically, this code calculates the values of a row of elements for
        # each dimension of cost.
        for dim_size, stride in zip(b_dims[::-1], b_strides):
            for i in xrange(stride):
                segment_size = dim_size * stride
                for j in xrange(i, cost.size, segment_size):
                    s = sp.s_[j:j + segment_size:stride]
                    seq = decreasing_sequence[-cost.flat[s].size:]
                    cost.flat[s] = sp.minimum.accumulate(
                        cost.flat[s] + seq) - seq

    return cost.flat[-1]
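
# Illustration (not part of the original code): the single-unit form of the
# same dynamic program described in the comments above -- the matrix G from
# Victor & Purpura (1996). This is a minimal NumPy sketch with invented toy
# spike times and an arbitrary shift cost q; it only makes the recurrence
# explicit and does not replace the optimized multiunit code above.
import numpy as np

def _vp_single_unit_sketch(a_times, b_times, q):
    """Minimal cost to transform spike train a into b: deleting or inserting
    a spike costs 1, shifting a spike by dt costs q * |dt|."""
    a = np.asarray(a_times, dtype=float)
    b = np.asarray(b_times, dtype=float)
    g = np.zeros((a.size + 1, b.size + 1))
    g[:, 0] = np.arange(a.size + 1)   # delete all considered spikes of a
    g[0, :] = np.arange(b.size + 1)   # insert all considered spikes of b
    for i in range(1, a.size + 1):
        for j in range(1, b.size + 1):
            g[i, j] = min(
                g[i - 1, j] + 1,                                 # delete from a
                g[i, j - 1] + 1,                                 # insert into b
                g[i - 1, j - 1] + q * abs(a[i - 1] - b[j - 1]))  # shift a spike
    return g[-1, -1]

# Toy example: two short trains (times in seconds), shift cost 1 per second.
print(_vp_single_unit_sketch([0.1, 0.5, 1.2], [0.2, 1.25], 1.0))
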
Code Example #4
def _victor_purpura_multiunit_dist_for_trial_pair(a, b, reassignment_cost,
                                                  kernel):
    # The algorithm used is based on the one given in
    #
    # Victor, J. D., & Purpura, K. P. (1996). Nature and precision of temporal
    # coding in visual cortex: a metric-space analysis. Journal of
    # Neurophysiology.
    #
    # It constructs a matrix cost[i, j_1, ... j_L] containing the minimal cost
    # when only considering the first i spikes of the merged spikes of a and
    # j_w spikes of the spike trains of b (the reference given above denotes
    # this matrix with G). In this implementation, only the submatrix for one
    # specific i is stored, as in each step only i-1 and i will be accessed.
    # That saves some memory.

    # Initialization of various variables needed by the algorithm. Also swap
    # a and b if it will save time as the algorithm is not symmetric.
    a_num_spikes = [st.size for st in a]
    b_num_spikes = [st.size for st in b]
    a_num_total_spikes = sp.sum(a_num_spikes)

    complexity_same = a_num_total_spikes * sp.prod(b_num_spikes)
    complexity_swapped = sp.prod(a_num_spikes) * sp.sum(b_num_spikes)
    if complexity_swapped < complexity_same:
        a, b = b, a
        a_num_spikes, b_num_spikes = b_num_spikes, a_num_spikes
        a_num_total_spikes = sp.sum(a_num_spikes)

    if a_num_total_spikes <= 0:
        return sp.sum(b_num_spikes)

    b_dims = tuple(sp.asarray(b_num_spikes) + 1)

    cost = sp.asfarray(sp.sum(sp.indices(b_dims), axis=0))

    a_merged = _merge_trains_and_label_spikes(a)
    b_strides = sp.cumprod((b_dims + (1, ))[::-1])[:-1]
    flat_b_indices = sp.arange(cost.size)
    b_indices = sp.vstack(sp.unravel_index(flat_b_indices, b_dims))
    flat_neighbor_indices = sp.maximum(
        0,
        sp.atleast_2d(flat_b_indices).T - b_strides[::-1])
    invalid_neighbors = b_indices.T == 0

    b_train_mat = sp.empty((len(b), sp.amax(b_num_spikes))) * b[0].units
    for i, st in enumerate(b):
        b_train_mat[i, :st.size] = st.rescale(b[0].units)
        b_train_mat[i, st.size:] = sp.nan * b[0].units

    reassignment_costs = sp.empty((a_merged[0].size, ) + b_train_mat.shape)
    reassignment_costs.fill(reassignment_cost)
    reassignment_costs[sp.arange(a_merged[1].size), a_merged[1], :] = 0.0
    k = 1 - 2 * kernel(sp.atleast_2d(a_merged[0]).T -
                       b_train_mat.flatten()).simplified.reshape(
                           (a_merged[0].size, ) +
                           b_train_mat.shape) + reassignment_costs

    decreasing_sequence = flat_b_indices[::-1]

    # Do the actual calculations.
    for a_idx in xrange(1, a_num_total_spikes + 1):
        base_costs = cost.flat[flat_neighbor_indices]
        base_costs[invalid_neighbors] = sp.inf
        min_base_cost_labels = sp.argmin(base_costs, axis=1)
        cost_all_possible_shifts = k[a_idx - 1, min_base_cost_labels, :] + \
            sp.atleast_2d(base_costs[flat_b_indices, min_base_cost_labels]).T
        cost_shift = cost_all_possible_shifts[
            sp.arange(cost_all_possible_shifts.shape[0]),
            b_indices[min_base_cost_labels, flat_b_indices] - 1]

        cost_delete_in_a = cost.flat[flat_b_indices]

        # cost_shift is dimensionless, but there is a bug in quantities with
        # the minimum function:
        # <https://github.com/python-quantities/python-quantities/issues/52>
        # The explicit request for the magnitude circumvents this problem.
        cost.flat = sp.minimum(cost_delete_in_a, cost_shift.magnitude) + 1
        cost.flat[0] = sp.inf

        # Minimum with cost for deleting in b.
        # The calculation order is somewhat different from the order one would
        # expect from the naive algorithm. This implementation, however,
        # optimizes the use of the CPU cache, giving a considerable speed
        # improvement.
        # Basically, this code calculates the values of a row of elements for
        # each dimension of cost.
        for dim_size, stride in zip(b_dims[::-1], b_strides):
            for i in xrange(stride):
                segment_size = dim_size * stride
                for j in xrange(i, cost.size, segment_size):
                    s = sp.s_[j:j + segment_size:stride]
                    seq = decreasing_sequence[-cost.flat[s].size:]
                    cost.flat[s] = sp.minimum.accumulate(cost.flat[s] +
                                                         seq) - seq

    return cost.flat[-1]
Code Example #5
import pandas as pd
import tensorflow as tf
from sklearn.datasets import load_iris
import scipy as sc
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
data = load_iris()
x = sc.asfarray(data.data)

# check x for NaN values
print(sum(sc.isnan(x)))

y = sc.asfarray(data.target)
# check y for NaN values
print(sum(sc.isnan(y)))

# labels and their mappings
labels = data.target_names
labelDict = {
    0: labels[0],
    1: labels[1],
    2: labels[2],
}

# No visualization is done in this exercise.

# split the data into training and test sets

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
print(x_train.shape, y_train.shape)
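
# Continuation sketch (not in the original snippet): a small, assumed Keras
# model to show what the otherwise unused keras import could be used for.
# Layer sizes and the epoch count are arbitrary illustration values.
model = keras.Sequential([
    keras.layers.Dense(16, activation='relu', input_shape=(4,)),
    keras.layers.Dense(3, activation='softmax'),
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=50, verbose=0)

# evaluate with the sklearn metric imported above
y_pred = model.predict(x_test).argmax(axis=1)
print(accuracy_score(y_test, y_pred))
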
Code Example #6
def normalized_vp_dist(trains, tau):
    num_spikes = sp.atleast_2d(sp.asarray([st.size for st in trains]))
    normalization = num_spikes + num_spikes.T
    normalization[normalization == 0.0] = 1.0
    return sp.asfarray(
        stm.victor_purpura_dist(trains, 2.0 / tau, sort=False)) / normalization
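
# Usage sketch (not part of the original function): a rough illustration,
# assuming `stm` is spykeutils.spike_train_metrics and `sp` is scipy, as the
# snippet implies. The spike trains and time constant are invented values.
import quantities as pq
import neo

toy_trains = [
    neo.SpikeTrain([0.1, 0.4, 0.9] * pq.s, t_stop=1 * pq.s),
    neo.SpikeTrain([0.15, 0.5] * pq.s, t_stop=1 * pq.s),
]

# Pairwise Victor-Purpura distances with time constant tau, normalized by the
# summed spike counts of each pair of trains.
dist_matrix = normalized_vp_dist(toy_trains, 100 * pq.ms)
print(dist_matrix)  # a 2x2 array of normalized pairwise distances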