Example #1
def optimal_gauss_kernel_size(train, optimize_steps, progress=None):
    """ Return the optimal kernel size for a spike density estimation
    of a spike train for a gaussian kernel. This function takes a single
    spike train, which can be a superposition of multiple spike trains
    (created with :func:`collapsed_spike_trains`) that should be included
    in a spike density estimation.

    Implements the algorithm from
    (Shimazaki, Shinomoto. Journal of Computational Neuroscience. 2010).

    :param train: The spike train for which the kernel
        size should be optimized.
    :type train: :class:`neo.core.SpikeTrain`
    :param optimize_steps: Array of kernel sizes to try (the best of
        these sizes will be returned).
    :type optimize_steps: Quantity 1D
    :param progress: Set this parameter to report progress. Will be
        advanced by len(`optimize_steps`) steps.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Best of the given kernel sizes
    :rtype: Quantity scalar
    """
    if not progress:
        progress = ProgressIndicator()

    x = train.rescale(optimize_steps.units)

    N = len(train)
    C = {}

    sampling_rate = 1024.0 / (x.t_stop - x.t_start)
    dt = float(1.0 / sampling_rate)
    y_hist = tools.bin_spike_trains({0: [x]}, sampling_rate)[0][0][0]
    y_hist = sp.asfarray(y_hist) / N / dt
    for step in optimize_steps:
        s = float(step)
        yh = sigproc.smooth(y_hist,
                            sigproc.GaussianKernel(2 * step),
                            sampling_rate,
                            num_bins=2048,
                            ensure_unit_area=True) * optimize_steps.units

        # Equation from Matlab code, 7/2012
        c = (sp.sum(yh**2) * dt - 2 * sp.sum(yh * y_hist) * dt +
             2 * 1 / sp.sqrt(2 * sp.pi) / s / N)
        C[s] = c * N * N
        progress.step()

    # Return kernel size with smallest cost
    return min(C, key=C.get) * optimize_steps.units
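A minimal usage sketch for the function above, assuming the snippet's own
environment (spykeutils' tools, sigproc and ProgressIndicator, plus neo and
quantities) is importable; the spike times and candidate widths below are
toy values, not from the source:

# Hypothetical usage of optimal_gauss_kernel_size defined above.
import quantities as pq
import neo

# A short spike train: spike times plus an explicit recording stop time.
train = neo.SpikeTrain([0.1, 0.35, 0.4, 0.8, 1.2] * pq.s, t_stop=2.0 * pq.s)

# Candidate Gaussian kernel widths; the lowest-cost width is returned.
candidate_widths = pq.Quantity([10.0, 20.0, 50.0, 100.0, 200.0], 'ms')

best = optimal_gauss_kernel_size(train, candidate_widths)
print(best)  # a scalar Quantity in ms, one of the candidates above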
Example #2
def optimal_gauss_kernel_size(train, optimize_steps, progress=None):
    """ Return the optimal kernel size for a spike density estimation
    of a spike train for a gaussian kernel. This function takes a single
    spike train, which can be a superposition of multiple spike trains
    (created with :func:`collapsed_spike_trains`) that should be included
    in a spike density estimation.

    Implements the algorithm from
    (Shimazaki, Shinomoto. Journal of Computational Neuroscience. 2010).

    :param train: The spike train for which the kernel
        size should be optimized.
    :type train: :class:`neo.core.SpikeTrain`
    :param optimize_steps: Array of kernel sizes to try (the best of
        these sizes will be returned).
    :type optimize_steps: Quantity 1D
    :param progress: Set this parameter to report progress. Will be
        advanced by len(`optimize_steps`) steps.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Best of the given kernel sizes
    :rtype: Quantity scalar
    """
    if not progress:
        progress = ProgressIndicator()

    x = train.rescale(optimize_steps.units)

    N = len(train)
    C = {}

    sampling_rate = 1024.0 / (x.t_stop - x.t_start)
    dt = float(1.0 / sampling_rate)
    y_hist = tools.bin_spike_trains({0: [x]}, sampling_rate)[0][0][0]
    y_hist = sp.asfarray(y_hist) / N / dt
    for step in optimize_steps:
        s = float(step)
        yh = sigproc.smooth(
            y_hist, sigproc.GaussianKernel(2 * step), sampling_rate, num_bins=2048,
            ensure_unit_area=True) * optimize_steps.units

        # Equation from Matlab code, 7/2012
        c = (sp.sum(yh ** 2) * dt -
             2 * sp.sum(yh * y_hist) * dt +
             2 * 1 / sp.sqrt(2 * sp.pi) / s / N)
        C[s] = c * N * N
        progress.step()

    # Return kernel size with smallest cost
    return min(C, key=C.get) * optimize_steps.units
Example #3
def optimal_gauss_kernel_size(train, optimize_steps, progress=None):
    """ Return the optimal kernel size for a spike density estimation
    of a spike train for a gaussian kernel. This function takes a single
    spike train, which can be a superposition of multiple spike trains
    (created with :func:`collapsed_spike_trains`) that should be included
    in a spike density estimation.

    Implements the algorithm from
    (Shimazaki, Shinomoto. Journal of Computational Neuroscience. 2010).

    :param train: The spike train for which the kernel
        size should be optimized.
    :type train: :class:`neo.core.SpikeTrain`
    :param optimize_steps: Array of kernel sizes to try (the best of
        these sizes will be returned).
    :type optimize_steps: Quantity 1D
    :param progress: Set this parameter to report progress. Will be
        advanced by len(`optimize_steps`) steps.
    :type progress: :class:`spykeutils.progress_indicator.ProgressIndicator`
    :returns: Best of the given kernel sizes
    :rtype: Quantity scalar
    """
    if not progress:
        progress = ProgressIndicator()

    x = train.rescale(optimize_steps.units)
    steps = sp.asarray(optimize_steps)

    N = len(train)
    C = {}

    bins = sp.linspace(x.t_start, x.t_stop, 1025)
    dt = float(bins[1] - bins[0])
    y_hist = sp.histogram(x, bins)[0] / N / dt
    for step in steps:
        s = float(step)
        yh = _hist_density(y_hist, gauss_kernel, step,
                           train.t_start, train.t_stop)

        # Equation from Matlab code, 7/2012
        c = (sp.sum(yh**2) * dt -
             2 * sp.sum(yh * y_hist) * dt +
             2 * 1 / sp.sqrt(2 * sp.pi) / s / N)
        C[s] = c * N * N
        progress.step()

    # Return kernel size with smallest cost
    return min(C, key=C.get) * optimize_steps.units
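All three variants minimize the same cost from Shimazaki and Shinomoto
(2010): for a kernel width w and N spikes, C(w) = N^2 * (sum(yh^2) * dt -
2 * sum(yh * y) * dt) + 2 * N / (sqrt(2 * pi) * w), where y is the spike
histogram normalized by N * dt and yh is its kernel-smoothed version. A
standalone sketch of that computation (the names here are illustrative):

import numpy as np

def ss_cost(y_hist, y_smoothed, dt, width, n_spikes):
    # Shimazaki-Shinomoto cost for one candidate kernel width, matching
    # the loop body above (c is scaled by N^2 before being stored).
    c = (np.sum(y_smoothed ** 2) * dt
         - 2 * np.sum(y_smoothed * y_hist) * dt
         + 2 / (np.sqrt(2 * np.pi) * width * n_spikes))
    return c * n_spikes * n_spikes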
Example #4
def get_refperiod_violations(spike_trains, refperiod, progress=None):
    """ Return the refractory period violations in the given spike trains
    for the specified refractory period.

    :param dict spike_trains: Dictionary of lists of
        :class:`neo.core.SpikeTrain` objects.
    :param refperiod: The refractory period (time).
    :type refperiod: Quantity scalar
    :param progress: Set this parameter to report progress.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Two values:

        * The total number of violations.
        * A dictionary (with the same indices as ``spike_trains``) of
          arrays with violation times (Quantity 1D with the same unit as
          ``refperiod``) for each spike train.
    :rtype: int, dict """
    if type(refperiod) != pq.Quantity or \
            refperiod.simplified.dimensionality != pq.s.dimensionality:
        raise ValueError('refperiod must be a time quantity!')

    if not progress:
        progress = ProgressIndicator()

    total_violations = 0
    violations = {}
    for u, tL in spike_trains.iteritems():
        violations[u] = []
        for i, t in enumerate(tL):
            st = t.copy()
            st.sort()
            isi = sp.diff(st)

            violations[u].append(st[isi < refperiod].rescale(refperiod.units))
            total_violations += len(violations[u][i])

            progress.step()

    return total_violations, violations
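A usage sketch with toy data (the unit labels and spike times are made up);
note the function targets Python 2 (``iteritems``) and expects ``pq``,
``sp`` and ProgressIndicator from the snippet's environment:

import quantities as pq
import neo

spike_trains = {
    'unit1': [neo.SpikeTrain([1.0, 1.0015, 5.0] * pq.s, t_stop=10.0 * pq.s)],
    'unit2': [neo.SpikeTrain([2.0, 4.0] * pq.s, t_stop=10.0 * pq.s)],
}

# 'unit1' contains one inter-spike interval of 1.5 ms, below 2 ms.
total, violations = get_refperiod_violations(spike_trains, 0.002 * pq.s)
print(total)                # 1
print(violations['unit1'])  # violation times for that unit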
Example #5
def get_refperiod_violations(spike_trains, refperiod, progress=None):
    """ Return the refractory period violations in the given spike trains
    for the specified refractory period.

    :param dict spike_trains: Dictionary of lists of
        :class:`neo.core.SpikeTrain` objects.
    :param refperiod: The refractory period (time).
    :type refperiod: Quantity scalar
    :param progress: Set this parameter to report progress.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Two values:

        * The total number of violations.
        * A dictionary (with the same indices as ``spike_trains``) of
          arrays with violation times (Quantity 1D with the same unit as
          ``refperiod``) for each spike train.
    :rtype: int, dict """
    if type(refperiod) != pq.Quantity or refperiod.simplified.dimensionality != pq.s.dimensionality:
        raise ValueError("refperiod must be a time quantity!")

    if not progress:
        progress = ProgressIndicator()

    total_violations = 0
    violations = {}
    for u, tL in spike_trains.iteritems():
        violations[u] = []
        for i, t in enumerate(tL):
            st = t.copy()
            st.sort()
            isi = sp.diff(st)

            violations[u].append(st[isi < refperiod].rescale(refperiod.units))
            total_violations += len(violations[u][i])

            progress.step()

    return total_violations, violations
Example #6
def spike_amplitude_histogram(trains, num_bins, uniform_y_scale=True, unit=pq.uV, progress=None):
    """ Return a spike amplitude histogram.

    The result is useful to assess the drift in spike amplitude over a
    longer recording. It shows histograms (one for each ``trains`` entry,
    e.g. segment) of maximum and minimum spike amplitudes.

    :param list trains: A list of lists of :class:`neo.core.SpikeTrain`
        objects. Each entry of the outer list will be one point on the
        x-axis (they could correspond to segments); all amplitude occurrences
        of spikes contained in the inner list will be added up.
    :param int num_bins: Number of bins for the histograms.
    :param bool uniform_y_scale: If True, the histogram for each channel
        will use the same bins. Otherwise, the minimum bin range is computed
        separately for each channel.
    :param Quantity unit: Unit of Y-Axis.
    :param progress: Set this parameter to report progress.
    :type progress: :class:`spykeutils.progress_indicator.ProgressIndicator`
    :return: A tuple with three values:

        * A three-dimensional histogram matrix, where the first dimension
          corresponds to bins, the second dimension to the entries of
          ``trains`` (e.g. segments) and the third dimension to channels.
        * A list of the minimum amplitude value for each channel (all values
          will be equal if ``uniform_y_scale`` is true).
        * A list of the maximum amplitude value for each channel (all values
          will be equal if ``uniform_y_scale`` is true).
    :rtype: (ndarray, list, list)
    """
    if not progress:
        progress = ProgressIndicator()

    num_channels = 1
    for t in trains:
        if not t:
            continue
        num_channels = t[0].waveforms.shape[2]
        break

    progress.set_ticks(2 * len(trains))
    progress.set_status("Calculating Spike Amplitude Histogram")

    # Find maximum and minimum amplitudes on all channels
    up = [0] * num_channels
    down = [0] * num_channels
    for t in trains:
        for s in t:
            if s.waveforms is None:
                continue
            if s.waveforms.shape[2] != num_channels:
                raise SpykeException(
                    "All spikes need to have the same number of channels "
                    "for Spike Amplitude Histogram!"
                )
            a = sp.asarray(s.waveforms.rescale(unit))
            u = a.max(1)
            d = a.min(1)
            for c in xrange(num_channels):
                up[c] = max(up[c], sp.stats.mstats.mquantiles(u[:, c], [0.999])[0])
                down[c] = min(down[c], sp.stats.mstats.mquantiles(d[:, c], [0.001])[0])
            progress.step()

    if uniform_y_scale:
        up = [max(up)] * num_channels
        down = [min(down)] * num_channels

    # Create histogram
    bins = [sp.linspace(down[c], up[c], num_bins + 1) for c in xrange(num_channels)]
    hist = sp.zeros((num_bins, len(trains), num_channels))
    for i, t in enumerate(trains):
        for s in t:
            if s.waveforms is None:
                continue
            a = sp.asarray(s.waveforms.rescale(unit))
            upper = a.max(1)
            lower = a.min(1)
            for c in xrange(num_channels):
                hist[:, i, c] += sp.histogram(upper[:, c], bins[c])[0]
                hist[:, i, c] += sp.histogram(lower[:, c], bins[c])[0]
        progress.step()

    return hist, down, up
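A toy usage sketch; the snippet reads the channel count from
``waveforms.shape[2]``, so the waveforms below are shaped
(spike, sample, channel) to match. All data are made up, and Python 2
(``xrange``) plus scipy as ``sp`` are assumed:

import numpy as np
import quantities as pq
import neo

def make_train(n_spikes):
    # Random waveforms: n_spikes spikes, 32 samples, 2 channels.
    wf = np.random.randn(n_spikes, 32, 2) * 50.0 * pq.uV
    times = np.sort(np.random.rand(n_spikes)) * pq.s
    return neo.SpikeTrain(times, t_stop=1.0 * pq.s, waveforms=wf)

# Three segments, each with one spike train.
segments = [[make_train(100)], [make_train(120)], [make_train(80)]]
hist, lower, upper = spike_amplitude_histogram(segments, 40)
print(hist.shape)  # (40, 3, 2): bins x segments x channels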
Example #7
def correlogram(trains, bin_size, max_lag=500 * pq.ms, border_correction=True,
                per_second=True, unit=pq.ms, progress=None):
    """ Return (cross-)correlograms from a dictionary of spike train
    lists for different units.

    :param dict trains: Dictionary of :class:`neo.core.SpikeTrain` lists.
    :param bin_size: Bin size (time).
    :type bin_size: Quantity scalar
    :param max_lag: Cut off (end time of calculated correlogram).
    :type max_lag: Quantity scalar
    :param bool border_correction: Apply correction for less data at higher
        time lags. Not perfect for bin_size != 1*``unit``, especially with
        large ``max_lag`` compared to length of spike trains.
    :param bool per_second: If ``True``, counts returned are per second.
        Otherwise, counts per spike train are returned.
    :param Quantity unit: Unit of X-Axis.
    :param progress: A ProgressIndicator object for the operation.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Two values:

        * An ordered dictionary indexed with the indices of ``trains`` of
          ordered dictionaries indexed with the same indices. Entries of
          the inner dictionaries are the resulting (cross-)correlograms as
          numpy arrays. All cross-correlograms can be indexed in two
          different ways: ``c[index1][index2]`` and ``c[index2][index1]``.
        * The bins used for the correlogram calculation.
    :rtype: dict, Quantity 1D
    """
    if not progress:
        progress = ProgressIndicator()

    bin_size = bin_size.rescale(unit)
    max_lag = max_lag.rescale(unit)

    # Create bins, making sure that 0 is at the center of central bin
    half_bins = sp.arange(bin_size / 2, max_lag, bin_size)
    all_bins = list(reversed(-half_bins))
    all_bins.extend(half_bins)
    bins = sp.array(all_bins) * unit
    middle_bin = len(bins) / 2 - 1

    indices = trains.keys()
    num_trains = len(trains[indices[0]])
    if not num_trains:
        raise SpykeException('Could not create correlogram: No spike trains!')
    for u in range(1, len(indices)):
        if len(trains[indices[u]]) != num_trains:
            raise SpykeException('Could not create correlogram: All units ' +
                                 'need the same number of spike trains!')

    progress.set_ticks(sp.sum(range(len(trains) + 1) * num_trains))

    corrector = 1
    if border_correction:
        # Need safe min/max functions
        def safe_max(seq):
            if len(seq) < 1:
                return 0
            return max(seq)

        def safe_min(seq):
            if len(seq) < 1:
                return 2 ** 22  # Some arbitrary large value
            return min(seq)

        max_w = max([max([safe_max(t) for t in l])
                     for l in trains.itervalues()])
        min_w = min([min([safe_min(t) for t in l])
                     for l in trains.itervalues()])

        train_length = (max_w - min_w)
        l = int(round(middle_bin)) + 1
        cE = max(train_length - (l * bin_size) + 1 * unit, 1 * unit)

        corrector = (train_length / sp.concatenate(
            (sp.linspace(cE, train_length, l - 1, False),
             sp.linspace(train_length, cE, l)))).magnitude

    correlograms = OrderedDict()
    for i1 in xrange(len(indices)):  # For each index
        # For all later indices, including itself
        for i2 in xrange(i1, len(indices)):
            histogram = sp.zeros(len(bins) - 1)
            for t in xrange(num_trains):
                train1 = trains[indices[i1]][t].rescale(unit).reshape((1, -1))
                train2 = trains[indices[i2]][t].rescale(unit).reshape((-1, 1))
                histogram += sp.histogram(
                    sp.subtract(train1, train2), bins=bins)[0]
                if i1 == i2:  # Correction for autocorrelogram
                    histogram[middle_bin] -= len(train2)
                progress.step()

            if per_second:
                l = train1.t_stop - train1.t_start
                if train2.t_stop - train2.t_start != l:
                    raise SpykeException(
                        'A spike train pair does not have equal length, '
                        'cannot calculate count per second.')
                histogram /= l.rescale(pq.s)

            crg = corrector * histogram / num_trains
            if indices[i1] not in correlograms:
                correlograms[indices[i1]] = OrderedDict()
            correlograms[indices[i1]][indices[i2]] = crg
            if i1 != i2:
                if indices[i2] not in correlograms:
                    correlograms[indices[i2]] = OrderedDict()
                correlograms[indices[i2]][indices[i1]] = crg[::-1]

    return correlograms, bins
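A usage sketch with two units and one trial each (keys and spike times are
arbitrary toy values; ``per_second=False`` returns raw counts per spike
train pair instead of rates):

import quantities as pq
import neo

trains = {
    'a': [neo.SpikeTrain([0.1, 0.5, 0.9] * pq.s, t_stop=1.0 * pq.s)],
    'b': [neo.SpikeTrain([0.12, 0.55, 0.88] * pq.s, t_stop=1.0 * pq.s)],
}

correlograms, bins = correlogram(
    trains, bin_size=20 * pq.ms, max_lag=200 * pq.ms, per_second=False)

# Symmetric access: ['a']['b'] is the reverse of ['b']['a'].
print(correlograms['a']['b'])
print(bins)  # bin edges in ms, centered on zero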
Example #8
def spike_amplitude_histogram(trains, num_bins, uniform_y_scale=True,
                              unit=pq.uV, progress=None):
    """ Return a spike amplitude histogram.

    The result is useful to assess the drift in spike amplitude over a
    longer recording. It shows histograms (one for each ``trains`` entry,
    e.g. segment) of maximum and minimum spike amplitudes.

    :param list trains: A list of lists of :class:`neo.core.SpikeTrain`
        objects. Each entry of the outer list will be one point on the
        x-axis (they could correspond to segments); all amplitude occurrences
        of spikes contained in the inner list will be added up.
    :param int num_bins: Number of bins for the histograms.
    :param bool uniform_y_scale: If True, the histogram for each channel
        will use the same bins. Otherwise, the minimum bin range is computed
        separately for each channel.
    :param Quantity unit: Unit of Y-Axis.
    :param progress: Set this parameter to report progress.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :return: A tuple with three values:

        * A three-dimensional histogram matrix, where the first dimension
          corresponds to bins, the second dimension to the entries of
          ``trains`` (e.g. segments) and the third dimension to channels.
        * A list of the minimum amplitude value for each channel (all values
          will be equal if ``uniform_y_scale`` is true).
        * A list of the maximum amplitude value for each channel (all values
          will be equal if ``uniform_y_scale`` is true).
    :rtype: (ndarray, list, list)
    """
    if not progress:
        progress = ProgressIndicator()

    num_channels = 1
    for t in trains:
        if not t:
            continue
        num_channels = t[0].waveforms.shape[2]
        break

    progress.set_ticks(2*len(trains))
    progress.set_status('Calculating Spike Amplitude Histogram')

    # Find maximum and minimum amplitudes on all channels
    up = [0] * num_channels
    down = [0] * num_channels
    for t in trains:
        for s in t:
            if s.waveforms is None:
                continue
            if s.waveforms.shape[2] != num_channels:
                raise SpykeException('All spikes need to have the same '
                                     'number of channels for Spike '
                                     'Amplitude Histogram!')
            a = sp.asarray(s.waveforms.rescale(unit))
            u = a.max(1)
            d = a.min(1)
            for c in xrange(num_channels):
                up[c] = max(up[c], sp.stats.mstats.mquantiles(
                    u[:,c], [0.999])[0])
                down[c] = min(down[c], sp.stats.mstats.mquantiles(
                    d[:,c], [0.001])[0])
            progress.step()

    if uniform_y_scale:
        up = [max(up)] * num_channels
        down = [min(down)] * num_channels

    # Create histogram
    bins = [sp.linspace(down[c],up[c], num_bins+1)
            for c in xrange(num_channels)]
    hist = sp.zeros((num_bins, len(trains), num_channels))
    for i, t in enumerate(trains):
        for s in t:
            if s.waveforms is None:
                continue
            a = sp.asarray(s.waveforms.rescale(unit))
            upper = a.max(1)
            lower = a.min(1)
            for c in xrange(num_channels):
                hist[:,i,c] += sp.histogram(upper[:,c], bins[c])[0]
                hist[:,i,c] += sp.histogram(lower[:,c], bins[c])[0]
        progress.step()

    return hist, down, up