# Imports assumed by the functions below; the relative module paths for the
# quantity-aware scipy wrappers (imported as ``spq``) and the kernel module
# (imported as ``sigproc``) are a best guess and may differ in the actual
# package layout. The helpers ``maximum_spike_train_interval`` and
# ``_create_matrix_from_indexed_function`` are expected to be defined
# elsewhere in the package.
import neo
import numpy
import quantities as pq
import scipy as sp

from . import _scipy_quantities as spq
from . import signal_processing as sigproc


def concatenate_spike_trains(trains):
    """ Concatenates spike trains.

    :param sequence trains: :class:`neo.core.SpikeTrain` objects to
        concatenate.
    :returns: A spike train consisting of the concatenated spike trains. The
        spikes will be in the order of the given spike trains and ``t_start``
        and ``t_stop`` will be set to the minimum ``t_start`` and maximum
        ``t_stop`` of the given trains.
    :rtype: :class:`neo.core.SpikeTrain`
    """

    t_start, t_stop = maximum_spike_train_interval({0: trains})
    return neo.SpikeTrain(
        spq.concatenate([train.view(type=pq.Quantity) for train in trains]),
        t_start=t_start, t_stop=t_stop)
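

# A minimal usage sketch for ``concatenate_spike_trains``; the spike times,
# units, and boundaries below are made up purely for illustration.
def _example_concatenate_spike_trains():
    a = neo.SpikeTrain([1.0, 2.0] * pq.s, t_stop=3.0 * pq.s)
    b = neo.SpikeTrain([0.5, 2.5] * pq.s, t_stop=4.0 * pq.s)
    merged = concatenate_spike_trains([a, b])
    # The spikes keep the order of the input trains (unsorted) and the result
    # spans the minimal t_start and maximal t_stop of the inputs.
    return merged

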
def event_synchronization(
        trains, tau=None,
        kernel=sigproc.RectangularKernel(1.0, normalize=False), sort=True):
    """ event_synchronization(trains, tau=None, kernel=signal_processing.RectangularKernel(1.0, normalize=False), sort=True)

    Calculates the event synchronization.

    Let :math:`d(x|y)` be the count of spikes in :math:`y` which occur shortly
    before an event in :math:`x` with a time difference of less than
    :math:`\\tau`. Moreover, let :math:`n_x` and :math:`n_y` be the number of
    total spikes in the spike trains :math:`x` and :math:`y`. The event
    synchrony is then defined as :math:`Q_T = \\frac{d(x|y)
    + d(y|x)}{\\sqrt{n_x n_y}}`.

    The maximum time lag :math:`\\tau` can be determined automatically for
    each pair of spikes :math:`t^x_i` and :math:`t^y_j` by the formula
    :math:`\\tau_{ij} = \\frac{1}{2} \\min\\{t^x_{i+1} - t^x_i, t^x_i - t^x_{i-1},
    t^y_{j+1} - t^y_j, t^y_j - t^y_{j-1}\\}`.

    Further and more detailed information can be found in
    *Quiroga, R. Q., Kreuz, T., & Grassberger, P. (2002). Event
    synchronization: a simple and fast method to measure synchronicity and time
    delay patterns. Physical Review E, 66(4), 041904.*

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects of
        which the event synchronization will be calculated pairwise.
    :param tau: The maximum time lag for two spikes to be considered coincident
        or synchronous as time scalar. Set it to `None` to have it determined
        automatically by the formula above.
    :type tau: Quantity scalar
    :param kernel: Kernel to use in the calculation of the synchronization.
    :type kernel: :class:`.signal_processing.Kernel`
    :param bool sort: Spike trains with sorted spike times are needed for
        the calculation. You can set `sort` to `False` to decrease calculation
        time if you know that your spike trains are already sorted.
    :returns: Matrix containing the event synchronization for all pairs of spike
        trains.
    :rtype: 2-D array
    """

    trains = [st.view(type=pq.Quantity) for st in trains]
    if sort:
        trains = [sp.sort(st) for st in trains]

    if tau is None:
        # Pad the inter-spike intervals with infinity so that the first and
        # last spike of each train only have one finite neighboring interval.
        inf_array = sp.array([sp.inf])
        isis = [spq.concatenate(
                (inf_array * st.units, sp.diff(st), inf_array * st.units))
                for st in trains]
        # For each spike, take the smaller of the preceding and following ISI.
        auto_taus = [spq.minimum(t[:-1], t[1:]) for t in isis]

    def compute(i, j):
        if i == j:
            return 1.0
        else:
            if tau is None:
                tau_mat = spq.minimum(*spq.meshgrid(
                    auto_taus[i], auto_taus[j])) / 2.0
            else:
                tau_mat = sp.tile(tau, (trains[j].size, trains[i].size))
            coincidence = sp.sum(kernel(
                (trains[i] - sp.atleast_2d(trains[j]).T) / tau_mat))
            normalization = 1.0 / sp.sqrt(trains[i].size * trains[j].size)
            return normalization * coincidence

    return _create_matrix_from_indexed_function(
        (len(trains), len(trains)), compute, kernel.is_symmetric())
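

# A small usage sketch for ``event_synchronization``; the two spike trains and
# the 0.1 s coincidence window are made-up example values.
def _example_event_synchronization():
    a = neo.SpikeTrain([1.0, 2.0, 3.0] * pq.s, t_stop=4.0 * pq.s)
    b = neo.SpikeTrain([1.05, 2.5, 3.02] * pq.s, t_stop=4.0 * pq.s)
    # With a fixed maximum lag of 0.1 s, two of the three spikes in each train
    # have a counterpart within the window; passing tau=None instead uses the
    # adaptive lag formula from the docstring.
    return event_synchronization([a, b], tau=0.1 * pq.s)

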
def _merge_trains_and_label_spikes(trains):
    """ Merges a sequence of spike trains into a single sorted array of spike
    times and additionally returns an array labeling each spike with the index
    of the train it originated from.
    """
    labels = sp.concatenate(
        [sp.zeros(st.size, dtype=int) + i for i, st in enumerate(trains)])
    trains = spq.concatenate([st.view(type=pq.Quantity) for st in trains])
    sorted_indices = sp.argsort(trains)
    return trains[sorted_indices], labels[sorted_indices]
def gen_homogeneous_poisson(rate,
                            t_start=0 * pq.s,
                            t_stop=None,
                            max_spikes=None,
                            refractory=0 * pq.s):
    """ Generate a homogeneous Poisson spike train. The length is controlled
    with `t_stop` and `max_spikes`; at least one of these arguments has to be
    given.

    :param rate: Average firing rate of the spike train to generate as
        frequency scalar.
    :type rate: Quantity scalar
    :param t_start: Time at which the spike train begins as time scalar. The
        first spike will occur after this time.
    :type t_start: Quantity scalar
    :param t_stop: Time at which the spike train ends as time scalar. All
        generated spikes will occur at or before this time. If set to None,
        the number of generated spikes is controlled by `max_spikes` and
        `t_stop` will be equal to the last generated spike.
    :type t_stop: Quantity scalar
    :param max_spikes: Maximum number of spikes to generate. Fewer spikes might
        be generated if `t_stop` is also set.
    :type max_spikes: int
    :param refractory: Absolute refractory period as time scalar. No spike will
        follow another spike within the given duration; afterwards, the firing
        rate instantaneously returns to `rate`.
    :type refractory: Quantity scalar

    :returns: The generated spike train.
    :rtype: :class:`neo.core.SpikeTrain`
    """

    if t_stop is None and max_spikes is None:
        raise ValueError('Either t_stop or max_spikes has to be set.')

    if max_spikes is not None:
        spike_times = sp.cumsum(numpy.random.exponential(
            rate**-1, max_spikes)) * (rate.units**-1).simplified
        spike_times += t_start
        if refractory > 0:
            spike_times += sp.arange(spike_times.size) * refractory
        if t_stop is not None:
            spike_times = spike_times[spike_times <= t_stop]
    else:
        scale = (rate**-1).rescale(t_stop.units)
        trains = []
        last_spike = t_start.rescale(t_stop.units)
        while last_spike < t_stop:
            # Generate a bit more than the expected number of spikes so that
            # a single loop iteration suffices in most cases. The factor was
            # determined empirically.
            num_spikes = int(1.7 *
                             ((t_stop - last_spike) * rate).simplified) + 1
            train = sp.cumsum(numpy.random.exponential(scale, num_spikes)) * \
                scale.units + last_spike
            if refractory > 0:
                train += sp.arange(train.size) * refractory
            if train.size > 0:
                last_spike = train[-1]
                if last_spike >= t_stop:
                    train = train[train < t_stop]
                trains.append(train)
        spike_times = spq.concatenate(trains)

    if t_stop is None:
        t_stop = spike_times[-1]
    return neo.SpikeTrain(spike_times, t_start=t_start, t_stop=t_stop)
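

# A usage sketch for ``gen_homogeneous_poisson``; the rate, duration, and
# refractory period are arbitrary example values.
def _example_gen_homogeneous_poisson():
    # About 10 Hz for 2 s with a 2 ms absolute refractory period; roughly
    # 20 spikes are expected on average.
    train = gen_homogeneous_poisson(
        10 * pq.Hz, t_stop=2 * pq.s, refractory=2 * pq.ms)
    # Alternatively, cap the number of spikes instead of the duration.
    capped = gen_homogeneous_poisson(10 * pq.Hz, max_spikes=50)
    return train, capped

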