Example no. 1
def optimal_gauss_kernel_size(train, optimize_steps, progress=None):
    """ Return the optimal kernel size for a spike density estimation
    of a spike train for a Gaussian kernel. This function takes a single
    spike train, which can be a superposition of multiple spike trains
    (created with :func:`collapsed_spike_trains`) that should be included
    in a spike density estimation.

    Implements the algorithm from
    (Shimazaki, Shinomoto. Journal of Computational Neuroscience. 2010).

    :param train: The spike train for which the kernel
        size should be optimized.
    :type train: :class:`neo.core.SpikeTrain`
    :param optimize_steps: Array of kernel sizes to try (the best of
        these sizes will be returned).
    :type optimize_steps: Quantity 1D
    :param progress: Set this parameter to report progress. Will be
        advanced by len(`optimize_steps`) steps.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Best of the given kernel sizes
    :rtype: Quantity scalar
    """
    if not progress:
        progress = ProgressIndicator()

    x = train.rescale(optimize_steps.units)

    N = len(train)
    C = {}

    sampling_rate = 1024.0 / (x.t_stop - x.t_start)
    dt = float(1.0 / sampling_rate)
    y_hist = tools.bin_spike_trains({0: [x]}, sampling_rate)[0][0][0]
    y_hist = sp.asfarray(y_hist) / N / dt
    for step in optimize_steps:
        s = float(step)
        yh = sigproc.smooth(y_hist,
                            sigproc.GaussianKernel(2 * step),
                            sampling_rate,
                            num_bins=2048,
                            ensure_unit_area=True) * optimize_steps.units

        # Equation from Matlab code, 7/2012
        c = (sp.sum(yh**2) * dt - 2 * sp.sum(yh * y_hist) * dt +
             2 * 1 / sp.sqrt(2 * sp.pi) / s / N)
        C[s] = c * N * N
        progress.step()

    # Return kernel size with smallest cost
    return min(C, key=C.get) * optimize_steps.units
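A minimal usage sketch for the function above (hypothetical data; assumes `neo`, `quantities`, and `numpy` are available and that the function itself is importable):

import numpy as np
import quantities as pq
import neo

# Hypothetical spike train: 100 random spike times in [0, 2] seconds.
times = np.sort(np.random.uniform(0.0, 2.0, 100)) * pq.s
train = neo.SpikeTrain(times, t_start=0.0 * pq.s, t_stop=2.0 * pq.s)

# Candidate kernel widths; the best one is returned as a Quantity scalar.
candidate_widths = pq.Quantity(np.arange(10.0, 500.0, 10.0), 'ms')
best_width = optimal_gauss_kernel_size(train, candidate_widths)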
Example no. 2
def optimal_gauss_kernel_size(train, optimize_steps, progress=None):
    """ Return the optimal kernel size for a spike density estimation
    of a spike train for a Gaussian kernel. This function takes a single
    spike train, which can be a superposition of multiple spike trains
    (created with :func:`collapsed_spike_trains`) that should be included
    in a spike density estimation.

    Implements the algorithm from
    (Shimazaki, Shinomoto. Journal of Computational Neuroscience. 2010).

    :param train: The spike train for which the kernel
        size should be optimized.
    :type train: :class:`neo.core.SpikeTrain`
    :param optimize_steps: Array of kernel sizes to try (the best of
        these sizes will be returned).
    :type optimize_steps: Quantity 1D
    :param progress: Set this parameter to report progress. Will be
        advanced by len(`optimize_steps`) steps.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Best of the given kernel sizes
    :rtype: Quantity scalar
    """
    if not progress:
        progress = ProgressIndicator()

    x = train.rescale(optimize_steps.units)

    N = len(train)
    C = {}

    sampling_rate = 1024.0 / (x.t_stop - x.t_start)
    dt = float(1.0 / sampling_rate)
    y_hist = tools.bin_spike_trains({0: [x]}, sampling_rate)[0][0][0]
    y_hist = sp.asfarray(y_hist) / N / dt
    for step in optimize_steps:
        s = float(step)
        yh = sigproc.smooth(
            y_hist, sigproc.GaussianKernel(2 * step), sampling_rate, num_bins=2048,
            ensure_unit_area=True) * optimize_steps.units

        # Equation from Matlab code, 7/2012
        c = (sp.sum(yh ** 2) * dt -
             2 * sp.sum(yh * y_hist) * dt +
             2 * 1 / sp.sqrt(2 * sp.pi) / s / N)
        C[s] = c * N * N
        progress.step()

    # Return kernel size with smallest cost
    return min(C, key=C.get) * optimize_steps.units
Example no. 3
def optimal_gauss_kernel_size(train, optimize_steps, progress=None):
    """ Return the optimal kernel size for a spike density estimation
    of a spike train for a Gaussian kernel. This function takes a single
    spike train, which can be a superposition of multiple spike trains
    (created with :func:`collapsed_spike_trains`) that should be included
    in a spike density estimation.

    Implements the algorithm from
    (Shimazaki, Shinomoto. Journal of Computational Neuroscience. 2010).

    :param train: The spike train for which the kernel
        size should be optimized.
    :type train: :class:`neo.core.SpikeTrain`
    :param optimize_steps: Array of kernel sizes to try (the best of
        these sizes will be returned).
    :type optimize_steps: Quantity 1D
    :param progress: Set this parameter to report progress. Will be
        advanced by len(`optimize_steps`) steps.
    :type progress: :class:`spykeutils.progress_indicator.ProgressIndicator`
    :returns: Best of the given kernel sizes
    :rtype: Quantity scalar
    """
    if not progress:
        progress = ProgressIndicator()

    x = train.rescale(optimize_steps.units)
    steps = sp.asarray(optimize_steps)

    N = len(train)
    C = {}

    bins = sp.linspace(x.t_start, x.t_stop, 1025)
    dt = float(bins[1] - bins[0])
    y_hist = sp.histogram(x, bins)[0] / N / dt
    for step in steps:
        s = float(step)
        yh = _hist_density(y_hist, gauss_kernel, step,
            train.t_start, train.t_stop)

        # Equation from Matlab code, 7/2012
        c = (sp.sum(yh**2) * dt -
             2 * sp.sum(yh * y_hist) * dt +
             2 * 1 / sp.sqrt(2 * sp.pi) / s / N)
        C[s] = c * N * N
        progress.step()

    # Return kernel size with smallest cost
    return min(C, key=C.get)*train.units
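For reference, the quantity minimized in the loop appears to be the leave-one-out cross-validation cost for kernel rate estimation from Shimazaki & Shinomoto (2010); the final multiplication by $N^2$ rescales every cost by the same factor and does not change the minimizer. A reconstruction of what the "Equation from Matlab code" comment computes:

\[
C_N(w) = \int \hat\lambda_w(t)^2 \, dt \;-\; \frac{2}{N} \sum_{i=1}^{N} \hat\lambda_w(t_i) \;+\; \frac{2}{N}\, k_w(0),
\qquad k_w(0) = \frac{1}{\sqrt{2\pi}\, w},
\]

where $\hat\lambda_w$ is the per-spike-normalized density estimate (`yh` in the code) and the last term is the Gaussian kernel evaluated at zero lag, matching `2 * 1 / sp.sqrt(2 * sp.pi) / s / N`.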
Example no. 4
def main(config_filename):
    """
    The main simulation!
    For every configuration permutation, create a SimulatedUser object, and run the simulation (the while loop).
    Then save, report, and repeat ad nauseam.
    """
    logging.basicConfig(filename='sim.log', level=logging.DEBUG)
    config_reader = SimulationConfigReader(config_filename)

    for configuration in config_reader:
        #print "Running experiment {base_id}...".format(base_id=configuration.base_id),

        user = SimulatedUser(configuration)
        progress = ProgressIndicator(configuration)
        configuration.output.display_config()

        while not configuration.user.logger.is_finished():
            #progress.update()  # Update the progress indicator in the terminal.
            user.decide_action()

        configuration.output.display_report()
        #print "complete."
        configuration.output.save()
        gc.collect()

    completed_file = open(
        os.path.join(config_reader.get_base_dir(), 'COMPLETED'), 'w')
    completed_file.close()
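A slightly tidier way to create the empty COMPLETED sentinel file is a `with` block, which also guarantees the handle is closed if anything goes wrong (a sketch; same behavior as the open/close pair above):

import os

# Create the empty sentinel file marking the run as finished.
with open(os.path.join(config_reader.get_base_dir(), 'COMPLETED'), 'w'):
    pass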
Example no. 5
def main(config_filename):
    """
    The main simulation!
    For every configuration permutation, create a SimulatedUser object, and run the simulation (the while loop).
    Then save, report, and repeat ad nauseam.
    """
    config_reader = SimulationConfigReader(config_filename)
    
    for configuration in config_reader:
        user = SimulatedUser(configuration)
        user.show_query_list()
        progress = ProgressIndicator(configuration)
        
        configuration.output.display_config()
        
        while not configuration.user.logger.is_finished():
            progress.update()  # Update the progress indicator in the terminal.
            user.decide_action()
        
        configuration.output.save()
        configuration.output.display_report()
Example no. 6
def main(config_filename):
    """
    The main simulation!
    For every configuration permutation, create a SimulatedUser object, and run the simulation (the while loop).
    Then save, report, and repeat ad nauseam.
    """
    config_reader = SimulationConfigReader(config_filename)

    for configuration in config_reader:
        user = SimulatedUser(configuration)
        user.show_query_list()
        progress = ProgressIndicator(configuration)

        configuration.output.display_config()

        while not configuration.user.logger.is_finished():
            progress.update()  # Update the progress indicator in the terminal.
            user.decide_action()

        configuration.output.save()
        configuration.output.display_report()
Example no. 7
def get_refperiod_violations(spike_trains, refperiod, progress=None):
    """ Return the refractory period violations in the given spike trains
    for the specified refractory period.

    :param dict spike_trains: Dictionary of lists of
        :class:`neo.core.SpikeTrain` objects.
    :param refperiod: The refractory period (time).
    :type refperiod: Quantity scalar
    :param progress: Set this parameter to report progress.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Two values:

        * The total number of violations.
        * A dictionary (with the same indices as ``spike_trains``) of
          arrays with violation times (Quantity 1D with the same unit as
          ``refperiod``) for each spike train.
    :rtype: int, dict """
    if type(refperiod) != pq.Quantity or \
            refperiod.simplified.dimensionality != pq.s.dimensionality:
        raise ValueError('refperiod must be a time quantity!')

    if not progress:
        progress = ProgressIndicator()

    total_violations = 0
    violations = {}
    for u, tL in spike_trains.iteritems():
        violations[u] = []
        for i, t in enumerate(tL):
            st = t.copy()
            st.sort()
            isi = sp.diff(st)

            violations[u].append(st[isi < refperiod].rescale(refperiod.units))
            total_violations += len(violations[u][i])

            progress.step()

    return total_violations, violations
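The core of the violation check is plain inter-spike-interval thresholding: sort the spike times, take first differences, and keep the first spike of each pair that falls within the refractory period. A minimal sketch on a bare numpy array (hypothetical values, no `neo` or `quantities`):

import numpy as np

spike_times_ms = np.array([10.0, 11.2, 30.0, 30.8, 95.0])  # hypothetical
refperiod_ms = 2.0

st = np.sort(spike_times_ms)
isi = np.diff(st)
# Keep the first spike of each violating pair, as in the function above.
violation_times = st[:-1][isi < refperiod_ms]  # -> array([10.0, 30.0])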
Example no. 8
def get_refperiod_violations(spike_trains, refperiod, progress=None):
    """ Return the refractory period violations in the given spike trains
    for the specified refractory period.

    :param dict spike_trains: Dictionary of lists of
        :class:`neo.core.SpikeTrain` objects.
    :param refperiod: The refractory period (time).
    :type refperiod: Quantity scalar
    :param progress: Set this parameter to report progress.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Two values:

        * The total number of violations.
        * A dictionary (with the same indices as ``spike_trains``) of
          arrays with violation times (Quantity 1D with the same unit as
          ``refperiod``) for each spike train.
    :rtype: int, dict """
    if type(refperiod) != pq.Quantity or refperiod.simplified.dimensionality != pq.s.dimensionality:
        raise ValueError("refperiod must be a time quantity!")

    if not progress:
        progress = ProgressIndicator()

    total_violations = 0
    violations = {}
    for u, tL in spike_trains.iteritems():
        violations[u] = []
        for i, t in enumerate(tL):
            st = t.copy()
            st.sort()
            isi = sp.diff(st)

            violations[u].append(st[isi < refperiod].rescale(refperiod.units))
            total_violations += len(violations[u][i])

            progress.step()

    return total_violations, violations
Example no. 9
def execute(input_output_pairs, processor_file, processor_config=None):
    """
    :param input_output_pairs: [(input_file, output_file)] list
    :param processor_file: the *processor.py absolute path
    :param processor_config: the config map that can be interpreted by the processors
    :return:
    """
    progress = ProgressIndicator(
        "{}_process_{}".format(processor_file, str(os.getpid())),
        int(len(input_output_pairs)))
    progress.start()
    log = ProcessedFileLogger(os.getcwd() + os.path.sep +
                              "execution_{}.log".format(str(os.getpid())))
    for (input_file, output_file) in input_output_pairs:
        try:
            execute_with_file(input_file, output_file, processor_file,
                              processor_config)
            log.processed(input_file)
            progress.next()
        except Exception as e:
            log.failed(input_file + "\t" + str(e))
            progress.next()
    progress.finish()
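A hypothetical invocation of the function above (all paths and the config key are made up for illustration):

pairs = [('/data/in/a.txt', '/data/out/a.txt'),
         ('/data/in/b.txt', '/data/out/b.txt')]
execute(pairs, '/opt/pipeline/text_processor.py',
        processor_config={'encoding': 'utf-8'})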
Example no. 10
def spike_density_estimation(trains, start=0*pq.ms, stop=None,
                             kernel=gauss_kernel, kernel_size=100*pq.ms,
                             optimize_steps=None, progress=None):
    """ Create a spike density estimation from a dictionary of
    lists of spike trains.

    The spike density estimation gives an estimate of the instantaneous
    rate. The density estimation is evaluated at 1024 equally spaced
    points covering the range of the input spike trains. Optionally finds
    optimal kernel size for given data using the algorithm from
    (Shimazaki, Shinomoto. Journal of Computational Neuroscience. 2010).

    :param dict trains: A dictionary of :class:`neo.core.SpikeTrain` lists.
    :param start: The desired time for the start of the estimation. It
        will be recalculated if there are spike trains which start later
        than this time. This parameter can be negative (which could be
        useful when aligning on events).
    :type start: Quantity scalar
    :param stop: The desired time for the end of the estimation. It will
        be recalculated if there are spike trains which end earlier
        than this time.
    :type stop: Quantity scalar
    :param func kernel: The kernel function to use, should accept
        two parameters: A ndarray of distances and a kernel size.
        The total area under the kernel function should be 1.
        Default: Gaussian kernel
    :param kernel_size: A uniform kernel size for all spike trains.
            Only used if optimization of kernel sizes is not used.
    :type kernel_size: Quantity scalar
    :param optimize_steps: An array of time lengths that will be
        considered in the kernel width optimization. Note that the
        optimization assumes a Gaussian kernel and will most likely
        not give the optimal kernel size if another kernel is used.
        If None, ``kernel_size`` will be used.
    :type optimize_steps: Quantity 1D
    :param progress: Set this parameter to report progress.
    :type progress: :class:`spykeutils.progress_indicator.ProgressIndicator`

    :returns: Three values:

        * A dictionary of the spike density estimations (Quantity 1D in
          Hz). Indexed the same as ``trains``.
        * A dictionary of kernel sizes (Quantity scalars). Indexed the
          same as ``trains``.
        * The used evaluation points.
    :rtype: dict, dict, Quantity 1D
    """
    if not progress:
        progress = ProgressIndicator()

    if optimize_steps is None or len(optimize_steps) < 1:
        units = kernel_size.units
    else:
        units = optimize_steps.units

    # Prepare evaluation points
    max_start, max_stop = minimum_spike_train_interval(trains)

    start = max(start, max_start)
    start.units = units
    if stop is not None:
        stop = min(stop, max_stop)
    else:
        stop = max_stop
    stop.units = units
    bins = sp.linspace(start, stop, 1025)
    eval_points = bins[:-1] + (bins[1] - bins[0]) / 2

    if optimize_steps is None or len(optimize_steps) < 1:
        kernel_size = {u:kernel_size for u in trains}
    else:
        # Find optimal kernel size for all spike train sets
        progress.set_ticks(len(optimize_steps)*len(trains))
        progress.set_status('Calculating optimal kernel size')
        kernel_size = {}
        for u,t in trains.iteritems():
            c = collapsed_spike_trains(t)
            kernel_size[u] = optimal_gauss_kernel_size(
                c.time_slice(start,stop), optimize_steps, progress)

    progress.set_ticks(len(trains))
    progress.set_status('Creating spike density plot')

    # Calculate KDEs
    kde = {}
    for u,t in trains.iteritems():
        # Collapse spike trains
        collapsed = collapsed_spike_trains(t).rescale(units)
        ksize = float(kernel_size[u])

        # Create density estimation using convolution
        kde[u] = _train_density(collapsed.time_slice(start, stop),
            kernel, ksize) / len(trains[u]) / units
        kde[u].units = pq.Hz
    return kde, kernel_size, eval_points
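Reading the code, the estimate returned for each unit $u$ appears to be the kernel-smoothed rate of the collapsed spike train, divided by the number of trains that were collapsed:

\[
\hat\lambda_u(t) = \frac{1}{M_u} \sum_{i=1}^{n_u} k_w(t - t_i),
\]

where $M_u$ is `len(trains[u])`, the $t_i$ are the $n_u$ spike times of the collapsed train, and $k_w$ has unit area, so the estimate integrates to roughly the mean spike count per train.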
Example no. 11
def spike_density_estimation(trains,
                             start=0 * pq.ms,
                             stop=None,
                             kernel=None,
                             kernel_size=100 * pq.ms,
                             optimize_steps=None,
                             progress=None):
    """ Create a spike density estimation from a dictionary of
    lists of spike trains.

    The spike density estimation gives an estimate of the instantaneous
    rate. The density estimation is evaluated at 1024 equally spaced
    points covering the range of the input spike trains. Optionally finds
    optimal kernel size for given data using the algorithm from
    (Shimazaki, Shinomoto. Journal of Computational Neuroscience. 2010).

    :param dict trains: A dictionary of :class:`neo.core.SpikeTrain` lists.
    :param start: The desired time for the start of the estimation. It
        will be recalculated if there are spike trains which start later
        than this time. This parameter can be negative (which could be
        useful when aligning on events).
    :type start: Quantity scalar
    :param stop: The desired time for the end of the estimation. It will
        be recalculated if there are spike trains which end earlier
        than this time.
    :type stop: Quantity scalar
    :param kernel: The kernel function or instance to use, should accept
        two parameters: A ndarray of distances and a kernel size.
        The total area under the kernel function should be 1.
        Automatic optimization assumes a Gaussian kernel and will
        likely not produce optimal results for different kernels.
        Default: Gaussian kernel
    :type kernel: func or :class:`.signal_processing.Kernel`
    :param kernel_size: A uniform kernel size for all spike trains.
            Only used if optimization of kernel sizes is not used.
    :type kernel_size: Quantity scalar
    :param optimize_steps: An array of time lengths that will be
        considered in the kernel width optimization. Note that the
        optimization assumes a Gaussian kernel and will most likely
        not give the optimal kernel size if another kernel is used.
        If None, ``kernel_size`` will be used.
    :type optimize_steps: Quantity 1D
    :param progress: Set this parameter to report progress.
    :type progress: :class:`.progress_indicator.ProgressIndicator`

    :returns: Three values:

        * A dictionary of the spike density estimations (Quantity 1D in
          Hz). Indexed the same as ``trains``.
        * A dictionary of kernel sizes (Quantity scalars). Indexed the
          same as ``trains``.
        * The used evaluation points.
    :rtype: dict, dict, Quantity 1D
    """
    if not progress:
        progress = ProgressIndicator()

    if optimize_steps is None or len(optimize_steps) < 1:
        units = kernel_size.units
    else:
        units = optimize_steps.units

    if kernel is None:
        kernel = sigproc.GaussianKernel(100 * pq.ms)

    # Prepare evaluation points
    max_start, max_stop = tools.minimum_spike_train_interval(trains)

    start = max(start, max_start)
    start.units = units
    if stop is not None:
        stop = min(stop, max_stop)
    else:
        stop = max_stop
    stop.units = units
    bins = sp.linspace(start, stop, 1025)
    eval_points = bins[:-1] + (bins[1] - bins[0]) / 2

    if optimize_steps is None or len(optimize_steps) < 1:
        kernel_size = {u: kernel_size for u in trains}
    else:
        # Find optimal kernel size for all spike train sets
        progress.set_ticks(len(optimize_steps) * len(trains))
        progress.set_status('Calculating optimal kernel size')
        kernel_size = {}
        for u, t in trains.iteritems():
            c = collapsed_spike_trains(t)
            kernel_size[u] = optimal_gauss_kernel_size(
                c.time_slice(start, stop), optimize_steps, progress)

    progress.set_ticks(len(trains))
    progress.set_status('Creating spike density plot')

    # Calculate KDEs
    kde = {}
    for u, t in trains.iteritems():
        # Collapse spike trains
        collapsed = collapsed_spike_trains(t).rescale(units)
        scaled_kernel = sigproc.as_kernel_of_size(kernel, kernel_size[u])

        # Create density estimation using convolution
        sliced = collapsed.time_slice(start, stop)
        sampling_rate = 1024.0 / (sliced.t_stop - sliced.t_start)
        kde[u] = sigproc.st_convolve(sliced,
                                     scaled_kernel,
                                     sampling_rate,
                                     kernel_discretization_params={
                                         'num_bins': 2048,
                                         'ensure_unit_area': True
                                     })[0] / len(trains[u])
        kde[u].units = pq.Hz
    return kde, kernel_size, eval_points
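A minimal usage sketch (hypothetical data; assumes `neo`, `quantities`, and `numpy` are available and that the function is importable):

import numpy as np
import quantities as pq
import neo

# Hypothetical data: one unit with two trials of random spikes.
trains = {'unit1': [
    neo.SpikeTrain(np.sort(np.random.uniform(0, 1, 40)) * pq.s, t_stop=1 * pq.s),
    neo.SpikeTrain(np.sort(np.random.uniform(0, 1, 35)) * pq.s, t_stop=1 * pq.s)]}

widths = pq.Quantity(np.arange(20.0, 300.0, 20.0), 'ms')
kde, sizes, points = spike_density_estimation(trains, optimize_steps=widths)
# kde['unit1'] is a Quantity 1D in Hz, evaluated at `points`.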
Example no. 12
def spike_amplitude_histogram(trains, num_bins, uniform_y_scale=True, unit=pq.uV, progress=None):
    """ Return a spike amplitude histogram.

    The resulting histogram is useful to assess the drift in spike amplitude over a
    longer recording. It shows histograms (one for each ``trains`` entry,
    e.g. segment) of maximum and minimum spike amplitudes.

    :param list trains: A list of lists of :class:`neo.core.SpikeTrain`
        objects. Each entry of the outer list will be one point on the
        x-axis (they could correspond to segments); all amplitude occurrences
        of spikes contained in the inner list will be added up.
    :param int num_bins: Number of bins for the histograms.
    :param bool uniform_y_scale: If True, the histogram for each channel
        will use the same bins. Otherwise, the minimum bin range is computed
        separately for each channel.
    :param Quantity unit: Unit of Y-Axis.
    :param progress: Set this parameter to report progress.
    :type progress: :class:`spykeutils.progress_indicator.ProgressIndicator`
    :return: A tuple with three values:

        * A three-dimensional histogram matrix, where the first dimension
          corresponds to bins, the second dimension to the entries of
          ``trains`` (e.g. segments) and the third dimension to channels.
        * A list of the minimum amplitude value for each channel (all values
          will be equal if ``uniform_y_scale`` is true).
        * A list of the maximum amplitude value for each channel (all values
          will be equal if ``uniform_y_scale`` is true).
    :rtype: (ndarray, list, list)
    """
    if not progress:
        progress = ProgressIndicator()

    num_channels = 1
    for t in trains:
        if not t:
            continue
        num_channels = t[0].waveforms.shape[2]
        break

    progress.set_ticks(2 * len(trains))
    progress.set_status("Calculating Spike Amplitude Histogram")

    # Find maximum and minimum amplitudes on all channels
    up = [0] * num_channels
    down = [0] * num_channels
    for t in trains:
        for s in t:
            if s.waveforms is None:
                continue
            if s.waveforms.shape[2] != num_channels:
                raise SpykeException(
                    "All spikes need to have the same number of channels "
                    "for Spike Amplitude Histogram!"
                )
            a = sp.asarray(s.waveforms.rescale(unit))
            u = a.max(1)
            d = a.min(1)
            for c in xrange(num_channels):
                up[c] = max(up[c], sp.stats.mstats.mquantiles(u[:, c], [0.999])[0])
                down[c] = min(down[c], sp.stats.mstats.mquantiles(d[:, c], [0.001])[0])
            progress.step()

    if uniform_y_scale:
        up = [max(up)] * num_channels
        down = [min(down)] * num_channels

    # Create histogram
    bins = [sp.linspace(down[c], up[c], num_bins + 1) for c in xrange(num_channels)]
    hist = sp.zeros((num_bins, len(trains), num_channels))
    for i, t in enumerate(trains):
        for s in t:
            if s.waveforms is None:
                continue
            a = sp.asarray(s.waveforms.rescale(unit))
            upper = a.max(1)
            lower = a.min(1)
            for c in xrange(num_channels):
                hist[:, i, c] += sp.histogram(upper[:, c], bins[c])[0]
                hist[:, i, c] += sp.histogram(lower[:, c], bins[c])[0]
        progress.step()

    return hist, down, up
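The 0.999 and 0.001 quantiles make the bin range robust to waveform outliers that a raw min/max would let dominate. A minimal numpy sketch of the same idea (hypothetical amplitudes; `np.percentile` used in place of `mquantiles`):

import numpy as np

amplitudes = np.concatenate([np.random.normal(50.0, 5.0, 1000),
                             [500.0]])  # one outlier

raw_top = amplitudes.max()                    # ~500, set by the outlier
robust_top = np.percentile(amplitudes, 99.9)  # stays near the bulk of the data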
Example no. 13
def correlogram(trains, bin_size, max_lag=500 * pq.ms, border_correction=True,
                per_second=True, unit=pq.ms, progress=None):
    """ Return (cross-)correlograms from a dictionary of spike train
    lists for different units.

    :param dict trains: Dictionary of :class:`neo.core.SpikeTrain` lists.
    :param bin_size: Bin size (time).
    :type bin_size: Quantity scalar
    :param max_lag: Cut off (end time of calculated correlogram).
    :type max_lag: Quantity scalar
    :param bool border_correction: Apply correction for less data at higher
        time lags. Not perfect for bin_size != 1*``unit``, especially with
        large ``max_lag`` compared to length of spike trains.
    :param bool per_second: If ``True``, counts returned are per second.
        Otherwise, counts per spike train are returned.
    :param Quantity unit: Unit of X-Axis.
    :param progress: A ProgressIndicator object for the operation.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Two values:

        * An ordered dictionary indexed with the indices of ``trains`` of
          ordered dictionaries indexed with the same indices. Entries of
          the inner dictionaries are the resulting (cross-)correlograms as
          numpy arrays. All crosscorrelograms can be indexed in two
          different ways: ``c[index1][index2]`` and ``c[index2][index1]``.
        * The bins used for the correlogram calculation.
    :rtype: dict, Quantity 1D
    """
    if not progress:
        progress = ProgressIndicator()

    # rescale() returns a copy, so keep the returned quantities.
    bin_size = bin_size.rescale(unit)
    max_lag = max_lag.rescale(unit)

    # Create bins, making sure that 0 is at the center of central bin
    half_bins = sp.arange(bin_size / 2, max_lag, bin_size)
    all_bins = list(reversed(-half_bins))
    all_bins.extend(half_bins)
    bins = sp.array(all_bins) * unit
    middle_bin = len(bins) / 2 - 1

    indices = trains.keys()
    num_trains = len(trains[indices[0]])
    if not num_trains:
        raise SpykeException('Could not create correlogram: No spike trains!')
    for u in range(1, len(indices)):
        if len(trains[indices[u]]) != num_trains:
            raise SpykeException('Could not create correlogram: All units ' +
                                 'need the same number of spike trains!')

    progress.set_ticks(sp.sum(range(len(trains) + 1) * num_trains))

    corrector = 1
    if border_correction:
        # Need safe min/max functions
        def safe_max(seq):
            if len(seq) < 1:
                return 0
            return max(seq)

        def safe_min(seq):
            if len(seq) < 1:
                return 2 ** 22  # Some arbitrary large value
            return min(seq)

        max_w = max([max([safe_max(t) for t in l])
                     for l in trains.itervalues()])
        min_w = min([min([safe_min(t) for t in l])
                     for l in trains.itervalues()])

        train_length = (max_w - min_w)
        l = int(round(middle_bin)) + 1
        cE = max(train_length - (l * bin_size) + 1 * unit, 1 * unit)

        corrector = (train_length / sp.concatenate(
            (sp.linspace(cE, train_length, l - 1, False),
             sp.linspace(train_length, cE, l)))).magnitude

    correlograms = OrderedDict()
    for i1 in xrange(len(indices)):  # For each index
        # For all later indices, including itself
        for i2 in xrange(i1, len(indices)):
            histogram = sp.zeros(len(bins) - 1)
            for t in xrange(num_trains):
                train1 = trains[indices[i1]][t].rescale(unit).reshape((1, -1))
                train2 = trains[indices[i2]][t].rescale(unit).reshape((-1, 1))
                histogram += sp.histogram(
                    sp.subtract(train1, train2), bins=bins)[0]
                if i1 == i2:  # Correction for autocorrelogram
                    histogram[middle_bin] -= len(train2)
                progress.step()

            if per_second:
                l = train1.t_stop - train1.t_start
                if train2.t_stop - train2.t_start != l:
                    raise SpykeException(
                        'A spike train pair does not have equal length, '
                        'cannot calculate count per second.')
                histogram /= l.rescale(pq.s)

            crg = corrector * histogram / num_trains
            if indices[i1] not in correlograms:
                correlograms[indices[i1]] = OrderedDict()
            correlograms[indices[i1]][indices[i2]] = crg
            if i1 != i2:
                if indices[i2] not in correlograms:
                    correlograms[indices[i2]] = OrderedDict()
                correlograms[indices[i2]][indices[i1]] = crg[::-1]

    return correlograms, bins
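The inner loop is essentially a histogram of all pairwise spike-time differences, with the zero-lag bin of an autocorrelogram reduced by the spike count to remove each spike's trivial coincidence with itself. A minimal sketch with plain numpy (hypothetical spike times in ms, 1 ms bins):

import numpy as np

t1 = np.array([5.0, 12.0, 20.0])
t2 = np.array([6.0, 13.0, 19.0])

bin_size, max_lag = 1.0, 10.0
half = np.arange(bin_size / 2.0, max_lag, bin_size)
bins = np.concatenate((-half[::-1], half))  # 0 sits at the center of the middle bin

# All pairwise differences t1 - t2, then a histogram over the lag bins.
diffs = np.subtract.outer(t1, t2).ravel()
counts = np.histogram(diffs, bins=bins)[0]
# For an autocorrelogram (t2 is t1), remove self-coincidences:
# counts[len(bins) // 2 - 1] -= len(t1)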
Example no. 14
def spike_amplitude_histogram(trains, num_bins, uniform_y_scale=True,
                              unit=pq.uV, progress=None):
    """ Return a spike amplitude histogram.

    The resulting histogram is useful to assess the drift in spike amplitude over a
    longer recording. It shows histograms (one for each ``trains`` entry,
    e.g. segment) of maximum and minimum spike amplitudes.

    :param list trains: A list of lists of :class:`neo.core.SpikeTrain`
        objects. Each entry of the outer list will be one point on the
        x-axis (they could correspond to segments); all amplitude occurrences
        of spikes contained in the inner list will be added up.
    :param int num_bins: Number of bins for the histograms.
    :param bool uniform_y_scale: If True, the histogram for each channel
        will use the same bins. Otherwise, the minimum bin range is computed
        separately for each channel.
    :param Quantity unit: Unit of Y-Axis.
    :param progress: Set this parameter to report progress.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :return: A tuple with three values:

        * A three-dimensional histogram matrix, where the first dimension
          corresponds to bins, the second dimension to the entries of
          ``trains`` (e.g. segments) and the third dimension to channels.
        * A list of the minimum amplitude value for each channel (all values
          will be equal if ``uniform_y_scale`` is true).
        * A list of the maximum amplitude value for each channel (all values
          will be equal if ``uniform_y_scale`` is true).
    :rtype: (ndarray, list, list)
    """
    if not progress:
        progress = ProgressIndicator()

    num_channels = 1
    for t in trains:
        if not t:
            continue
        num_channels = t[0].waveforms.shape[2]
        break

    progress.set_ticks(2*len(trains))
    progress.set_status('Calculating Spike Amplitude Histogram')

    # Find maximum and minimum amplitudes on all channels
    up = [0] * num_channels
    down = [0] * num_channels
    for t in trains:
        for s in t:
            if s.waveforms is None:
                continue
            if s.waveforms.shape[2] != num_channels:
                raise SpykeException('All spikes need to have the same ' +
                                     'number of channels for Spike Amplitude Histogram!')
            a = sp.asarray(s.waveforms.rescale(unit))
            u = a.max(1)
            d = a.min(1)
            for c in xrange(num_channels):
                up[c] = max(up[c], sp.stats.mstats.mquantiles(
                    u[:,c], [0.999])[0])
                down[c] = min(down[c], sp.stats.mstats.mquantiles(
                    d[:,c], [0.001])[0])
            progress.step()

    if uniform_y_scale:
        up = [max(up)] * num_channels
        down = [min(down)] * num_channels

    # Create histogram
    bins = [sp.linspace(down[c],up[c], num_bins+1)
            for c in xrange(num_channels)]
    hist = sp.zeros((num_bins, len(trains), num_channels))
    for i, t in enumerate(trains):
        for s in t:
            if s.waveforms is None:
                continue
            a = sp.asarray(s.waveforms.rescale(unit))
            upper = a.max(1)
            lower = a.min(1)
            for c in xrange(num_channels):
                hist[:,i,c] += sp.histogram(upper[:,c], bins[c])[0]
                hist[:,i,c] += sp.histogram(lower[:,c], bins[c])[0]
        progress.step()

    return hist, down, up