Example #1
    def test_cubic_errors(self):

        # Check error outputs for incorrect settings of the parameters

        # Empty signal
        self.assertRaises(
            ValueError, cubic.cubic,
            neo.AnalogSignalArray([] * pq.dimensionless,
                                  sampling_period=10 * pq.ms))

        # Multidimensional array
        self.assertRaises(
            ValueError, cubic.cubic,
            neo.AnalogSignalArray([[1, 2, 3], [1, 2, 3]] * pq.dimensionless,
                                  sampling_period=10 * pq.ms))
        self.assertRaises(ValueError, cubic.cubic,
                          numpy.array([[1, 2, 3], [1, 2, 3]]))

        # Negative alpha
        self.assertRaises(ValueError, cubic.cubic, self.data_array, alpha=-0.1)

        # Negative number of iterations ximax
        self.assertRaises(ValueError, cubic.cubic, self.data_array, ximax=-100)

        # Checking the case in which the second cumulant of the signal is
        # smaller than the first cumulant (analytical constraint of the method)
        self.assertRaises(ValueError,
                          cubic.cubic,
                          neo.AnalogSignalArray(numpy.array(
                              [1] * 1000).reshape(1000, 1),
                                                units=pq.dimensionless,
                                                sampling_period=10 * pq.ms),
                          alpha=self.alpha)
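
For contrast with the error cases above, a minimal sketch of a valid call, assuming the `cubic.cubic` API exercised by these tests; the parameter values and the four-tuple return are illustrative assumptions, not a documented contract:

    import numpy
    import quantities as pq
    import neo
    from elephant import cubic

    # A toy population histogram as a column-shaped AnalogSignalArray
    data = neo.AnalogSignalArray(
        numpy.random.poisson(2, size=1000).reshape(1000, 1) * pq.dimensionless,
        sampling_period=10 * pq.ms)
    # Assumed return values: estimated correlation order xi, p-values,
    # fitted cumulant kappa, and a flag set if ximax was reached
    xi, p_vals, kappa, test_aborted = cubic.cubic(data, alpha=0.05, ximax=100)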
Example #2
    def setUp(self):
        # Generate test data of a harmonic function over a long time
        time = np.arange(0, 1000, 0.1) * pq.ms
        freq = 10 * pq.Hz

        self.amplitude = np.array([
            np.linspace(1, 10, len(time)),
            np.linspace(1, 10, len(time)),
            np.ones((len(time))),
            np.ones((len(time))) * 10.
        ]).T
        self.phase = np.array([
            (time * freq).simplified.magnitude * 2. * np.pi,
            (time * freq).simplified.magnitude * 2. * np.pi + np.pi / 2,
            (time * freq).simplified.magnitude * 2. * np.pi + np.pi,
            (time * freq).simplified.magnitude * 2. * 2. * np.pi
        ]).T

        self.phase = np.mod(self.phase + np.pi, 2. * np.pi) - np.pi

        # rising-amplitude cosine, rising-amplitude sine, flat inverse cosine,
        # flat cosine at double frequency
        sigs = np.vstack([
            self.amplitude[:, 0] * np.cos(self.phase[:, 0]),
            self.amplitude[:, 1] * np.cos(self.phase[:, 1]),
            self.amplitude[:, 2] * np.cos(self.phase[:, 2]),
            self.amplitude[:, 3] * np.cos(self.phase[:, 3])
        ])

        self.long_signals = neo.AnalogSignalArray(
            sigs.T,
            units='mV',
            t_start=0. * pq.ms,
            sampling_rate=(len(time) / (time[-1] - time[0])).rescale(pq.Hz),
            dtype=float)

        # Generate test data covering a single oscillation cycle in 1s only
        phases = np.arange(0, 2 * np.pi, np.pi / 256)
        sigs = np.vstack([
            np.sin(phases),
            np.cos(phases),
            np.sin(2 * phases),
            np.cos(2 * phases)
        ])

        self.one_period = neo.AnalogSignalArray(sigs.T,
                                                units=pq.mV,
                                                sampling_rate=len(phases) *
                                                pq.Hz)
Example #3
    def test_butter_filter_function(self):
        # generate white noise AnalogSignalArray
        noise = neo.AnalogSignalArray(np.random.normal(size=5000),
                                      sampling_rate=1000 * pq.Hz,
                                      units='mV')

        # test that the filter performs as well with filter_function='lfilter'
        # as with filter_function='filtfilt' (i.e. the default option)
        kwds = {
            'signal': noise,
            'highpass_freq': 250.0 * pq.Hz,
            'lowpass_freq': None,
            'filter_function': 'filtfilt'
        }
        filtered_noise = elephant.signal_processing.butter(**kwds)
        _, psd_filtfilt = spsig.welch(filtered_noise,
                                      nperseg=1024,
                                      fs=1000.0,
                                      detrend=lambda x: x)

        kwds['filter_function'] = 'lfilter'
        filtered_noise = elephant.signal_processing.butter(**kwds)
        _, psd_lfilter = spsig.welch(filtered_noise,
                                     nperseg=1024,
                                     fs=1000.0,
                                     detrend=lambda x: x)

        self.assertAlmostEqual(psd_filtfilt[0], psd_lfilter[0])
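
For reference outside the test harness, a minimal usage sketch of `elephant.signal_processing.butter`, reusing the keyword names exercised above (the cut-off values are illustrative):

    import numpy as np
    import quantities as pq
    import neo
    import elephant.signal_processing

    signal = neo.AnalogSignalArray(np.random.normal(size=5000),
                                   sampling_rate=1000 * pq.Hz, units='mV')
    # Band-pass between 10 Hz and 100 Hz; filtfilt is the default filter_function
    filtered = elephant.signal_processing.butter(signal,
                                                 highpass_freq=10.0 * pq.Hz,
                                                 lowpass_freq=100.0 * pq.Hz)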
Example #4
    def test_butter_input_types(self):
        # generate white noise data of different types
        noise_np = np.random.normal(size=5000)
        noise_pq = noise_np * pq.mV
        noise = neo.AnalogSignalArray(noise_pq, sampling_rate=1000.0 * pq.Hz)

        # check input as NumPy ndarray
        filtered_noise_np = elephant.signal_processing.butter(noise_np,
                                                              400.0,
                                                              100.0,
                                                              fs=1000.0)
        self.assertTrue(isinstance(filtered_noise_np, np.ndarray))
        self.assertFalse(isinstance(filtered_noise_np, pq.quantity.Quantity))
        self.assertFalse(isinstance(filtered_noise_np, neo.AnalogSignalArray))
        self.assertEqual(filtered_noise_np.shape, noise_np.shape)

        # check input as Quantity array
        filtered_noise_pq = elephant.signal_processing.butter(noise_pq,
                                                              400.0 * pq.Hz,
                                                              100.0 * pq.Hz,
                                                              fs=1000.0)
        self.assertTrue(isinstance(filtered_noise_pq, pq.quantity.Quantity))
        self.assertFalse(isinstance(filtered_noise_pq, neo.AnalogSignalArray))
        self.assertEqual(filtered_noise_pq.shape, noise_pq.shape)

        # check input as neo AnalogSignalArray
        filtered_noise = elephant.signal_processing.butter(
            noise, 400.0 * pq.Hz, 100.0 * pq.Hz)
        self.assertTrue(isinstance(filtered_noise, neo.AnalogSignalArray))
        self.assertEqual(filtered_noise.shape, noise.shape)

        # check if the results from different input types are identical
        self.assertTrue(
            np.all(filtered_noise_pq.magnitude == filtered_noise_np))
        self.assertTrue(np.all(filtered_noise.magnitude == filtered_noise_np))
Example #5
 def get_weights(self):
     return neo.AnalogSignalArray(self._weights,
                                  units='nA',
                                  sampling_period=self.interval * ms,
                                  channel_index=numpy.arange(
                                      len(self._weights[0])),
                                  name="weight")
Example #6
 def _get_current_segment(self,
                          filter_ids=None,
                          variables='all',
                          clear=False):
     segment = neo.Segment(
         name="segment%03d" % self._simulator.state.segment_counter,
         description=self.population.describe(),
         rec_datetime=datetime.now()
     )  # would be nice to get the time at the start of the recording, not the end
     variables_to_include = set(self.recorded.keys())
     if variables != 'all':
         variables_to_include = variables_to_include.intersection(
             set(variables))
     for variable in variables_to_include:
         if variable == 'spikes':
             t_stop = self._simulator.state.t * pq.ms  # must run on all MPI nodes
             segment.spiketrains = [
                 neo.SpikeTrain(
                     self._get_spiketimes(id),
                     t_start=self._recording_start_time,
                     t_stop=t_stop,
                     units='ms',
                     source_population=self.population.label,
                     source_id=int(id),
                     source_index=self.population.id_to_index(id))
                 for id in sorted(self.filter_recorded(
                     'spikes', filter_ids))
             ]
         else:
             ids = sorted(self.filter_recorded(variable, filter_ids))
             signal_array = self._get_all_signals(variable,
                                                  ids,
                                                  clear=clear)
             t_start = self._recording_start_time
             sampling_period = self.sampling_interval * pq.ms
             current_time = self._simulator.state.t * pq.ms
             mpi_node = self._simulator.state.mpi_rank  # for debugging
             if signal_array.size > 0:  # may be empty if none of the recorded cells are on this MPI node
                 channel_indices = numpy.array(
                     [self.population.id_to_index(id) for id in ids])
                 units = self.population.find_units(variable)
                 source_ids = numpy.fromiter(ids, dtype=int)
                 segment.analogsignalarrays.append(
                     neo.AnalogSignalArray(
                         signal_array,
                         units=units,
                         t_start=t_start,
                         sampling_period=sampling_period,
                         name=variable,
                         source_population=self.population.label,
                         channel_index=channel_indices,
                         source_ids=source_ids))
                 logger.debug("%d **** ids=%s, channels=%s", mpi_node,
                              source_ids, channel_indices)
                 assert segment.analogsignalarrays[
                     0].t_stop - current_time < 2 * sampling_period
                 # need to add `Unit` and `RecordingChannelGroup` objects
     return segment
Example #7
 def test_butter_multidim_input(self):
     noise_pq = np.random.normal(size=(4, 5000)) * pq.mV
     noise_neo = neo.AnalogSignalArray(noise_pq.T,
                                       sampling_rate=1000.0 * pq.Hz)
     noise_neo1d = neo.AnalogSignalArray(noise_pq[0],
                                         sampling_rate=1000.0 * pq.Hz)
     filtered_noise_pq = elephant.signal_processing.butter(noise_pq,
                                                           250.0,
                                                           fs=1000.0)
     filtered_noise_neo = elephant.signal_processing.butter(
         noise_neo, 250.0)
     filtered_noise_neo1d = elephant.signal_processing.butter(
         noise_neo1d, 250.0)
     self.assertTrue(
         np.all(
             filtered_noise_pq.magnitude == filtered_noise_neo.T.magnitude))
     self.assertTrue(
         np.all(filtered_noise_neo1d.magnitude ==
                filtered_noise_neo.T.magnitude[0]))
Example #8
    def test_zscore_list_inplace(self):
        '''
        Test zscore on a list of AnalogSignalArray objects, asking for an
        inplace operation.
        '''
        signal1 = neo.AnalogSignalArray(np.transpose(
            np.vstack([self.test_seq1, self.test_seq1])),
                                        units='mV',
                                        t_start=0. * pq.ms,
                                        sampling_rate=1000. * pq.Hz,
                                        dtype=float)
        signal2 = neo.AnalogSignalArray(np.transpose(
            np.vstack([self.test_seq1, self.test_seq2])),
                                        units='mV',
                                        t_start=0. * pq.ms,
                                        sampling_rate=1000. * pq.Hz,
                                        dtype=float)
        signal_list = [signal1, signal2]

        m = np.mean(np.hstack([self.test_seq1, self.test_seq1]))
        s = np.std(np.hstack([self.test_seq1, self.test_seq1]))
        target11 = (self.test_seq1 - m) / s
        target21 = (self.test_seq1 - m) / s
        m = np.mean(np.hstack([self.test_seq1, self.test_seq2]))
        s = np.std(np.hstack([self.test_seq1, self.test_seq2]))
        target12 = (self.test_seq1 - m) / s
        target22 = (self.test_seq2 - m) / s

        # Call elephant function
        result = elephant.signal_processing.zscore(signal_list, inplace=True)

        assert_array_almost_equal(result[0].magnitude,
                                  np.transpose(np.vstack([target11,
                                                          target12])),
                                  decimal=9)
        assert_array_almost_equal(result[1].magnitude,
                                  np.transpose(np.vstack([target21,
                                                          target22])),
                                  decimal=9)

        # Assert original signal is overwritten
        self.assertEqual(signal1[0, 0].magnitude, target11[0])
        self.assertEqual(signal2[0, 0].magnitude, target21[0])
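
A hedged sketch of the same call pattern on a single signal, this time without overwriting the input (the toy signal is an illustrative assumption):

    import numpy as np
    import quantities as pq
    import neo
    import elephant.signal_processing

    sig = neo.AnalogSignalArray(np.arange(10, dtype=float), units='mV',
                                t_start=0. * pq.ms,
                                sampling_rate=1000. * pq.Hz)
    # inplace=False returns a dimensionless z-scored duplicate;
    # the original signal keeps its values and units
    z = elephant.signal_processing.zscore(sig, inplace=False)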
Example #9
def complexity_pdf(spiketrains, binsize):
    """
    Complexity Distribution [1] of a list of :attr:`neo.SpikeTrain` objects.

    Probability density computed from the complexity histogram which is the
    histogram of the entries of the population histogram of clipped (binary)
    spike trains computed with a bin width of binsize.
    It provides for each complexity (== number of active neurons per bin) the
    number of occurrences. The normalization of that histogram to 1 is the
    probability density.

    Parameters
    ----------
    spiketrains : List of neo.SpikeTrain objects
        Spiketrains with a common time axis (same `t_start` and `t_stop`)
    binsize : quantities.Quantity
        Width of the histogram's time bins.

    Returns
    -------
    complexity_distribution : neo.AnalogSignalArray
        A neo.AnalogSignalArray containing the complexity distribution;
        entry `j` gives the probability that exactly `j` neurons fire
        within the same time bin.

    See also
    --------
    elephant.conversion.BinnedSpikeTrain

    References
    ----------
    [1] Gruen, S., Abeles, M., & Diesmann, M. (2008). Impact of higher-order
    correlations on coincidence distributions of massively parallel data.
    In Dynamic Brain-from Neural Spikes to Behaviors (pp. 96-114).
    Springer Berlin Heidelberg.

    """
    # Computing the population histogram with parameter binary=True to clip the
    # spike trains before summing
    pophist = time_histogram(spiketrains, binsize, binary=True)

    # Computing the histogram of the entries of pophist (=Complexity histogram)
    complexity_hist = np.histogram(pophist.magnitude,
                                   bins=range(0,
                                              len(spiketrains) + 2))[0]

    # Normalization of the Complexity Histogram to 1 (probability distribution)
    complexity_hist = complexity_hist / complexity_hist.sum()
    # Convert the Complexity pdf to a neo.AnalogSignalArray
    complexity_distribution = neo.AnalogSignalArray(
        np.array(complexity_hist).reshape(len(complexity_hist), 1) *
        pq.dimensionless,
        t_start=0 * pq.dimensionless,
        sampling_period=1 * pq.dimensionless)

    return complexity_distribution
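
A hedged usage sketch for `complexity_pdf`; the spike times are illustrative, and the indexing comment follows from the histogram construction above:

    import quantities as pq
    import neo

    # Two toy spike trains sharing the same t_start/t_stop
    st1 = neo.SpikeTrain([1, 5, 9] * pq.ms, t_start=0 * pq.ms, t_stop=10 * pq.ms)
    st2 = neo.SpikeTrain([1, 6, 9] * pq.ms, t_start=0 * pq.ms, t_stop=10 * pq.ms)
    pdf = complexity_pdf([st1, st2], binsize=1 * pq.ms)
    # pdf has len(spiketrains) + 1 entries; entry k is the probability
    # that exactly k neurons fire within the same bin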
Example #10
 def setUp(self):
     n2 = 300
     n0 = 100000 - n2
     self.xi = 10
     self.data_signal = neo.AnalogSignalArray(
         numpy.array([self.xi] * n2 + [0] * n0).reshape(n0 + n2, 1) *
         pq.dimensionless,
         sampling_period=1 * pq.s)
     self.data_array = numpy.array([self.xi] * n2 + [0] * n0)
     self.alpha = 0.05
     self.ximax = 10
Example #11
 def test_butter_missing_cutoff_freqs(self):
     # generate a dummy AnalogSignalArray
     anasig_dummy = neo.AnalogSignalArray(np.zeros(5000),
                                          sampling_rate=1000 * pq.Hz,
                                          units='mV')
     # test a case where no cut-off frequencies are given
     kwds = {
         'signal': anasig_dummy,
         'highpass_freq': None,
         'lowpass_freq': None
     }
     self.assertRaises(ValueError, elephant.signal_processing.butter,
                       **kwds)
Example #12
 def test_butter_invalid_filter_function(self):
     # generate a dummy AnalogSignalArray
     anasig_dummy = neo.AnalogSignalArray(np.zeros(5000),
                                          sampling_rate=1000 * pq.Hz,
                                          units='mV')
     # test exception upon invalid filtfunc string
     kwds = {
         'signal': anasig_dummy,
         'highpass_freq': 250.0 * pq.Hz,
         'filter_function': 'invalid_filter'
     }
     self.assertRaises(ValueError, elephant.signal_processing.butter,
                       **kwds)
Example #13
def segment_from_recording_device(devices,
                                  variables_to_include,
                                  id_lists,
                                  t_stop,
                                  name="segment00"):
    """
    Extract data from a NEST recording device and return it as a Neo Segment object.    
    """
    def get_data(device, variable, id_list):
        events = nest.GetStatus(device, 'events')[0]
        ids = events['senders']
        values = events[variable]
        data = {}
        for id in id_list:
            data[id] = values[ids == id]
        assert len(data) > 0
        return data

    segment = neo.Segment(name=name, rec_datetime=datetime.now())

    for device, variable, id_list in zip(devices, variables_to_include,
                                         id_lists):
        print(name, device, variable)
        data = get_data(device, variable, id_list)
        if variable == 'times':
            print("  adding spiketrain")
            id0 = min(id_list)
            segment.spiketrains = [
                neo.SpikeTrain(spiketrain,
                               t_start=0.0,
                               t_stop=t_stop,
                               units='ms',
                               source_id=id,
                               source_index=id - id0)
                for id, spiketrain in data.items()
            ]
        else:
            print("  adding signal")
            source_ids = np.array(id_list)
            channel_indices = source_ids - source_ids.min()
            signal_array = np.vstack(data.values()).T
            segment.analogsignalarrays = [
                neo.AnalogSignalArray(signal_array,
                                      units='mV',
                                      t_start=0.0 * ms,
                                      sampling_period=0.1 * ms,
                                      name=variable,
                                      channel_index=channel_indices,
                                      source_ids=source_ids)
            ]
    return segment
Example #14
    def test_butter_filter_type(self):
        """
        Test if correct type of filtering is performed according to how cut-off
        frequencies are given
        """
        # generate white noise AnalogSignalArray
        noise = neo.AnalogSignalArray(np.random.normal(size=5000),
                                      sampling_rate=1000 * pq.Hz,
                                      units='mV')

        # test high-pass filtering: power at the lowest frequency
        # should be almost zero
        # Note: the default detrend function of scipy.signal.welch() seems to
        # cause artificial finite power at the lowest frequencies. Here I avoid
        # this by using an identity function for detrending
        filtered_noise = elephant.signal_processing.butter(
            noise, 250.0 * pq.Hz, None)
        _, psd = spsig.welch(filtered_noise,
                             nperseg=1024,
                             fs=1000.0,
                             detrend=lambda x: x)
        self.assertAlmostEqual(psd[0], 0)

        # test low-pass filtering: power at the highest frequency
        # should be almost zero
        filtered_noise = elephant.signal_processing.butter(
            noise, None, 250.0 * pq.Hz)
        _, psd = spsig.welch(filtered_noise, nperseg=1024, fs=1000.0)
        self.assertAlmostEqual(psd[-1], 0)

        # test band-pass filtering: power at the lowest and highest frequencies
        # should be almost zero
        filtered_noise = elephant.signal_processing.butter(
            noise, 200.0 * pq.Hz, 300.0 * pq.Hz)
        _, psd = spsig.welch(filtered_noise,
                             nperseg=1024,
                             fs=1000.0,
                             detrend=lambda x: x)
        self.assertAlmostEqual(psd[0], 0)
        self.assertAlmostEqual(psd[-1], 0)

        # test band-stop filtering: power at the intermediate frequency
        # should be almost zero
        filtered_noise = elephant.signal_processing.butter(
            noise, 400.0 * pq.Hz, 100.0 * pq.Hz)
        _, psd = spsig.welch(filtered_noise, nperseg=1024, fs=1000.0)
        self.assertAlmostEqual(psd[256], 0)
Example #15
    def test_zscore_single_inplace_int(self):
        '''
        Test if the z-score is correctly calculated even if the input is an
        AnalogSignalArray of type int, asking for an inplace operation.
        '''
        signal = neo.AnalogSignalArray(self.test_seq1,
                                       units='mV',
                                       t_start=0. * pq.ms,
                                       sampling_rate=1000. * pq.Hz,
                                       dtype=int)

        m = np.mean(self.test_seq1)
        s = np.std(self.test_seq1)
        target = (self.test_seq1 - m) / s

        assert_array_almost_equal(elephant.signal_processing.zscore(
            signal, inplace=True).magnitude,
                                  target.astype(int),
                                  decimal=9)

        # Assert original signal is overwritten
        self.assertEqual(signal[0].magnitude, target.astype(int)[0])
Example #16
    def test_zscore_single_dup_int(self):
        '''
        Test if the z-score is correctly calculated even if the input is an
        AnalogSignalArray of type int, asking for a duplicate (duplicate should
        be of type float).
        '''
        signal = neo.AnalogSignalArray(self.test_seq1,
                                       units='mV',
                                       t_start=0. * pq.ms,
                                       sampling_rate=1000. * pq.Hz,
                                       dtype=int)

        m = np.mean(self.test_seq1)
        s = np.std(self.test_seq1)
        target = (self.test_seq1 - m) / s

        assert_array_almost_equal(elephant.signal_processing.zscore(
            signal, inplace=False).magnitude,
                                  target,
                                  decimal=9)

        # Assert original signal is untouched
        self.assertEqual(signal.magnitude[0], self.test_seq1[0])
Example #17
    def test_zscore_single_dup(self):
        '''
        Test z-score on a single AnalogSignalArray, asking to return a
        duplicate.
        '''
        signal = neo.AnalogSignalArray(self.test_seq1,
                                       units='mV',
                                       t_start=0. * pq.ms,
                                       sampling_rate=1000. * pq.Hz,
                                       dtype=float)

        m = np.mean(self.test_seq1)
        s = np.std(self.test_seq1)
        target = (self.test_seq1 - m) / s
        assert_array_equal(target, scipy.stats.zscore(self.test_seq1))

        result = elephant.signal_processing.zscore(signal, inplace=False)
        assert_array_almost_equal(result.magnitude, target, decimal=9)

        self.assertEqual(result.units, pq.Quantity(1. * pq.dimensionless))

        # Assert original signal is untouched
        self.assertEqual(signal[0].magnitude, self.test_seq1[0])
Example #18
    def test_zscore_single_multidim_dup(self):
        '''
        Test z-score on a single AnalogSignal with multiple dimensions, asking
        to return a duplicate.
        '''
        signal = neo.AnalogSignalArray(np.transpose(
            np.vstack([self.test_seq1, self.test_seq2])),
                                       units='mV',
                                       t_start=0. * pq.ms,
                                       sampling_rate=1000. * pq.Hz,
                                       dtype=float)

        m = np.mean(signal.magnitude, axis=0, keepdims=True)
        s = np.std(signal.magnitude, axis=0, keepdims=True)
        target = (signal.magnitude - m) / s

        assert_array_almost_equal(elephant.signal_processing.zscore(
            signal, inplace=False).magnitude,
                                  target,
                                  decimal=9)

        # Assert original signal is untouched
        self.assertEqual(signal[0, 0].magnitude, self.test_seq1[0])
Example #19
    def test_zscore_single_inplace(self):
        '''
        Test z-score on a single AnalogSignalArray, asking for an inplace
        operation.
        '''
        signal = neo.AnalogSignalArray(self.test_seq1,
                                       units='mV',
                                       t_start=0. * pq.ms,
                                       sampling_rate=1000. * pq.Hz,
                                       dtype=float)

        m = np.mean(self.test_seq1)
        s = np.std(self.test_seq1)
        target = (self.test_seq1 - m) / s

        result = elephant.signal_processing.zscore(signal, inplace=True)

        assert_array_almost_equal(result.magnitude, target, decimal=9)

        self.assertEqual(result.units, pq.Quantity(1. * pq.dimensionless))

        # Assert original signal is overwritten
        self.assertEqual(signal[0].magnitude, target[0])
Example #20
    def _cch_memory(st_1, st_2, win, mode, norm, border_corr, binary, kern):

        # Check that the spike trains are binned with the same temporal
        # resolution
        if not st_1.matrix_rows == 1:
            raise AssertionError("Spike train must be one dimensional")
        if not st_2.matrix_rows == 1:
            raise AssertionError("Spike train must be one dimensional")
        if not st_1.binsize == st_2.binsize:
            raise AssertionError("Bin sizes must be equal")
            raise AssertionError("Bin sizes must be equal")

        # Retrieve unclipped matrix
        st1_spmat = st_1.to_sparse_array()
        st2_spmat = st_2.to_sparse_array()
        binsize = st_1.binsize
        max_num_bins = max(st_1.num_bins, st_2.num_bins)

        # Set the time window in which the cch is computed
        if win is not None:
            # Window parameter given in number of bins (integer)
            if isinstance(win[0], int) and isinstance(win[1], int):
                # Check the window parameter values
                if win[0] >= win[1] or win[0] <= -max_num_bins \
                        or win[1] >= max_num_bins:
                    raise ValueError(
                        "The window exceeds the length of the spike trains")
                # Assign left and right edges of the cch
                l, r = win[0], win[1]
            # Window parameter given in time units
            else:
                # Check the window parameter values
                if win[0].rescale(binsize.units).magnitude % \
                    binsize.magnitude != 0 or win[1].rescale(
                        binsize.units).magnitude % binsize.magnitude != 0:
                    raise ValueError(
                        "The window has to be a multiple of the binsize")
                if win[0] >= win[1] or win[0] <= -max_num_bins * binsize \
                        or win[1] >= max_num_bins * binsize:
                    raise ValueError("The window exceeds the length of the"
                                     " spike trains")
                # Assign left and right edges of the cch
                l, r = int(win[0].rescale(binsize.units) / binsize), int(
                    win[1].rescale(binsize.units) / binsize)
        # Case without explicit window parameter
        else:
            # cch computed for all the possible entries
            if mode == 'full':
                # Assign left and right edges of the cch
                r = st_2.num_bins - 1
                l = -st_1.num_bins + 1
            # cch computed only for the entries that completely overlap
            elif mode == 'valid':
                # Assign left and right edges of the cch
                r = max(st_2.num_bins - st_1.num_bins, 0)
                l = min(st_2.num_bins - st_1.num_bins, 0)
            # Check the mode parameter
            else:
                raise KeyError("The possible entries for the mode parameter "
                               "are 'full' and 'valid'")

        # For each row, extract the nonzero column indices and the
        # corresponding data in the matrix (for performance reasons)
        st1_bin_idx_unique = st1_spmat.nonzero()[1]
        st2_bin_idx_unique = st2_spmat.nonzero()[1]

        # Case with binary entries
        if binary:
            st1_bin_counts_unique = np.array(st1_spmat.data > 0, dtype=int)
            st2_bin_counts_unique = np.array(st2_spmat.data > 0, dtype=int)
        # Case with all values
        else:
            st1_bin_counts_unique = st1_spmat.data
            st2_bin_counts_unique = st2_spmat.data

        # Initialize the counts to an array of zeroes,
        # and the bin IDs to integers
        # spanning the time axis
        counts = np.zeros(np.abs(l) + np.abs(r) + 1)
        bin_ids = np.arange(l, r + 1)
        # Compute the CCH at lags in l,...,r only
        for idx, i in enumerate(st1_bin_idx_unique):
            timediff = st2_bin_idx_unique - i
            timediff_in_range = np.all([timediff >= l, timediff <= r], axis=0)
            timediff = (timediff[timediff_in_range]).reshape((-1, ))
            counts[timediff + np.abs(l)] += st1_bin_counts_unique[idx] * \
                st2_bin_counts_unique[timediff_in_range]

        # Correct the values to account for the missing contributions
        # at the edges
        if border_corr is True:
            correction = float(max_num_bins + 1) / np.array(
                max_num_bins + 1 - abs(np.arange(l, r + 1)), float)
            counts = counts * correction

        # Define the kern for smoothing as an ndarray
        if hasattr(kern, '__iter__'):
            if len(kern) > np.abs(l) + np.abs(r) + 1:
                raise ValueError(
                    'The length of the kernel cannot be larger than the '
                    'length %d of the resulting CCH.' %
                    (np.abs(l) + np.abs(r) + 1))
            kern = np.array(kern, dtype=float)
            kern = 1. * kern / sum(kern)
        # Check kern parameter
        elif kern is not None:
            raise ValueError('Invalid smoothing kernel.')

        # Smooth the cross-correlation histogram with the kern
        if kern is not None:
            counts = np.convolve(counts, kern, mode='same')

        # Rescale the histogram so that the central bin has height 1,
        # if requested
        if norm and l <= 0 <= r:
            if counts[np.abs(l)] != 0:
                counts = counts / counts[np.abs(l)]
            else:
                warnings.warn('CCH not normalized because no value for 0 lag')

        # Transform the array count into an AnalogSignalArray
        cch_result = neo.AnalogSignalArray(
            signal=counts.reshape(counts.size, 1),
            units=pq.dimensionless,
            t_start=(bin_ids[0] - 0.5) * st_1.binsize,
            sampling_period=st_1.binsize)
        # Return only the hist_bins bins and counts before and after the
        # central one
        return cch_result, bin_ids
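
As a worked check of the lag-counting loop above, a self-contained NumPy sketch with toy bin indices (all values illustrative):

    import numpy as np

    st1_idx = np.array([2, 5])      # bins in which train 1 spikes
    st2_idx = np.array([3, 5, 7])   # bins in which train 2 spikes
    l, r = -3, 3                    # CCH window in bins
    counts = np.zeros(np.abs(l) + np.abs(r) + 1)
    for i in st1_idx:
        timediff = st2_idx - i
        in_range = (timediff >= l) & (timediff <= r)
        counts[timediff[in_range] + np.abs(l)] += 1
    # counts holds one entry per lag in l, ..., r; lag 0 (index abs(l))
    # counts the coincidence at bin 5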
Example #21
def instantaneous_rate(spiketrain,
                       sampling_period,
                       kernel='auto',
                       cutoff=5.0,
                       t_start=None,
                       t_stop=None,
                       trim=False):
    """
    Estimates instantaneous firing rate by kernel convolution.

    Parameters
    -----------
    spiketrain : 'neo.SpikeTrain'
        Neo object that contains spike times, the unit of the time stamps
        and t_start and t_stop of the spike train.
    sampling_period : Time Quantity
        Time stamp resolution of the spike times. The same resolution will
        be assumed for the kernel
    kernel : string 'auto' or callable object of :class:`Kernel` from module
        'kernels.py'. Currently implemented kernel forms are rectangular,
        triangular, epanechnikovlike, gaussian, laplacian, exponential,
        and alpha function.
        Example: kernel = kernels.RectangularKernel(sigma=10*ms, invert=False)
        The kernel is used for convolution with the spike train and its
        standard deviation determines the time resolution of the instantaneous
        rate estimation.
        Default: 'auto'. In this case, the optimized kernel width for the 
        rate estimation is calculated according to [1] and with this width
        a gaussian kernel is constructed. Automatized calculation of the 
        kernel width is not available for other than gaussian kernel shapes.
    cutoff : float
        This factor determines the cutoff of the probability distribution of
        the kernel, i.e., the considered width of the kernel in terms of 
        multiples of the standard deviation sigma.
        Default: 5.0
    t_start : Time Quantity (optional)
        Start time of the interval used to compute the firing rate. If None
        assumed equal to spiketrain.t_start
        Default: None
    t_stop : Time Quantity (optional)
        End time of the interval used to compute the firing rate (included).
        If None assumed equal to spiketrain.t_stop
        Default: None
    trim : bool
        if False, the output of the Fast Fourier Transformation being a longer
        vector than the input vector by the size of the kernel is reduced back
        to the original size of the considered time interval of the spiketrain
        using the median of the kernel.
        if True, only the region of the convolved signal is returned, where
        there is complete overlap between kernel and spike train. This is
        achieved by reducing the length of the output of the Fast Fourier
        Transformation by a total of two times the size of the kernel, and
        t_start and t_stop are adjusted.
        Default: False

    Returns
    -------
    rate : neo.AnalogSignalArray
        Contains the rate estimation in unit hertz (Hz).
        Has a property 'rate.times' which contains the time axis of the rate
        estimate. The unit of this property is the same as the resolution that
        is given via the argument 'sampling_period' to the function.

    Raises
    ------
    TypeError:
        If `spiketrain` is not an instance of :class:`SpikeTrain` of Neo.
        If `sampling_period` is not a time quantity.
        If `kernel` is neither instance of :class:`Kernel` or string 'auto'.
        If `cutoff` is neither float nor int.
        If `t_start` and `t_stop` are neither None nor a time quantity.
        If `trim` is not bool.

    ValueError:
        If `sampling_period` is smaller than zero.

    Example
    --------
    kernel = kernels.AlphaKernel(sigma=0.05*pq.s, invert=True)
    rate = instantaneous_rate(spiketrain, sampling_period=2*pq.ms,
                              kernel=kernel)

    References
    ----------
    .. [1] H. Shimazaki, S. Shinomoto, J Comput Neurosci (2010) 29:171–182.

    """
    # Checks of input variables:
    if not isinstance(spiketrain, SpikeTrain):
        raise TypeError(
            "spiketrain must be instance of :class:`SpikeTrain` of Neo!\n"
            "    Found: %s, value %s" % (type(spiketrain), str(spiketrain)))

    if not (isinstance(sampling_period, pq.Quantity)
            and sampling_period.dimensionality.simplified == pq.Quantity(
                1, "s").dimensionality):
        raise TypeError("The sampling period must be a time quantity!\n"
                        "    Found: %s, value %s" %
                        (type(sampling_period), str(sampling_period)))

    if sampling_period.magnitude < 0:
        raise ValueError("The sampling period must be larger than zero.")

    if kernel == 'auto':
        kernel_width = sskernel(spiketrain.magnitude, tin=None,
                                bootstrap=True)['optw']
        unit = spiketrain.units
        sigma = 1 / (2.0 * 2.7) * kernel_width * unit
        # factor 2.0 connects kernel width with its half width,
        # factor 2.7 connects half width of Gaussian distribution with
        #             99% probability mass with its standard deviation.
        kernel = kernels.GaussianKernel(sigma)
    elif not isinstance(kernel, kernels.Kernel):
        raise TypeError("kernel must be either instance of :class:`Kernel` "
                        "or the string 'auto'!\n"
                        "    Found: %s, value %s" %
                        (type(kernel), str(kernel)))

    if not (isinstance(cutoff, float) or isinstance(cutoff, int)):
        raise TypeError("cutoff must be float or integer!")

    if not (t_start is None or (isinstance(t_start, pq.Quantity)
                                and t_start.dimensionality.simplified
                                == pq.Quantity(1, "s").dimensionality)):
        raise TypeError("t_start must be a time quantity!")

    if not (t_stop is None or (isinstance(t_stop, pq.Quantity)
                               and t_stop.dimensionality.simplified
                               == pq.Quantity(1, "s").dimensionality)):
        raise TypeError("t_stop must be a time quantity!")

    if not (isinstance(trim, bool)):
        raise TypeError("trim must be bool!")

    # main function:
    units = pq.CompoundUnit("%s*s" %
                            str(sampling_period.rescale('s').magnitude))
    spiketrain = spiketrain.rescale(units)
    if t_start is None:
        t_start = spiketrain.t_start
    else:
        t_start = t_start.rescale(spiketrain.units)

    if t_stop is None:
        t_stop = spiketrain.t_stop
    else:
        t_stop = t_stop.rescale(spiketrain.units)

    time_vector = np.zeros(int((t_stop - t_start)) + 1)

    spikes_slice = spiketrain.time_slice(t_start, t_stop) \
        if len(spiketrain) else np.array([])

    for spike in spikes_slice:
        index = int((spike - t_start))
        time_vector[index] += 1

    if cutoff < kernel.min_cutoff:
        cutoff = kernel.min_cutoff
        warnings.warn("The width of the kernel was adjusted to a minimally "
                      "allowed width.")

    t_arr = np.arange(
        -cutoff * kernel.sigma.rescale(units).magnitude,
        cutoff * kernel.sigma.rescale(units).magnitude +
        sampling_period.rescale(units).magnitude,
        sampling_period.rescale(units).magnitude) * units

    r = scipy.signal.fftconvolve(time_vector,
                                 kernel(t_arr).rescale(pq.Hz).magnitude,
                                 'full')
    if np.any(r < 0):
        warnings.warn("Instantaneous firing rate approximation contains "
                      "negative values, possibly caused due to machine "
                      "precision errors.")

    if not trim:
        r = r[kernel.median_index(t_arr):-(kernel(t_arr).size -
                                           kernel.median_index(t_arr))]
    elif trim:
        r = r[2 * kernel.median_index(t_arr):-2 *
              (kernel(t_arr).size - kernel.median_index(t_arr))]
        t_start += kernel.median_index(t_arr) * spiketrain.units
        t_stop -= (kernel(t_arr).size -
                   kernel.median_index(t_arr)) * spiketrain.units

    rate = neo.AnalogSignalArray(signal=r.reshape(r.size, 1),
                                 sampling_period=sampling_period,
                                 units=pq.Hz,
                                 t_start=t_start,
                                 t_stop=t_stop)

    return rate
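
A hedged end-to-end sketch of calling `instantaneous_rate` with an explicit kernel, assuming `elephant.kernels` provides `GaussianKernel` as referenced in the docstring (the spike times are illustrative):

    import quantities as pq
    import neo
    from elephant import kernels

    st = neo.SpikeTrain([10., 40., 45., 80.] * pq.ms,
                        t_start=0 * pq.ms, t_stop=100 * pq.ms)
    kernel = kernels.GaussianKernel(sigma=5 * pq.ms)
    rate = instantaneous_rate(st, sampling_period=1 * pq.ms, kernel=kernel)
    # rate is a one-column AnalogSignalArray in Hz; rate.times is its time axis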
Example #22
def time_histogram(spiketrains, binsize, t_start=None, t_stop=None,
                   output='counts', binary=False):

    """
    Time Histogram of a list of :attr:`neo.SpikeTrain` objects.

    Parameters
    ----------
    spiketrains : List of neo.SpikeTrain objects
        Spiketrains with a common time axis (same `t_start` and `t_stop`)
    binsize : quantities.Quantity
        Width of the histogram's time bins.
    t_start, t_stop : Quantity (optional)
        Start and stop time of the histogram. Only events in the input
        `spiketrains` falling between `t_start` and `t_stop` (both included)
        are considered in the histogram. If `t_start` and/or `t_stop` are not
        specified, the maximum `t_start` of all :attr:spiketrains is used as
        `t_start`, and the minimum `t_stop` is used as `t_stop`.
        Default: t_start = t_stop = None
    output : str (optional)
        Normalization of the histogram. Can be one of:
        * 'counts': spike counts at each bin (as integer numbers)
        * 'mean': mean spike counts per spike train
        * 'rate': mean spike rate per spike train. Like 'mean', but the
          counts are additionally normalized by the bin width.
    binary : bool (optional)
        If True, all spiketrain objects are first binned to a binary
        representation (using the `BinnedSpikeTrain` class in the
        `conversion` module), and the histogram is calculated on this
        representation.
        Note that the output is not binary, but a histogram of the
        converted, binary representation.
        Default: False

    Returns
    -------
    time_hist : neo.AnalogSignalArray
        A neo.AnalogSignalArray object containing the histogram values.
        `AnalogSignal[j]` is the histogram computed between
        `t_start + j * binsize` and `t_start + (j + 1) * binsize`.

    See also
    --------
    elephant.conversion.BinnedSpikeTrain
    """
    min_tstop = 0
    if t_start is None:
        # Find the internal range for t_start, where all spike trains are
        # defined; cut all spike trains taking that time range only
        max_tstart, min_tstop = _get_start_stop_from_input(spiketrains)
        t_start = max_tstart
        if not all([max_tstart == t.t_start for t in spiketrains]):
            warnings.warn(
                "Spiketrains have different t_start values -- "
                "using maximum t_start as t_start.")

    if t_stop is None:
        # Find the internal range for t_stop
        if min_tstop:
            t_stop = min_tstop
            if not all([min_tstop == t.t_stop for t in spiketrains]):
                warnings.warn(
                    "Spiketrains have different t_stop values -- "
                    "using minimum t_stop as t_stop.")
        else:
            min_tstop = _get_start_stop_from_input(spiketrains)[1]
            t_stop = min_tstop
            if not all([min_tstop == t.t_stop for t in spiketrains]):
                warnings.warn(
                    "Spiketrains have different t_stop values -- "
                    "using minimum t_stop as t_stop.")

    sts_cut = [st.time_slice(t_start=t_start, t_stop=t_stop) for st in
               spiketrains]

    # Bin the spike trains and sum across columns
    bs = BinnedSpikeTrain(sts_cut, t_start=t_start, t_stop=t_stop,
                          binsize=binsize)

    if binary:
        bin_hist = bs.to_sparse_bool_array().sum(axis=0)
    else:
        bin_hist = bs.to_sparse_array().sum(axis=0)
    # Flatten array
    bin_hist = np.ravel(bin_hist)
    # Renormalise the histogram
    if output == 'counts':
        # Raw
        bin_hist = bin_hist * pq.dimensionless
    elif output == 'mean':
        # Divide by number of input spike trains
        bin_hist = bin_hist * 1. / len(spiketrains) * pq.dimensionless
    elif output == 'rate':
        # Divide by number of input spike trains and bin width
        bin_hist = bin_hist * 1. / len(spiketrains) / binsize
    else:
        raise ValueError('Parameter output is not valid.')

    return neo.AnalogSignalArray(signal=bin_hist.reshape(bin_hist.size, 1),
                                 sampling_period=binsize, units=bin_hist.units,
                                 t_start=t_start)
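
A hedged usage sketch for `time_histogram` with the 'rate' normalization (toy spike trains; values illustrative):

    import quantities as pq
    import neo

    st1 = neo.SpikeTrain([1, 11, 27] * pq.ms, t_start=0 * pq.ms, t_stop=30 * pq.ms)
    st2 = neo.SpikeTrain([4, 12, 22] * pq.ms, t_start=0 * pq.ms, t_stop=30 * pq.ms)
    hist = time_histogram([st1, st2], binsize=5 * pq.ms, output='rate')
    # hist[j] is the mean firing rate in bin j, i.e.
    # counts[j] / (len(spiketrains) * binsize)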
Example #23
    def read_in_signal(self, segment, block, signal_array, data_indexes,
                       view_indexes, variable, recording_start_time,
                       sampling_interval, units, label):
        """ Reads in a data item that's not spikes (likely v, gsyn e, gsyn i)\
        and saves this data to the segment.

        :param segment: Segment to add data to
        :type segment: neo.Segment
        :param block: neo block
        :type block: neo.Block
        :param signal_array: the raw signal data
        :type signal_array: nparray
        :param data_indexes: The indexes for the recorded data
        :type data_indexes: list(int)
        :param view_indexes: The indexes for which data should be returned.\
            If None, all data is returned (view_indexes = data_indexes)
        :type view_indexes: list(int)
        :param variable: the variable name
        :param recording_start_time: when recording started
        :param sampling_interval: how often a neuron is recorded
        :param units: the units of the recorded value
        :param label: human readable label
        """
        # pylint: disable=too-many-arguments
        t_start = recording_start_time * quantities.ms
        sampling_period = sampling_interval * quantities.ms
        if view_indexes is None:
            indexes = numpy.array(data_indexes)
        elif view_indexes == data_indexes:
            indexes = numpy.array(data_indexes)
        else:
            # keep just the view indexes in the data
            indexes = [i for i in view_indexes if i in data_indexes]
            # keep just data columns in the view
            map_indexes = [data_indexes.index(i) for i in indexes]
            signal_array = signal_array[:, map_indexes]

        ids = list(map(self._population.index_to_id, indexes))
        if pynn8_syntax:
            data_array = neo.AnalogSignalArray(signal_array,
                                               units=units,
                                               t_start=t_start,
                                               sampling_period=sampling_period,
                                               name=variable,
                                               source_population=label,
                                               channel_index=indexes,
                                               source_ids=ids)
            data_array.shape = (data_array.shape[0], data_array.shape[1])
            segment.analogsignalarrays.append(data_array)
        else:
            data_array = neo.AnalogSignal(signal_array,
                                          units=units,
                                          t_start=t_start,
                                          sampling_period=sampling_period,
                                          name=variable,
                                          source_population=label,
                                          source_ids=ids)
            channel_index = _get_channel_index(indexes, block)
            data_array.channel_index = channel_index
            data_array.shape = (data_array.shape[0], data_array.shape[1])
            segment.analogsignals.append(data_array)
            channel_index.analogsignals.append(data_array)
Example #24
    def _cch_fast(st_1, st_2, win, mode, norm, border_corr, binary, kern):

        # Check that the spike trains are binned with the same temporal
        # resolution
        if not st_1.matrix_rows == 1:
            raise AssertionError("Spike train must be one dimensional")
        if not st_2.matrix_rows == 1:
            raise AssertionError("Spike train must be one dimensional")
        if not st_1.binsize == st_2.binsize:
            raise AssertionError("Bin sizes must be equal")

        # Retrieve the array of the binned spike train
        st1_arr = st_1.to_array()[0, :]
        st2_arr = st_2.to_array()[0, :]
        binsize = st_1.binsize

        # Convert to a binary representation
        if binary:
            st1_arr = np.array(st1_arr > 0, dtype=int)
            st2_arr = np.array(st2_arr > 0, dtype=int)
        max_num_bins = max(len(st1_arr), len(st2_arr))

        # Cross correlate the spiketrains

        # Case explicit temporal window
        if win is not None:
            # Window parameter given in number of bins (integer)
            if isinstance(win[0], int) and isinstance(win[1], int):
                # Check the window parameter values
                if win[0] >= win[1] or win[0] <= -max_num_bins \
                        or win[1] >= max_num_bins:
                    raise ValueError(
                        "The window exceeds the length of the spike trains")
                # Assign left and right edges of the cch
                l, r = win[0], win[1]
            # Window parameter given in time units
            else:
                # Check the window parameter values
                if win[0].rescale(binsize.units).magnitude % \
                    binsize.magnitude != 0 or win[1].rescale(
                        binsize.units).magnitude % binsize.magnitude != 0:
                    raise ValueError(
                        "The window has to be a multiple of the binsize")
                if win[0] >= win[1] or win[0] <= -max_num_bins * binsize \
                        or win[1] >= max_num_bins * binsize:
                    raise ValueError("The window exceed the length of the"
                                     " spike trains")
                # Assign left and right edges of the cch
                l, r = int(win[0].rescale(binsize.units) / binsize), int(
                    win[1].rescale(binsize.units) / binsize)

            # Cross correlate the spike trains
            corr = np.correlate(st2_arr, st1_arr, mode='full')
            counts = corr[len(st1_arr) + l + 1:len(st1_arr) + 1 + r + 1]

        # Case generic
        else:
            # Cross correlate the spike trains
            counts = np.correlate(st2_arr, st1_arr, mode=mode)
            # Assign the edges of the cch for the different mode parameters
            if mode == 'full':
                # Assign left and right edges of the cch
                r = st_2.num_bins - 1
                l = -st_1.num_bins + 1
            # cch computed only for the entries that completely overlap
            elif mode == 'valid':
                # Assign left and right edges of the cch
                r = max(st_2.num_bins - st_1.num_bins, 0)
                l = min(st_2.num_bins - st_1.num_bins, 0)
        bin_ids = np.r_[l:r + 1]

        # Correct the values to account for the missing contributions
        # at the edges
        if border_corr is True:
            correction = float(max_num_bins + 1) / np.array(
                max_num_bins + 1 - abs(np.arange(l, r + 1)), float)
            counts = counts * correction

        # Define the kern for smoothing as an ndarray
        if hasattr(kern, '__iter__'):
            if len(kern) > np.abs(l) + np.abs(r) + 1:
                raise ValueError(
                    'The length of the kernel cannot be larger than the '
                    'length %d of the resulting CCH.' %
                    (np.abs(l) + np.abs(r) + 1))
            kern = np.array(kern, dtype=float)
            kern = 1. * kern / sum(kern)
        # Check kern parameter
        elif kern is not None:
            raise ValueError('Invalid smoothing kernel.')

        # Smooth the cross-correlation histogram with the kern
        if kern is not None:
            counts = np.convolve(counts, kern, mode='same')

        # Rescale the histogram so that the central bin has height 1,
        # if requested
        if norm and l <= 0 <= r:
            if counts[np.abs(l)] != 0:
                counts = counts / counts[np.abs(l)]
            else:
                warnings.warn('CCH not normalized because no value for 0 lag')

        # Transform the array count into an AnalogSignalArray
        cch_result = neo.AnalogSignalArray(
            signal=counts.reshape(counts.size, 1),
            units=pq.dimensionless,
            t_start=(bin_ids[0] - 0.5) * st_1.binsize,
            sampling_period=st_1.binsize)
        # Return only the hist_bins bins and counts before and after the
        # central one
        return cch_result, bin_ids
Example #25
def estimate_csd(lfp,
                 coord_electrode,
                 sigma,
                 method='standard',
                 diam=None,
                 h=None,
                 sigma_top=None,
                 tol=1E-6,
                 num_steps=200,
                 f_type='identity',
                 f_order=None):
    """
    Estimates current source density (CSD) from local field potential (LFP)
    recordings from multiple depths of the cortex.

    Parameters
    ----------
    lfp : neo.AnalogSignalArray
        LFP signals from which CSD is estimated.
    coord_electrode : Quantity array
        Depth of evenly spaced electrode contact points.
    sigma : Quantity float
        Conductivity of tissue.
    method : string
        CSD estimation method, either of 'standard': the standard
        double-derivative method, 'delta': delta-iCSD method, 'step':
        step-iCSD method, 'spline': spline-iCSD method. Default is 'standard'
    diam : Quantity float
        Diameter of the assumed circular planar current sources centered at
        each contact, required by iCSD methods (= 'delta', 'step',
        'spline'). Default is `None`.
    h : float or np.ndarray * quantity.Quantity
        assumed thickness of the source cylinders at all or each contact
    sigma_top : Quantity float
        Conductivity on top of tissue. When set to `None`, the same value as
        `sigma` is used. Default is `None`.
    tol : float
        Tolerance of numerical integration, required by step- and
        spline-iCSD methods. Default is 1E-6.
    num_steps : int
        Number of data points for the spatially upsampled LFP/CSD data,
        required by spline-iCSD method. Default is 200.
    f_type : string
        Type of spatial filter used for smoothing of the result, either of
        'boxcar' (uses `scipy.signal.boxcar()`), 'hamming' (
        `scipy.signal.hamming()`), 'triangular' (`scipy.signal.triang()`),
        'gaussian' (`scipy.signal.gaussian`), 'identity' (no smoothing is
        applied). Default is 'identity'.
    f_order : float tuple
        Parameters to be passed to the scipy.signal function associated with
        the specified filter type.


    Returns
    -------
    tuple : (csd, csd_filtered)
        csd : neo.AnalogSignalArray
            Estimated CSD
        csd_filtered : neo.AnalogSignalArray
            Estimated CSD, spatially filtered


    Example
    -------
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy import io
    import quantities as pq
    import neo

    import icsd


    #loading test data
    test_data = io.loadmat('test_data.mat')

    #prepare lfp data for use, by changing the units to SI and append
    #quantities, along with electrode geometry and conductivities
    lfp_data = test_data['pot1'] * 1E-3 * pq.V        # [mV] -> [V]
    z_data = np.linspace(100E-6, 2300E-6, 23) * pq.m  # [m]
    diam = 500E-6 * pq.m                              # [m]
    sigma = 0.3 * pq.S / pq.m                         # [S/m] or [1/(ohm*m)]
    sigma_top = 0. * pq.S / pq.m                      # [S/m] or [1/(ohm*m)]

    lfp = neo.AnalogSignalArray(lfp_data.T, sampling_rate=2.0*pq.kHz)

    # Input dictionaries for each method
    params = {}
    params['delta'] = {
        'method': 'delta',
        'lfp' : lfp,
        'coord_electrode' : z_data,
        'diam' : diam,        # source diameter
        'sigma' : sigma,           # extracellular conductivity
        'sigma_top' : sigma,       # conductivity on top of cortex
    }
    params['step'] = {
        'method': 'step',
        'lfp' : lfp,
        'coord_electrode' : z_data,
        'diam' : diam,
        'sigma' : sigma,
        'sigma_top' : sigma,
        'tol' : 1E-12,          # Tolerance in numerical integration
        }
    params['spline'] = {
        'method': 'spline',
        'lfp' : lfp,
        'coord_electrode' : z_data,
        'diam' : diam,
        'sigma' : sigma,
        'sigma_top' : sigma,
        'num_steps' : 201,      # Spatial CSD upsampling to N steps
        'tol' : 1E-12,
        }
    params['standard'] = {
        'method': 'standard',
        'lfp' : lfp,
        'coord_electrode' : z_data,
        'sigma' : sigma,
        }

    #plot LFP signal
    fig, axes = plt.subplots(len(params)+1, 1, figsize=(6, 8))
    ax = axes[0]
    im = ax.imshow(lfp.magnitude.T, origin='upper', vmin=-abs(lfp).max(),
                   vmax=abs(lfp).max(), cmap='jet_r', interpolation='nearest')
    ax.axis(ax.axis('tight'))
    cb = plt.colorbar(im, ax=ax)
    cb.set_label('LFP (%s)' % lfp_data.dimensionality.string)
    ax.set_xticklabels([])
    ax.set_title('LFP')
    ax.set_ylabel('ch #')
    i_ax = 1
    for method, param in params.items():
        ax = axes[i_ax]
        i_ax += 1
        csd, csd_filtered = icsd.estimate_csd(**param)
        im = ax.imshow(csd.magnitude.T, origin='upper', vmin=-abs(csd).max(),
                       vmax=abs(csd).max(), cmap='jet_r',
                       interpolation='nearest')
        ax.axis(ax.axis('tight'))
        ax.set_title(method)
        cb = plt.colorbar(im, ax=ax)
        cb.set_label('CSD (%s)' % csd.dimensionality.string)
        ax.set_xticklabels([])
        ax.set_ylabel('ch #')

    plt.show()
    """

    supported_methods = ('standard', 'delta', 'step', 'spline')
    icsd_methods = ('delta', 'step', 'spline')

    if method not in supported_methods:
        raise ValueError("Parameter `method` must be either of {}".format(
            ", ".join(supported_methods)))
    elif method in icsd_methods and diam is None:
        raise ValueError(
            "Parameter `diam` must be specified for iCSD methods: "
            "{}".format(", ".join(icsd_methods)))

    if not isinstance(lfp, neo.AnalogSignalArray):
        raise TypeError('Parameter `lfp` must be a neo.AnalogSignalArray')

    if f_type != 'identity' and f_order is None:
        raise ValueError(
            "The order of the {} filter must be specified".format(f_type))

    lfp_pqarr = lfp.magnitude.T * lfp.units
    if sigma_top is None:
        sigma_top = sigma

    arg_dict = {
        'lfp': lfp_pqarr,
        'coord_electrode': coord_electrode,
        'sigma': sigma,
        'f_type': f_type,
        'f_order': f_order,
    }
    if method == 'standard':
        csd_estimator = StandardCSD(**arg_dict)
    else:
        arg_dict['diam'] = diam
        arg_dict['sigma_top'] = sigma_top
        if method == 'delta':
            csd_estimator = DeltaiCSD(**arg_dict)
        else:
            arg_dict['tol'] = tol
            if method == 'step':
                arg_dict['h'] = h
                csd_estimator = StepiCSD(**arg_dict)
            else:
                arg_dict['num_steps'] = num_steps
                csd_estimator = SplineiCSD(**arg_dict)
    csd_pqarr = csd_estimator.get_csd()
    csd_pqarr_filtered = csd_estimator.filter_csd(csd_pqarr)
    csd = neo.AnalogSignalArray(csd_pqarr.T,
                                t_start=lfp.t_start,
                                sampling_rate=lfp.sampling_rate)
    csd_filtered = neo.AnalogSignalArray(csd_pqarr_filtered.T,
                                         t_start=lfp.t_start,
                                         sampling_rate=lfp.sampling_rate)

    return csd, csd_filtered
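
The nested dispatch above can equivalently be written as a lookup table mapping each method name to its estimator class and the extra keyword arguments it consumes. A behaviour-equivalent sketch, reusing the classes and local variables from the function body (a hypothetical refactoring, not part of the original code):

# Table-driven variant of the method dispatch (sketch).
method_table = {
    'standard': (StandardCSD, ()),
    'delta': (DeltaiCSD, ('diam', 'sigma_top')),
    'step': (StepiCSD, ('diam', 'sigma_top', 'tol', 'h')),
    'spline': (SplineiCSD, ('diam', 'sigma_top', 'tol', 'num_steps')),
}
extras = {'diam': diam, 'sigma_top': sigma_top, 'tol': tol,
          'h': h, 'num_steps': num_steps}
estimator_class, extra_names = method_table[method]
arg_dict.update((name, extras[name]) for name in extra_names)
csd_estimator = estimator_class(**arg_dict)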
Ejemplo n.º 26
0
    def _cch_memory(binned_st1, binned_st2, win, border_corr, binary, kern):

        # Retrieve unclipped matrix
        st1_spmat = binned_st1.to_sparse_array()
        st2_spmat = binned_st2.to_sparse_array()
        binsize = binned_st1.binsize
        max_num_bins = max(binned_st1.num_bins, binned_st2.num_bins)

        # Set the time window in which the cch is computed
        if not isinstance(win, str):
            # Window parameter given in number of bins (integer)
            if isinstance(win[0], int) and isinstance(win[1], int):
                # Check the window parameter values
                if win[0] >= win[1] or win[0] <= -max_num_bins \
                        or win[1] >= max_num_bins:
                    raise ValueError(
                        "The window exceeds the length of the spike trains")
                # Assign left and right edges of the cch
                l, r = win[0], win[1]
            # Window parameter given in time units
            else:
                # Check the window parameter values
                if win[0].rescale(binsize.units).magnitude % \
                    binsize.magnitude != 0 or win[1].rescale(
                        binsize.units).magnitude % binsize.magnitude != 0:
                    raise ValueError(
                        "The window has to be a multiple of the binsize")
                if win[0] >= win[1] or win[0] <= -max_num_bins * binsize \
                        or win[1] >= max_num_bins * binsize:
                    raise ValueError("The window exceeds the length of the"
                                     " spike trains")
                # Assign left and right edges of the cch
                l, r = int(win[0].rescale(binsize.units) / binsize), int(
                    win[1].rescale(binsize.units) / binsize)
        # Case with window defined by a string
        elif win == 'full':
            # cch computed for all the possible entries
            # Assign left and right edges of the cch
            r = binned_st2.num_bins - 1
            l = -binned_st1.num_bins + 1
        elif win == 'valid':
            # cch computed only for the entries that completely overlap
            # Assign left and right edges of the cch
            r = max(binned_st2.num_bins - binned_st1.num_bins, 0)
            l = min(binned_st2.num_bins - binned_st1.num_bins, 0)
        # Check the mode parameter
        else:
            raise KeyError("Invalid window parameter")

        # For each row, extract the nonzero column indices and the
        # corresponding data in the matrix (for performance reasons)
        st1_bin_idx_unique = st1_spmat.nonzero()[1]
        st2_bin_idx_unique = st2_spmat.nonzero()[1]

        # Case with binary entries
        if binary:
            st1_bin_counts_unique = np.array(st1_spmat.data > 0, dtype=int)
            st2_bin_counts_unique = np.array(st2_spmat.data > 0, dtype=int)
        # Case with all values
        else:
            st1_bin_counts_unique = st1_spmat.data
            st2_bin_counts_unique = st2_spmat.data

        # Initialize the counts to an array of zeroes,
        # and the bin IDs to integers
        # spanning the time axis
        counts = np.zeros(r - l + 1)
        bin_ids = np.arange(l, r + 1)
        # Compute the CCH at lags in l,...,r only
        for idx, i in enumerate(st1_bin_idx_unique):
            il = np.searchsorted(st2_bin_idx_unique, l + i)
            ir = np.searchsorted(st2_bin_idx_unique, r + i, side='right')
            timediff = st2_bin_idx_unique[il:ir] - i
            assert ((timediff >= l) & (timediff <= r)).all(), \
                'Not all the entries of cch lie in the window'
            counts[timediff - l] += (st1_bin_counts_unique[idx] *
                                     st2_bin_counts_unique[il:ir])
            st2_bin_idx_unique = st2_bin_idx_unique[il:]
            st2_bin_counts_unique = st2_bin_counts_unique[il:]
        # Border correction
        if border_corr is True:
            counts = _border_correction(counts, max_num_bins, l, r)
        if kern is not None:
            # Smoothing
            counts = _kernel_smoothing(counts, kern, l, r)
        # Transform the array count into an AnalogSignalArray
        cch_result = neo.AnalogSignalArray(
            signal=counts.reshape(counts.size, 1),
            units=pq.dimensionless,
            t_start=(bin_ids[0] - 0.5) * binned_st1.binsize,
            sampling_period=binned_st1.binsize)
        # Return the CCH and the bin IDs of the computed lags
        return cch_result, bin_ids
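
Stripped of the sparse-matrix machinery, the counting loop above reduces to a pairwise product over spike-carrying bins. A self-contained toy check in plain numpy (the bin counts and window edges are made-up values):

import numpy as np

st1 = np.array([1, 0, 2, 0, 1])   # hypothetical bin counts, first train
st2 = np.array([0, 1, 1, 0, 2])   # hypothetical bin counts, second train
l, r = -2, 2                      # window edges, in bins
counts = np.zeros(r - l + 1)
for i in np.flatnonzero(st1):        # bins where the first train fired
    for j in np.flatnonzero(st2):    # bins where the second train fired
        lag = j - i
        if l <= lag <= r:
            counts[lag - l] += st1[i] * st2[j]
# counts[k] is the CCH at lag l + k. The sparse version above computes
# the same sums, but restricts j to the window with two searchsorted
# calls instead of testing every pair.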
Ejemplo n.º 27
0
def instantaneous_rate(spiketrain, sampling_period, form,
                       sigma='auto', t_start=None, t_stop=None,
                       acausal=True, trim=False):

    """
    Estimate instantaneous firing rate by kernel convolution.

    Parameters
    -----------
    spiketrain : neo.SpikeTrain
        Neo object that contains spike times, the unit of the time stamps
        and t_start and t_stop of the spike train.
    sampling_period : Quantity
        Time stamp resolution of the spike times. The same resolution will
        be assumed for the kernel.
    form : {'BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'}
        Kernel form. Currently implemented forms are BOX (boxcar),
        TRI (triangle), GAU (gaussian), EPA (epanechnikov), EXP (exponential),
        ALP (alpha function). EXP and ALP are asymmetric kernel forms and
        assume optional parameter `direction`.
    sigma : string or Quantity
        Standard deviation of the distribution associated with kernel shape.
        This parameter defines the time resolution of the kernel estimate
        and makes different kernels comparable (cf. [1] for symmetric kernels).
        This is used here as an alternative definition to the cut-off
        frequency of the associated linear filter.
        Default value is 'auto'. In this case, the optimized kernel width for
        the rate estimation is calculated according to [1].
    t_start : Quantity (Optional)
        Start time of the interval used to compute the firing rate. If None,
        assumed equal to spiketrain.t_start.
        Default: None
    t_stop : Quantity (Optional)
        End time of the interval used to compute the firing rate (included).
        If None, assumed equal to spiketrain.t_stop.
        Default: None
    acausal : bool
        If True, acausal filtering is used, i.e., the gravity center of the
        filter function is aligned with the spike to convolve.
        Default: True
    trim : bool
        If True, only the 'valid' region of the convolved signal is
        returned, i.e., the points where there is no complete overlap
        between kernel and spike train are discarded.
        NOTE: if True and an asymmetrical kernel is provided, the output
        will not be aligned with [t_start, t_stop].
        Default: False

    Returns
    -------
    rate : neo.AnalogSignalArray
        Contains the rate estimation in unit hertz (Hz).
        Has a property 'rate.times' which contains the time axis of the rate
        estimate. The unit of this property is the same as the resolution that
        is given as an argument to the function.

    Raises
    ------
    TypeError:
        If argument value for the parameter `sigma` is not a quantity object
        or string 'auto'.

    See also
    --------
    elephant.statistics.make_kernel

    References
    ----------
    .. [1] H. Shimazaki, S. Shinomoto, J Comput Neurosci (2010) 29:171–182.
    """
    if sigma == 'auto':
        unit = spiketrain.units
        kernel_width = sskernel(spiketrain.magnitude, tin=None,
                                bootstrap=True)['optw']
        sigma = kernel_width*unit
    elif not isinstance(sigma, pq.Quantity):
        raise TypeError('sigma must be either a quantities object or "auto".'
                        ' Found: %s, value %s' % (type(sigma), str(sigma)))

    kernel, norm, m_idx = make_kernel(form=form, sigma=sigma,
                                      sampling_period=sampling_period)
    units = pq.CompoundUnit("%s*s" % str(sampling_period.rescale('s').magnitude))
    spiketrain = spiketrain.rescale(units)
    if t_start is None:
        t_start = spiketrain.t_start
    else:
        t_start = t_start.rescale(spiketrain.units)

    if t_stop is None:
        t_stop = spiketrain.t_stop
    else:
        t_stop = t_stop.rescale(spiketrain.units)

    time_vector = np.zeros(int((t_stop - t_start)) + 1)

    spikes_slice = spiketrain.time_slice(t_start, t_stop) \
        if len(spiketrain) else np.array([])

    for spike in spikes_slice:
        index = int((spike - t_start))
        time_vector[index] += 1

    r = norm * scipy.signal.fftconvolve(time_vector, kernel, 'full')
    if np.any(r < 0):
        warnings.warn('Instantaneous firing rate approximation contains '
                      'negative values, possibly caused due to machine '
                      'precision errors')

    # The acausal and causal cases currently apply the same alignment,
    # so only the trim flag changes the output window.
    if not trim:
        r = r[m_idx:-(kernel.size - m_idx)]
    else:
        r = r[2 * m_idx:-2 * (kernel.size - m_idx)]
        t_start = t_start + m_idx * spiketrain.units
        t_stop = t_stop - (kernel.size - m_idx) * spiketrain.units

    rate = neo.AnalogSignalArray(signal=r.reshape(r.size, 1),
                                 sampling_period=sampling_period,
                                 units=pq.Hz, t_start=t_start)

    return rate
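
Stripped of the neo and quantities bookkeeping, the estimator is a binned spike count convolved with a unit-area kernel. A minimal sketch with a fixed Gaussian kernel and made-up spike times (the 'auto' bandwidth optimization via sskernel is deliberately not reproduced here):

import numpy as np
import scipy.signal

dt = 0.001                                   # sampling period in seconds
spike_times = np.array([0.10, 0.12, 0.30])   # hypothetical spike times, s
t_stop = 0.5

binned = np.zeros(int(t_stop / dt) + 1)      # spike counts per time bin
binned[(spike_times / dt).astype(int)] += 1

sigma = 0.01                                 # kernel std in seconds
kernel = scipy.signal.gaussian(int(10 * sigma / dt), sigma / dt)
kernel /= kernel.sum() * dt                  # unit area, so output is in Hz
rate = scipy.signal.fftconvolve(binned, kernel, mode='same')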
Ejemplo n.º 28
0
def estimate_csd(lfp,
                 coords=None,
                 method=None,
                 process_estimate=True,
                 **kwargs):
    """
    Function call to compute the current source density (CSD) from
    extracellular potential recordings (local field potentials, LFP) using
    laminar electrodes or multi-contact electrodes with 2D or 3D geometries.

    Parameters
    ----------
    lfp : list of neo.AnalogSignal objects
        Positions of electrodes can be attached as neo.RecordingChannel
        coordinates or passed externally via the `coords` argument (see
        below).
    coords : Quantity array (Optional)
        Corresponding spatial coordinates of the electrodes.
        Defaults to None; in that case the RecordingChannel coordinates
        are used.
    method : string
        Pick a method corresponding to the setup. In this implementation:
        for laminar probes (1D), use 'KCSD1D', 'StandardCSD', 'DeltaiCSD',
        'StepiCSD' or 'SplineiCSD';
        for MEA probes (2D), use 'KCSD2D' or 'MoIKCSD';
        for arrays of laminar probes (3D), use 'KCSD3D'.
        Defaults to None
    process_estimate : bool
        In the py_iCSD_toolbox methods this corresponds to filter_csd -
        the parameters are passed as kwargs here, i.e., f_type and f_order.
        In the kCSD methods this corresponds to cross_validate -
        the parameters are passed as kwargs here, i.e., lambdas and Rs.
        Defaults to True
    kwargs : parameters to each method
        The parameters corresponding to the method chosen.
        See the documentation of the individual method.
        Default is {}, which picks the best parameters.

    Returns
    -------
    Estimated CSD
       neo.AnalogSignalArray Object
       annotated with the spatial coordinates

    Raises
    ------
    AttributeError
        No units specified for electrode spatial coordinates
    ValueError
        Invalid function arguments, wrong method name, or
        mismatching coordinates
    TypeError
        Invalid cv_param argument passed
    """
    if not isinstance(lfp[0], neo.AnalogSignal):
        raise TypeError('Parameter `lfp` must be a list of neo.AnalogSignal '
                        'objects')
    if coords is None:
        coords = []
        for ii in lfp:
            coords.append(ii.recordingchannel.coordinate.rescale(pq.mm))
    else:
        scaled_coords = []
        for coord in coords:
            try:
                scaled_coords.append(coord.rescale(pq.mm))
            except AttributeError:
                raise AttributeError('No units given for electrode spatial '
                                     'coordinates')
        coords = scaled_coords
    if method is None:
        raise ValueError('Must specify a method of CSD implementation')
    if len(coords) != len(lfp):
        raise ValueError('Number of signals and coordinates must be equal')
    for ii in coords:  # CHECK for Dimensionality of electrodes
        if len(ii) > 3:
            raise ValueError('Invalid number of coordinate positions')
    dim = len(coords[0])  # TODO : Generic co-ordinates!
    if dim == 1 and (method not in available_1d):
        raise ValueError('Invalid method, Available options are:',
                         available_1d)
    if dim == 2 and (method not in available_2d):
        raise ValueError('Invalid method, Available options are:',
                         available_2d)
    if dim == 3 and (method not in available_3d):
        raise ValueError('Invalid method, Available options are:',
                         available_3d)
    if method in kernel_methods:
        input_array = np.zeros((len(lfp), lfp[0].magnitude.shape[0]))
        for ii, jj in enumerate(lfp):
            input_array[ii, :] = jj.rescale(pq.mV).magnitude
        kernel_method = getattr(KCSD, method)  # fetch the class 'KCSD1D'
        lambdas = kwargs.pop('lambdas', None)
        Rs = kwargs.pop('Rs', None)
        k = kernel_method(np.array(coords), input_array, **kwargs)
        if process_estimate:
            k.cross_validate(lambdas, Rs)
        estm_csd = k.values()
        estm_csd = np.rollaxis(estm_csd, -1, 0)
        output = neo.AnalogSignalArray(estm_csd * pq.uA / pq.mm**3,
                                       t_start=lfp[0].t_start,
                                       sampling_rate=lfp[0].sampling_rate)

        if dim == 1:
            output.annotate(x_coords=k.estm_x)
        elif dim == 2:
            output.annotate(x_coords=k.estm_x, y_coords=k.estm_y)
        elif dim == 3:
            output.annotate(x_coords=k.estm_x,
                            y_coords=k.estm_y,
                            z_coords=k.estm_z)
    elif method in py_iCSD_toolbox:

        coords = np.array(coords) * coords[0].units

        if method in icsd_methods:
            try:
                coords = coords.rescale(kwargs['diam'].units)
            except KeyError:
                # All iCSD methods explicitly assume a source
                # diameter, in contrast to the standard CSD that
                # implicitly assumes an infinite source radius
                raise ValueError(
                    "Parameter diam must be specified for iCSD methods: "
                    "{}".format(", ".join(icsd_methods)))

        if 'f_type' in kwargs:
            if (kwargs['f_type'] != 'identity') and \
               (kwargs['f_order'] is None):
                raise ValueError("The order of the {} filter must be "
                                 "specified".format(kwargs['f_type']))

        lfp = neo.AnalogSignalArray(np.asarray(lfp).T,
                                    units=lfp[0].units,
                                    sampling_rate=lfp[0].sampling_rate)
        csd_method = getattr(icsd, method)  # fetch class from icsd.py file
        csd_estimator = csd_method(lfp=lfp.magnitude.T * lfp.units,
                                   coord_electrode=coords.flatten(),
                                   **kwargs)
        csd_pqarr = csd_estimator.get_csd()

        if process_estimate:
            csd_pqarr_filtered = csd_estimator.filter_csd(csd_pqarr)
            output = neo.AnalogSignalArray(csd_pqarr_filtered.T,
                                           t_start=lfp.t_start,
                                           sampling_rate=lfp.sampling_rate)
        else:
            output = neo.AnalogSignalArray(csd_pqarr.T,
                                           t_start=lfp.t_start,
                                           sampling_rate=lfp.sampling_rate)
        output.annotate(x_coords=coords)
    return output
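
A minimal usage sketch for this wrapper, with made-up laminar data and explicitly passed coordinates so that no RecordingChannel information is required (hypothetical values throughout):

import numpy as np
import quantities as pq
import neo

# 16 contacts, 1000 samples of fake LFP each
lfp = [neo.AnalogSignal(np.random.rand(1000) * pq.mV,
                        sampling_rate=1.0 * pq.kHz)
       for _ in range(16)]
# one depth coordinate per contact, 0.1 mm spacing
coords = [[z] * pq.mm for z in np.linspace(0.1, 1.6, 16)]

csd = estimate_csd(lfp, coords=coords, method='StandardCSD')
print(csd.annotations['x_coords'])   # spatial coordinates of the estimate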
Ejemplo n.º 29
0
        seg.spikes.append(sp)

    for ind2 in range(3):
        irr = neo.IrregularlySampledSignal(name='IrregularlySampled' + str(ind2), times=np.random.rand(10) * qu.s,
                                           signal=np.random.rand(10) * qu.mV)
        irr.segment = seg
        seg.irregularlysampledsignals.append(irr)

    for ind2 in range(3):
        an = neo.AnalogSignal(name='AnalogSignal' + str(ind2), signal=np.random.rand(10) * qu.mV,
                              sampling_rate=10 * qu.Hz)
        an.segment = seg
        seg.analogsignals.append(an)

    for ind2 in range(3):
        an = neo.AnalogSignalArray(name='AnalogSignalArray' + str(ind2), signal=np.random.rand(10, 10) * qu.mV,
                                   sampling_rate=10 * qu.Hz)
        an.segment = seg
        seg.analogsignalarrays.append(an)

    for ind2 in range(3):
        ev = neo.Event(name='Event' + str(ind2), time=np.random.rand() * qu.s, label='h')
        ev.segment = seg
        seg.events.append(ev)

    for ind2 in range(3):
        eva = neo.EventArray(name='EventArray' + str(ind2), times=np.random.rand(10) * qu.s, label=['h'] * 10)
        eva.segment = seg
        seg.eventarrays.append(eva)

    for ind2 in range(3):
        ep = neo.Epoch(name='Epoch' + str(ind2), time=np.random.rand() * qu.s, duration=np.random.rand() * qu.s,
Ejemplo n.º 30
0
    def _cch_speed(binned_st1, binned_st2, win, border_corr, binary, kern):

        # Retrieve the arrays of the binned spike trains
        st1_arr = binned_st1.to_array()[0, :]
        st2_arr = binned_st2.to_array()[0, :]
        binsize = binned_st1.binsize

        # Convert to the binary representation
        if binary:
            st1_arr = np.array(st1_arr > 0, dtype=int)
            st2_arr = np.array(st2_arr > 0, dtype=int)
        max_num_bins = max(len(st1_arr), len(st2_arr))

        # Cross correlate the spiketrains

        # Case explicit temporal window
        if not isinstance(win, str):
            # Window parameter given in number of bins (integer)
            if isinstance(win[0], int) and isinstance(win[1], int):
                # Check the window parameter values
                if win[0] >= win[1] or win[0] <= -max_num_bins \
                        or win[1] >= max_num_bins:
                    raise ValueError(
                        "The window exceeds the length of the spike trains")
                # Assign left and right edges of the cch
                l, r = win
            # Window parameter given in time units
            else:
                # Check the window parameter values
                if win[0].rescale(binsize.units).magnitude % \
                    binsize.magnitude != 0 or win[1].rescale(
                        binsize.units).magnitude % binsize.magnitude != 0:
                    raise ValueError(
                        "The window has to be a multiple of the binsize")
                if win[0] >= win[1] or win[0] <= -max_num_bins * binsize \
                        or win[1] >= max_num_bins * binsize:
                    raise ValueError("The window exceed the length of the"
                                     " spike trains")
                # Assign left and right edges of the cch
                l, r = int(win[0].rescale(binsize.units) / binsize), int(
                    win[1].rescale(binsize.units) / binsize)

            # Zero padding
            st1_arr = np.pad(st1_arr,
                             (int(np.abs(np.min([l, 0]))), np.max([r, 0])),
                             mode='constant')
            cch_mode = 'valid'
        else:
            # Assign the edges of the cch for the different mode parameters
            if win == 'full':
                # Assign left and right edges of the cch
                r = binned_st2.num_bins - 1
                l = -binned_st1.num_bins + 1
            # cch compute only for the entries that completely overlap
            elif win == 'valid':
                # Assign left and right edges of the cch
                r = max(binned_st2.num_bins - binned_st1.num_bins, 0)
                l = min(binned_st2.num_bins - binned_st1.num_bins, 0)
            else:
                raise KeyError("Invalid window parameter")
            cch_mode = win

        # Cross correlate the spike trains
        counts = np.correlate(st2_arr, st1_arr, mode=cch_mode)
        bin_ids = np.r_[l:r + 1]
        # Border correction
        if border_corr is True:
            counts = _border_correction(counts, max_num_bins, l, r)
        if kern is not None:
            # Smoothing
            counts = _kernel_smoothing(counts, kern, l, r)
        # Transform the array count into an AnalogSignalArray
        cch_result = neo.AnalogSignalArray(
            signal=counts.reshape(counts.size, 1),
            units=pq.dimensionless,
            t_start=(bin_ids[0] - 0.5) * binned_st1.binsize,
            sampling_period=binned_st1.binsize)
        # Return the CCH and the bin IDs of the computed lags
        return cch_result, bin_ids
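
The zero-padding trick can be checked against the explicit pair loop of _cch_memory on the same toy arrays used in the sketch after that function (made-up bin counts):

import numpy as np

st1 = np.array([1, 0, 2, 0, 1])
st2 = np.array([0, 1, 1, 0, 2])
l, r = -2, 2
# pad the first train by |l| zeros on the left and r zeros on the right;
# a 'valid' correlation then yields exactly one count per lag l..r
padded = np.pad(st1, (abs(min(l, 0)), max(r, 0)), mode='constant')
counts = np.correlate(st2, padded, mode='valid')
# counts[k] equals sum_i st1[i] * st2[i + l + k], i.e. the CCH at lag
# l + k, matching the pair-loop result ([1, 2, 4, 1, 5] here).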