Example #1
    def setUp(self):
        xx_ele, yy_ele, zz_ele = utils.generate_electrodes(dim=3, res=5,
                                                           xlims=[0.15, 0.85],
                                                           ylims=[0.15, 0.85],
                                                           zlims=[0.15, 0.85])
        self.ele_pos = np.vstack((xx_ele, yy_ele, zz_ele)).T
        self.csd_profile = utils.gauss_3d_dipole
        pots = CSD.generate_lfp(self.csd_profile, xx_ele, yy_ele, zz_ele)
        self.pots = np.reshape(pots, (-1, 1))
        self.test_method = 'KCSD3D'
        self.test_params = {'gdx': 0.05, 'gdy': 0.05, 'gdz': 0.05,
                            'lambd': 5.10896977451e-19, 'src_type': 'step',
                            'R_init': 0.31, 'xmin': 0., 'xmax': 1., 'ymin': 0.,
                            'ymax': 1., 'zmin': 0., 'zmax': 1.}

        temp_signals = []
        for ii in range(len(self.pots)):
            temp_signals.append(self.pots[ii])
        self.an_sigs = neo.AnalogSignal(np.array(temp_signals).T * pq.mV,
                                        sampling_rate=1000 * pq.Hz)
        self.an_sigs.annotate(coordinates=self.ele_pos * pq.mm)
Example #2
def test_traceviewer_cls_method_neo(interactive=False):
    sigs = np.random.rand(100000, 16)
    sample_rate = 1000.
    t_start = 0.

    neo_anasig = neo.AnalogSignal(sigs * pq.mV,
                                  sampling_rate=sample_rate * pq.Hz,
                                  t_start=t_start * pq.s, copy=True)
    print(neo_anasig)


    app = ephyviewer.mkQApp()

    view = ephyviewer.TraceViewer.from_neo_analogsignal(neo_anasig, 'sigs')
    win = ephyviewer.MainViewer(debug=True, show_auto_scale=True)
    win.add_view(view)

    if interactive:
        win.show()
        app.exec_()
    else:
        # close thread properly
        win.close()
Example #3
    def test_threshold_detection(self):
        # Test whether spikes are extracted at the correct times from
        # an analog signal.

        # Load membrane potential simulated using Brian2
        # according to make_spike_extraction_test_data.py.
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        raw_data_file_loc = os.path.join(curr_dir,
                                         'spike_extraction_test_data.txt')
        raw_data = []
        with open(raw_data_file_loc, 'r') as f:
            for x in (f.readlines()):
                raw_data.append(float(x))
        vm = neo.AnalogSignal(raw_data, units=V, sampling_period=0.1 * ms)
        spike_train = stgen.threshold_detection(vm)
        try:
            len(spike_train)
        except TypeError:  # Handles an error in Neo related to some zero length
            # spike trains being treated as unsized objects.
            warnings.warn((
                "The spike train may be an unsized object. This may be related "
                "to an issue in Neo with some zero-length SpikeTrain objects. "
                "Bypassing this by creating an empty SpikeTrain object."))
            spike_train = neo.core.SpikeTrain([],
                                              t_start=spike_train.t_start,
                                              t_stop=spike_train.t_stop,
                                              units=spike_train.units)

        # Correct values determined previously.
        true_spike_train = [
            0.0123, 0.0354, 0.0712, 0.1191, 0.1694, 0.22, 0.2711
        ]

        # Does threshold_detection give the correct number of spikes?
        self.assertEqual(len(spike_train), len(true_spike_train))
        # Does threshold_detection give the correct times for the spikes?
        try:
            assert_array_almost_equal(spike_train, true_spike_train)
        except AttributeError:  # If the numpy version is too old to have allclose
            self.assertTrue(np.array_equal(spike_train, true_spike_train))
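
A standalone sketch of the call under test, using a synthetic membrane-potential trace instead of the Brian2 data file (the threshold and the trace values are illustrative, not taken from the test data):

import numpy as np
import quantities as pq
import neo
from elephant.spike_train_generation import threshold_detection

t = np.arange(0, 1.0, 1e-4)                        # 1 s sampled at 10 kHz
vm = np.full_like(t, -65.0)                        # resting potential in mV
vm[(t > 0.2) & (t < 0.201)] = 20.0                 # two brief suprathreshold events
vm[(t > 0.6) & (t < 0.601)] = 20.0
vm_sig = neo.AnalogSignal(vm, units='mV', sampling_period=0.1 * pq.ms)
spike_train = threshold_detection(vm_sig, threshold=0.0 * pq.mV)
print(len(spike_train))                            # 2 crossings, near 0.2 s and 0.6 s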
Example #4
        def spikes(instantaneous_rates, num_trials, timestep):
            """
            Parameters
            ----------
            instantaneous_rates : np.ndarray
                Array of instantaneous-rate time series, one per process.
            num_trials : int
                Number of trials to generate.
            timestep : pq.Quantity
                Sampling period of the rate time series.

            Returns
            -------
            spiketrains : list of neo.SpikeTrains
                List containing spiketrains of inhomogeneous Poisson
                processes based on given instantaneous rates.

            """

            spiketrains = []
            for _ in range(num_trials):
                spiketrains_per_trial = []
                for inst_rate in instantaneous_rates:
                    # Wrap each rate time series in an AnalogSignal, as
                    # required by the inhomogeneous Poisson generator
                    anasig_inst_rate = neo.AnalogSignal(
                        inst_rate, sampling_rate=1 / timestep, units=pq.Hz)
                    spiketrains_per_trial.append(
                        inhomogeneous_poisson_process(anasig_inst_rate,
                                                      as_array=True))
                spiketrains.append(spiketrains_per_trial)

            return spiketrains
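
The helper above wraps each rate array in a neo.AnalogSignal before handing it to the inhomogeneous Poisson generator; a minimal standalone sketch of that pattern (the rate value and duration are arbitrary illustration, not from the original code):

import numpy as np
import quantities as pq
import neo
from elephant.spike_train_generation import inhomogeneous_poisson_process

timestep = 1 * pq.ms
inst_rate = np.full(1000, 20.0)                        # constant 20 Hz for 1 s
anasig_inst_rate = neo.AnalogSignal(inst_rate, units=pq.Hz,
                                    sampling_rate=1 / timestep)
spike_times = inhomogeneous_poisson_process(anasig_inst_rate, as_array=True)
print(spike_times[:5])                                 # spike times as a plain array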
Example #5
    def test_recovered_firing_rate_profile(self):
        np.random.seed(54)
        t_start = 0 * pq.s
        t_stop = 4 * np.round(np.pi, decimals=3) * pq.s  # 2 full periods
        sampling_period = 0.001 * pq.s

        # an arbitrary rate profile
        profile = 0.5 * (1 + np.sin(np.arange(t_start.item(), t_stop.item(),
                                              sampling_period.item())))

        time_generation = 0
        n_trials = 200
        rtol = 0.05  # 5% of deviation allowed
        kernel = kernels.RectangularKernel(sigma=0.25 * pq.s)
        for rate in (10 * pq.Hz, 100 * pq.Hz):
            rate_profile = neo.AnalogSignal(rate * profile,
                                            sampling_period=sampling_period)
            # The recovered firing rate profile should not depend on the
            # shape factor; here we test integer and float values of the
            # shape factor, since supporting float values is not trivial
            # for inhomogeneous gamma process generation.
            for shape_factor in (1, 2.5, 10.):

                spiketrains = \
                    [stgen.inhomogeneous_gamma_process(
                        rate_profile, shape_factor=shape_factor)
                     for _ in range(n_trials)]
                rate_recovered = instantaneous_rate(
                    spiketrains,
                    sampling_period=sampling_period,
                    kernel=kernel,
                    t_start=t_start,
                    t_stop=t_stop, trim=True).sum(axis=1) / n_trials

                rate_recovered = rate_recovered.flatten().magnitude
                trim = (rate_profile.shape[0] - rate_recovered.shape[0]) // 2
                rate_profile_valid = rate_profile.magnitude.squeeze()
                rate_profile_valid = rate_profile_valid[trim: -trim - 1]
                assert_allclose(rate_recovered, rate_profile_valid,
                                rtol=0, atol=rtol * rate.item())
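
The core generation step of this test in isolation: a sinusoidal rate profile wrapped in an AnalogSignal and passed to the inhomogeneous gamma generator (a sketch assuming an elephant version that provides inhomogeneous_gamma_process, as the test does; the rate and shape factor are illustrative):

import numpy as np
import quantities as pq
import neo
from elephant.spike_train_generation import inhomogeneous_gamma_process

sampling_period = 0.001 * pq.s
t = np.arange(0, 4 * np.round(np.pi, decimals=3), sampling_period.item())
rate_profile = neo.AnalogSignal(10 * 0.5 * (1 + np.sin(t)) * pq.Hz,
                                sampling_period=sampling_period)
st = inhomogeneous_gamma_process(rate_profile, shape_factor=2.5)
print(len(st), st.t_start, st.t_stop)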
Example #6
 def setUp(self):
     xx_ele, yy_ele = utils.generate_electrodes(dim=2, res=9,
                                                xlims=[0.05, 0.95],
                                                ylims=[0.05, 0.95])
     self.ele_pos = np.vstack((xx_ele, yy_ele)).T
     self.csd_profile = utils.large_source_2D
     pots = CSD.generate_lfp(
         self.csd_profile,
         xx_ele,
         yy_ele,
         resolution=100)
     self.pots = np.reshape(pots, (-1, 1))
     self.test_method = 'KCSD2D'
     self.test_params = {'gdx': 0.25, 'gdy': 0.25, 'R_init': 0.08,
                         'h': 50., 'xmin': 0., 'xmax': 1.,
                         'ymin': 0., 'ymax': 1.}
     temp_signals = []
     for ii in range(len(self.pots)):
         temp_signals.append(self.pots[ii])
     self.an_sigs = neo.AnalogSignal(np.array(temp_signals).T * pq.mV,
                                     sampling_rate=1000 * pq.Hz)
     self.an_sigs.annotate(coordinates=self.ele_pos * pq.mm)
Example #7
    def read(self, block_index=0):
        """read neo block from hdf5 file

        Parameters
        ----------
        block_index : int
            Index of the block in the file. Defaults to 0.
        """

        # Read neo block
        reader = neo.io.NeoHdf5IO()
        reader.connect(self.h5_file)
        block = reader.read()[block_index]
        reader.close()

        # Add wav file data to block
        h5path = split(self.h5_file)[0]

        for seg in block.segments:

            # Read data from wav file
            wav_file = seg.annotations['wav_file']
            wav_path = join(h5path, 'wav', wav_file)
            fs, data = wavfile.read(wav_path)

            # Convert int16 to normalized float64
            data = data / 2.**15

            # Add samples to segment
            sig = neo.AnalogSignal(data,
                                   copy=False,
                                   units=pq.V,
                                   t_start=0 * pq.s,
                                   file_origin=wav_file,
                                   sampling_rate=fs * pq.Hz,
                                   name=splitext(wav_file)[0])
            seg.annotate(wav_signal=sig)

        return block
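
The wav-to-AnalogSignal wrapping step used above, shown in isolation (the file name is a hypothetical placeholder; SciPy is required):

import quantities as pq
import neo
from scipy.io import wavfile

fs, data = wavfile.read('example.wav')         # hypothetical wav file
data = data / 2. ** 15                         # int16 -> normalized float64
sig = neo.AnalogSignal(data, units=pq.V, t_start=0 * pq.s,
                       sampling_rate=fs * pq.Hz)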
Example #8
    def test_zscore_single_inplace_int(self):
        """
        Test if the z-score is correctly calculated even if the input is an
        AnalogSignal of type int, asking for an inplace operation.
        """
        signal = neo.AnalogSignal(self.test_seq1,
                                  units='mV',
                                  t_start=0. * pq.ms,
                                  sampling_rate=1000. * pq.Hz,
                                  dtype=int)

        m = np.mean(self.test_seq1)
        s = np.std(self.test_seq1)
        target = (self.test_seq1 - m) / s

        assert_array_almost_equal(elephant.signal_processing.zscore(
            signal, inplace=True).magnitude,
                                  target.reshape(-1, 1).astype(int),
                                  decimal=9)

        # Assert original signal is overwritten
        self.assertEqual(signal[0].magnitude, target.astype(int)[0])
Example #9
    def test_zscore_single_inplace(self):
        """
        Test z-score on a single AnalogSignal, asking for an inplace
        operation.
        """
        signal = neo.AnalogSignal(
            self.test_seq1, units='mV',
            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)

        m = np.mean(self.test_seq1)
        s = np.std(self.test_seq1)
        target = (self.test_seq1 - m) / s

        result = elephant.signal_processing.zscore(signal, inplace=True)

        assert_array_almost_equal(
            result.magnitude, target.reshape(-1, 1), decimal=9)

        self.assertEqual(result.units, pq.Quantity(1. * pq.dimensionless))

        # Assert original signal is overwritten
        self.assertEqual(signal[0].magnitude, target[0])
Example #10
    def test_zscore_single_dup(self):
        """
        Test z-score on a single AnalogSignal, asking to return a
        duplicate.
        """
        signal = neo.AnalogSignal(
            self.test_seq1, units='mV',
            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)

        m = np.mean(self.test_seq1)
        s = np.std(self.test_seq1)
        target = (self.test_seq1 - m) / s
        assert_array_equal(target, scipy.stats.zscore(self.test_seq1))

        result = elephant.signal_processing.zscore(signal, inplace=False)
        assert_array_almost_equal(
            result.magnitude, target.reshape(-1, 1), decimal=9)

        self.assertEqual(result.units, pq.Quantity(1. * pq.dimensionless))

        # Assert original signal is untouched
        self.assertEqual(signal[0].magnitude, self.test_seq1[0])
Example #11
    def test_zscore_single_multidim_inplace(self):
        """
        Test z-score on a single AnalogSignal with multiple dimensions, asking
        for an inplace operation.
        """
        signal = neo.AnalogSignal(np.vstack([self.test_seq1, self.test_seq2]),
                                  units='mV',
                                  t_start=0. * pq.ms,
                                  sampling_rate=1000. * pq.Hz,
                                  dtype=float)

        m = np.mean(signal.magnitude, axis=0, keepdims=True)
        s = np.std(signal.magnitude, axis=0, keepdims=True)
        target = (signal.magnitude - m) / s

        assert_array_almost_equal(elephant.signal_processing.zscore(
            signal, inplace=True).magnitude,
                                  target,
                                  decimal=9)

        # Assert original signal is overwritten
        self.assertEqual(signal[0, 0].magnitude, target[0, 0])
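
A minimal standalone sketch of the call pattern these z-score tests exercise (toy data; assumes elephant is installed):

import numpy as np
import quantities as pq
import neo
import elephant.signal_processing

sig = neo.AnalogSignal(np.arange(10.0).reshape(-1, 1) * pq.mV,
                       sampling_rate=1000 * pq.Hz)
z = elephant.signal_processing.zscore(sig, inplace=False)
print(z.units)                                         # dimensionless
print(z.magnitude.mean(), z.magnitude.std())           # ~0.0 and ~1.0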
Example #12
def analog_signal_array_to_analog_signals(signal_array):
    """ Return a list of analog signals for an analog signal array.

    If ``signal_array`` is attached to a recording channel group with exactly
    as many channels as there are channels in ``signal_array``, each created
    signal will be assigned the corresponding channel. If the attached
    recording channel group has only one recording channel, all created signals
    will be assigned to this channel. In all other cases, the created
    signal will not have a reference to a recording channel.

    Note that while the created signals may have references to a segment and
    channels, the relationships in the other direction are
    not automatically created (the signals are not attached to the recording
    channel or segment). Other properties like annotations are not copied or
    referenced in the created analog signals.

    :param signal_array: An analog signal array from which the
        :class:`neo.core.AnalogSignal` objects are constructed.
    :type signal_array: :class:`neo.core.AnalogSignalArray`
    :return: A list of analog signals, one for every channel in
        ``signal_array``.
    :rtype: list
    """
    signals = []
    rcg = signal_array.recordingchannelgroup

    for i in range(signal_array.shape[1]):
        s = neo.AnalogSignal(signal_array[:, i],
                             t_start=signal_array.t_start,
                             sampling_rate=signal_array.sampling_rate)
        if len(rcg.recordingchannels) == 1:
            s.recordingchannel = rcg.recordingchannels[0]
        elif len(rcg.recordingchannels) == signal_array.shape[1]:
            s.recordingchannel = rcg.recordingchannels[i]
        s.segment = signal_array.segment
        signals.append(s)

    return signals
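
The same per-channel split, sketched against a modern multi-channel neo.AnalogSignal (in current neo, AnalogSignalArray has been merged into AnalogSignal; the recording-channel bookkeeping of the function above is omitted):

import numpy as np
import quantities as pq
import neo

multi = neo.AnalogSignal(np.random.rand(1000, 4) * pq.mV,
                         sampling_rate=1000 * pq.Hz)
single_channel = [neo.AnalogSignal(multi.magnitude[:, i], units=multi.units,
                                   t_start=multi.t_start,
                                   sampling_rate=multi.sampling_rate)
                  for i in range(multi.shape[1])]
print(len(single_channel), single_channel[0].shape)    # 4 signals of shape (1000, 1)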
Example #13
    def ApplyFilter(self, sig):
        st = np.array(sig)
        for nf, typ in enumerate(self.Type):
            if typ == 'lp' or typ == 'hp':
                FType = self.FTypes[typ]
                Freqs = self.Freq1[nf]/(0.5*sig.sampling_rate)
            elif typ == 'bp' or typ == 'bs':
                FType = self.FTypes[typ]
                Freqs = [self.Freq1[nf]/(0.5*sig.sampling_rate),
                         self.Freq2[nf]/(0.5*sig.sampling_rate)]
            else:
                print('Filter type error:', typ)
                continue

            b, a = signal.butter(self.Order[nf], Freqs, FType)
            st = signal.filtfilt(b, a, st, axis=0)

        return neo.AnalogSignal(st,
                                units=sig.units,
                                t_start=sig.t_start,
                                sampling_rate=sig.sampling_rate,
                                name=sig.name)
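
The filter-and-rewrap pattern used by ApplyFilter, shown standalone with SciPy (the filter order and cutoff are illustrative):

import numpy as np
import quantities as pq
import neo
from scipy import signal

sig = neo.AnalogSignal(np.random.randn(5000, 1) * pq.mV,
                       sampling_rate=1000 * pq.Hz)
nyquist = 0.5 * float(sig.sampling_rate.rescale(pq.Hz).magnitude)
b, a = signal.butter(4, 50.0 / nyquist, 'lowpass')
filtered = signal.filtfilt(b, a, np.asarray(sig), axis=0)
sig_lp = neo.AnalogSignal(filtered, units=sig.units, t_start=sig.t_start,
                          sampling_rate=sig.sampling_rate,
                          name='lowpass-filtered')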
Example #14
    def InitContMeas(self, Vin, Fs, Refresh, RecDC=True, GenTestSig=False):
        #  Init Neo record
        out_seg = neo.Segment(name='NewSeg')

        if RecDC:
            self.EventContDcDone = self.ContDcDoneCallback
            for chk, chi in sorted(self.DCChannelIndex.items()):
                name = chk
                sig = neo.AnalogSignal(signal=np.empty((0, 1), float),
                                       units=pq.V,
                                       t_start=0*pq.s,
                                       sampling_rate=Fs*pq.Hz,
                                       name=name)
                out_seg.analogsignals.append(sig)

        self.ContRecord = NeoRecord(Seg=out_seg, UnitGain=1)

        #  Launch acquisition
        self.SetBias(Vsig=Vin)
        self.GetContinuousCurrent(Fs=Fs,
                                  Refresh=Refresh,
                                  GenTestSig=GenTestSig)
        self.CharactRunning = True
Example #15
 def test_cross_correlation_freqs(self):
     '''
     Sine vs. cosine for different frequencies.
     Note that the accuracy depends on N and min(f);
     e.g., f=0.1 and N=2018 only reaches an accuracy on the order of decimal=1.
     '''
     freq_arr = np.linspace(0.5, 15, 8) * pq.Hz
     signal = np.zeros((self.n_samples, 2))
     for freq in freq_arr:
         signal[:, 0] = np.sin(2. * np.pi * freq * self.time)
         signal[:, 1] = np.cos(2. * np.pi * freq * self.time)
         # Convert signal to neo.AnalogSignal
         signal_neo = neo.AnalogSignal(signal,
                                       units='mV',
                                       t_start=0. * pq.ms,
                                       sampling_rate=self.sampling_rate,
                                       dtype=float)
         rho = elephant.signal_processing.cross_correlation_function(
             signal_neo, [0, 1])
         # Cross-correlation of sine and cosine should be sine
         assert_array_almost_equal(rho.magnitude[:, 0],
                                   np.sin(2. * np.pi * freq * rho.times),
                                   decimal=2)
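
An independent NumPy sanity check of the expectation in this test: the normalized circular cross-correlation of a sine and a cosine at the same frequency is again a sine in the lag variable (this is not elephant's implementation, just a quick check; the frequency and length are illustrative):

import numpy as np

freq, fs, n = 5.0, 1000.0, 4000                    # 20 full periods
t = np.arange(n) / fs
x = np.sin(2 * np.pi * freq * t)
y = np.cos(2 * np.pi * freq * t)

def circ_corr(lag):
    # normalized circular cross-correlation at an integer sample lag
    return np.mean(x * np.roll(y, lag)) / (np.std(x) * np.std(y))

print(round(circ_corr(0), 3))      # ~0.0 -> sin(0)
print(round(circ_corr(50), 3))     # ~1.0 -> sin(2*pi*freq*50/fs) = sin(pi/2)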
Example #16
    def test_zscore_single_dup_int(self):
        """
        Test if the z-score is correctly calculated even if the input is an
        AnalogSignal of type int, asking for a duplicate (duplicate should
        be of type float).
        """
        signal = neo.AnalogSignal(self.test_seq1,
                                  units='mV',
                                  t_start=0. * pq.ms,
                                  sampling_rate=1000. * pq.Hz,
                                  dtype=int)

        m = np.mean(self.test_seq1)
        s = np.std(self.test_seq1)
        target = (self.test_seq1 - m) / s

        assert_array_almost_equal(elephant.signal_processing.zscore(
            signal, inplace=False).magnitude,
                                  target.reshape(-1, 1),
                                  decimal=9)

        # Assert original signal is untouched
        self.assertEqual(signal.magnitude[0], self.test_seq1[0])
Example #17
    def test_zscore_single_multidim_dup(self):
        '''
        Test z-score on a single AnalogSignal with multiple dimensions, asking
        to return a duplicate.
        '''
        signal = neo.AnalogSignal(np.transpose(
            np.vstack([self.test_seq1, self.test_seq2])),
                                  units='mV',
                                  t_start=0. * pq.ms,
                                  sampling_rate=1000. * pq.Hz,
                                  dtype=float)

        m = np.mean(signal.magnitude, axis=0, keepdims=True)
        s = np.std(signal.magnitude, axis=0, keepdims=True)
        target = (signal.magnitude - m) / s

        assert_array_almost_equal(elephant.signal_processing.zscore(
            signal, inplace=False).magnitude,
                                  target,
                                  decimal=9)

        # Assert original signal is untouched
        self.assertEqual(signal[0, 0].magnitude, self.test_seq1[0])
Example #18
def runtest_neo(io, N):
    times = []
    blk = neo.Block()
    seg = neo.Segment()
    blk.segments.append(seg)
    step = 1
    if N >= 10:
        step = N // 10
    Ns = list()
    for n in range(0, N + step, step):
        seg.analogsignals = []
        for ni in range(n):
            seg.analogsignals.append(
                neo.AnalogSignal(signal=[0],
                                 units="V",
                                 sampling_rate=1 * pq.Hz))
        t0 = time()
        io.write_block(blk)
        times.append(time() - t0)
        Ns.append(n)
        print(f" :: {n}/{N} {int(n/N*100):3d}%", end="\r")

    print(f" :: Last write time: {times[-1]:7.05f} s")

    print("Verifying neo-nix file")
    assert len(io.nix_file.blocks) == 1
    blk = io.nix_file.blocks[0]
    assert blk.type == "neo.block"

    assert len(blk.groups) == 1
    grp = blk.groups[0]
    assert grp.type == "neo.segment"

    assert len(blk.data_arrays) == N
    assert len(grp.data_arrays) == N

    return Ns, times
Example #19
def get_var(blk, varname='M', join=True, keep_neo=True):
    '''Access an analog variable from all segments in a block.

    If ``join`` is False, a list with one entry per segment is returned
    (neo signals if ``keep_neo`` is True, plain arrays otherwise). If
    ``join`` is True, the segments are concatenated: with ``keep_neo`` a
    single neo.AnalogSignal is returned, otherwise a tuple of the
    concatenated array and the per-segment split points.
    '''

    split_points = []
    var = []
    # Create a list of the analog signals for each segment
    for seg in blk.segments:
        names = [str(x.name) for x in seg.analogsignals]
        names = [w.replace('Moment', 'M') for w in names]
        names = [w.replace('Force', 'F') for w in names]
        idx = names.index(varname)
        if keep_neo:
            var.append(seg.analogsignals[idx])
        else:
            var.append(seg.analogsignals[idx].as_array())
            split_points.append(seg.analogsignals[idx].shape[0])

    if join:
        if keep_neo:
            data = []
            t_start = 0. * pq.s
            t_stop = 0. * pq.s
            for seg in var:
                data.append(seg.as_array())
                t_stop += seg.t_stop
            data = np.concatenate(data, axis=0)
            sig = neo.AnalogSignal(data * var[0].units,
                                   t_start=t_start,
                                   sampling_rate=var[0].sampling_rate,
                                   name=var[0].name)
            return sig
        else:
            var = np.concatenate(var, axis=0)
        return (var, split_points)
    else:
        return var
Example #20
    def _cch_speed(binned_st1, binned_st2, left_edge, right_edge, cch_mode,
                   border_corr, binary, kern):

        # Retrieve the array of the binned spike trains
        st1_arr = binned_st1.to_array()[0, :]
        st2_arr = binned_st2.to_array()[0, :]

        # Convert to the binary version
        if binary:
            st1_arr = np.array(st1_arr > 0, dtype=int)
            st2_arr = np.array(st2_arr > 0, dtype=int)
        if cch_mode == 'pad':
            # Zero padding to stay between left_edge and right_edge
            st1_arr = np.pad(
                st1_arr,
                (int(np.abs(np.min([left_edge, 0]))), np.max([right_edge, 0])),
                mode='constant')
            cch_mode = 'valid'
        # Cross correlate the spike trains
        counts = np.correlate(st2_arr, st1_arr, mode=cch_mode)
        bin_ids = np.r_[left_edge:right_edge + 1]
        # Border correction
        if border_corr is True:
            counts = _border_correction(counts, max_num_bins, left_edge,
                                        right_edge)
        if kern is not None:
            # Smoothing
            counts = _kernel_smoothing(counts, kern, left_edge, right_edge)
        # Transform the array count into an AnalogSignal
        cch_result = neo.AnalogSignal(signal=counts.reshape(counts.size, 1),
                                      units=pq.dimensionless,
                                      t_start=(bin_ids[0] - 0.5) *
                                      binned_st1.binsize,
                                      sampling_period=binned_st1.binsize)
        # Return only the hist_bins bins and counts before and after the
        # central one
        return cch_result, bin_ids
Example #21
    def test_butter_input_types(self):
        # generate white noise data of different types
        noise_np = np.random.normal(size=5000)
        noise_pq = noise_np * pq.mV
        noise = neo.AnalogSignal(noise_pq, sampling_rate=1000.0 * pq.Hz)

        # check input as NumPy ndarray
        filtered_noise_np = elephant.signal_processing.butter(noise_np,
                                                              400.0,
                                                              100.0,
                                                              fs=1000.0)
        self.assertTrue(isinstance(filtered_noise_np, np.ndarray))
        self.assertFalse(isinstance(filtered_noise_np, pq.quantity.Quantity))
        self.assertFalse(isinstance(filtered_noise_np, neo.AnalogSignal))
        self.assertEqual(filtered_noise_np.shape, noise_np.shape)

        # check input as Quantity array
        filtered_noise_pq = elephant.signal_processing.butter(noise_pq,
                                                              400.0 * pq.Hz,
                                                              100.0 * pq.Hz,
                                                              fs=1000.0)
        self.assertTrue(isinstance(filtered_noise_pq, pq.quantity.Quantity))
        self.assertFalse(isinstance(filtered_noise_pq, neo.AnalogSignal))
        self.assertEqual(filtered_noise_pq.shape, noise_pq.shape)

        # check input as neo AnalogSignal
        filtered_noise = elephant.signal_processing.butter(
            noise, 400.0 * pq.Hz, 100.0 * pq.Hz)
        self.assertTrue(isinstance(filtered_noise, neo.AnalogSignal))
        self.assertEqual(filtered_noise.shape, noise.shape)

        # check if the results from different input types are identical
        self.assertTrue(
            np.all(filtered_noise_pq.magnitude == filtered_noise_np))
        self.assertTrue(
            np.all(filtered_noise.magnitude[:, 0] == filtered_noise_np))
Example #22
def dataArray2AnalogSignal(dataArray):
    '''
    Convert a nix data_array into a neo analogsignal
    :param dataArray: nix.data_array
    :return: neo.analogsignal
    '''

    assert len(
        dataArray.dimensions) == 1, 'Only one dimensional arrays are supported'
    dim = dataArray.dimensions[0]
    assert isinstance(dim, nix.pycore.SampledDimension), 'Only Sampled Dimensions ' \
                                                         'are supported'

    t_start = qu.Quantity(dim.offset, units=dim.unit)
    samplingPeriod = qu.Quantity(dim.sampling_interval, units=dim.unit)

    analogSignal = neo.AnalogSignal(signal=np.array(dataArray[:]),
                                    units=dataArray.unit,
                                    sampling_period=samplingPeriod,
                                    t_start=t_start)

    analogSignal.name = dataArray.name

    return analogSignal
Example #23
 def recording(self, port_name, t_start=None):
     """
     Return recorded data as a dictionary containing one numpy array for
     each neuron, ids as keys.
     """
     if self.is_dead():
         t_stop = self._t_stop
     else:
         t_stop = self.Simulation.active().t
     if t_start is None:
         t_start = UnitHandler.to_pq_quantity(self._t_start)
     t_start = pq.Quantity(t_start, 'ms')
     t_stop = self.unit_handler.to_pq_quantity(t_stop)
     try:
         port = self.component_class.port(port_name)
     except NineMLNameError:
         port = self.component_class.state_variable(port_name)
     if isinstance(port, EventPort):
         events = numpy.asarray(self._recordings[port_name])
         recording = neo.SpikeTrain(self._trim_spike_train(events, t_start),
                                    t_start=t_start,
                                    t_stop=t_stop,
                                    units='ms')
     else:
         units_str = self.unit_handler.dimension_to_unit_str(
             port.dimension, one_as_dimensionless=True)
         interval = h.dt * pq.ms
         signal = numpy.asarray(self._recordings[port_name])
         recording = neo.AnalogSignal(self._trim_analog_signal(
             signal, t_start, interval),
                                      sampling_period=interval,
                                      t_start=t_start,
                                      units=units_str,
                                      name=port_name)
         recording = recording[:-1]  # Drop final timepoint
     return recording
Example #24
def merge_analogsingals(asigs):
    # ToDo: to be replaced by neo utils functions
    if len(asigs) == 1:
        return asigs[0]

    min_length = np.min([len(asig.times) for asig in asigs])
    max_length = np.max([len(asig.times) for asig in asigs])
    if min_length != max_length:
        print('Warning: the length of the analog signals differs '
              'between {} and {}. All signals will be cut to the same '
              'length and merged into one AnalogSignal '
              'object.'.format(min_length, max_length))

    if len(np.unique([asig.sampling_rate for asig in asigs])) > 1:
        raise ValueError('The AnalogSignal objects have different '\
                       + 'sampling rates!')

    asig_array = np.zeros((min_length, len(asigs)))

    for channel_number, asig in enumerate(asigs):
        asig_array[:,
                   channel_number] = np.squeeze(asig.as_array()[:min_length])

    merged_asig = neo.AnalogSignal(asig_array * asigs[0].units,
                                   sampling_rate=asigs[0].sampling_rate,
                                   t_start=asigs[0].t_start)
    for key in asigs[0].annotations.keys():
        annotation_values = np.array([a.annotations[key] for a in asigs])
        try:
            if (annotation_values == annotation_values[0]).all():
                merged_asig.annotations[key] = annotation_values[0]
            else:
                merged_asig.array_annotations[key] = annotation_values
        except Exception:
            print('Cannot merge annotation', key)
    return merged_asig
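
A minimal illustration of the merge idea: two single-channel signals with the same sampling rate stacked column-wise into one multi-channel AnalogSignal (toy data; the annotation handling of the function above is omitted):

import numpy as np
import quantities as pq
import neo

a = neo.AnalogSignal(np.sin(np.linspace(0, 10, 1000))[:, None] * pq.mV,
                     sampling_rate=1000 * pq.Hz)
b = neo.AnalogSignal(np.cos(np.linspace(0, 10, 1000))[:, None] * pq.mV,
                     sampling_rate=1000 * pq.Hz)
merged = neo.AnalogSignal(np.hstack([a.as_array(), b.as_array()]) * a.units,
                          sampling_rate=a.sampling_rate, t_start=a.t_start)
print(merged.shape)                                    # (1000, 2)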
Example #25
MultiTimer("generate data")
# =======================================================================
# ASSET Method
# =======================================================================
imat, xx, yy = asset.intersection_matrix(sts, binsize=binsize, dt=T)

MultiTimer("intersection_matrix")
# Compute the probability matrix, either analytically or via bootstrapping
if prob_method == 'a':
    # Estimate rates
    fir_rates = list(np.zeros(shape=len(sts)))
    for st_id, st_trial in enumerate(sts):
        fir_rates[st_id] = estats.instantaneous_rate(
            st_trial, sampling_period=sampl_period)
        fir_rates[st_id] = neo.AnalogSignal(fir_rates[st_id],
                                            t_start=t_pre,
                                            t_stop=t_post,
                                            sampling_period=sampl_period)
    # Compute the probability matrix analytically
    pmat, x_edges, y_edges = asset.probability_matrix_analytical(
        sts, binsize, dt=T, fir_rates=fir_rates)
elif prob_method == 'b':
    # Compute the probability matrix via bootstrapping (Monte Carlo)
    pmat, x_edges, y_edges = asset.probability_matrix_montecarlo(sts,
                                                                 binsize,
                                                                 dt=T,
                                                                 j=dither_T,
                                                                 n_surr=n_surr)
MultiTimer("prob_method")
# Compute the joint probability matrix
jmat = asset.joint_probability_matrix(pmat,
                                      filter_shape=(fl, fw),
Example #26
def cross_correlation_histogram(
        binned_spiketrain_i, binned_spiketrain_j, window='full',
        border_correction=False, binary=False, kernel=None, method='speed',
        cross_correlation_coefficient=False):
    """
    Computes the cross-correlation histogram (CCH) between two binned spike
    trains `binned_spiketrain_i` and `binned_spiketrain_j`.

    Visualization of this function is covered in Viziphant:
    :func:`viziphant.spike_train_correlation.plot_cross_correlation_histogram`.


    Parameters
    ----------
    binned_spiketrain_i, binned_spiketrain_j :
        elephant.conversion.BinnedSpikeTrain
        Binned spike trains of lengths N and M to cross-correlate. The input
        spike trains can have any `t_start` and `t_stop`.
    window : {'valid', 'full'} or list of int, optional
        ‘full’: This returns the cross-correlation at each point of overlap,
                with an output shape of (N+M-1,). At the end-points of the
                cross-correlogram, the signals do not overlap completely, and
                boundary effects may be seen.
        ‘valid’: Mode valid returns output of length max(M, N) - min(M, N) + 1.
                 The cross-correlation product is only given for points where
                 the signals overlap completely.
                 Values outside the signal boundary have no effect.
        List of integers (min_lag, max_lag):
              The entries of window are two integers representing the left and
              right extremes (expressed as number of bins) where the
              cross-correlation is computed.
        Default: 'full'.
    border_correction : bool, optional
        whether to correct for the border effect. If True, the value of the
        CCH at bin :math:`b` (for :math:`b=-H,-H+1, ...,H`, where :math:`H` is
        the CCH half-length) is multiplied by the correction factor:

        .. math::
                            (H+1)/(H+1-|b|),

        which linearly corrects for loss of bins at the edges.
        Default: False.
    binary : bool, optional
        If True, spikes falling in the same bin are counted as a single spike;
        otherwise they are counted as different spikes.
        Default: False.
    kernel : np.ndarray or None, optional
        A one dimensional array containing a smoothing kernel applied
        to the resulting CCH. The length N of the kernel indicates the
        smoothing window. The smoothing window cannot be larger than the
        maximum lag of the CCH. The kernel is normalized to unit area before
        being applied to the resulting CCH. Popular choices for the kernel are
          * normalized boxcar kernel: `numpy.ones(N)`
          * hamming: `numpy.hamming(N)`
          * hanning: `numpy.hanning(N)`
          * bartlett: `numpy.bartlett(N)`
        If None, the CCH is not smoothed.
        Default: None.
    method : {'speed', 'memory'}, optional
        Defines the algorithm to use. "speed" uses `numpy.correlate` to
        calculate the correlation between two binned spike trains using a
        non-sparse data representation. Due to various optimizations, it is the
        fastest realization. In contrast, the option "memory" uses its own
        implementation to calculate the correlation based on sparse matrices,
        which is more memory efficient but slower than the "speed" option.
        Default: "speed".
    cross_correlation_coefficient : bool, optional
        If True, a normalization is applied to the CCH to obtain the
        cross-correlation  coefficient function ranging from -1 to 1 according
        to Equation (5.10) in [1]_. See Notes.
        Default: False.

    Returns
    -------
    cch_result : neo.AnalogSignal
        Containing the cross-correlation histogram between
        `binned_spiketrain_i` and `binned_spiketrain_j`.

        Offset bins correspond to correlations at delays equivalent
        to the differences between the spike times of `binned_spiketrain_i` and
        those of `binned_spiketrain_j`: an entry at positive lag corresponds to
        a spike in `binned_spiketrain_j` following a spike in
        `binned_spiketrain_i` bins to the right, and an entry at negative lag
        corresponds to a spike in `binned_spiketrain_i` following a spike in
        `binned_spiketrain_j`.

        To illustrate this definition, consider two spike trains with the same
        `t_start` and `t_stop`:
        `binned_spiketrain_i` ('reference neuron') : 0 0 0 0 1 0 0 0 0 0 0
        `binned_spiketrain_j` ('target neuron')    : 0 0 0 0 0 0 0 1 0 0 0
        Here, the CCH will have an entry of `1` at `lag=+3`.

        Consistent with the definition of `neo.AnalogSignals`, the time axis
        represents the left bin borders of each histogram bin. For example,
        the time axis might be:
        `np.array([-2.5 -1.5 -0.5 0.5 1.5]) * ms`
    lags : np.ndarray
        Contains the IDs of the individual histogram bins, where the central
        bin has ID 0, bins to the left have negative IDs and bins to the right
        have positive IDs, e.g.,:
        `np.array([-3, -2, -1, 0, 1, 2, 3])`

    Notes
    -----
    1. The Eq. (5.10) in [1]_ is valid for binned spike trains with at most one
       spike per bin. For a general case, refer to the implementation of
       `_covariance_sparse()`.
    2. Alias: `cch`

    References
    ----------
    .. [1] "Analysis of parallel spike trains", 2010, Gruen & Rotter, Vol 7.

    Examples
    --------
    Plot the cross-correlation histogram between two Poisson spike trains

    >>> import elephant
    >>> import matplotlib.pyplot as plt
    >>> import quantities as pq

    >>> binned_spiketrain_i = elephant.conversion.BinnedSpikeTrain(
    ...        elephant.spike_train_generation.homogeneous_poisson_process(
    ...            10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms),
    ...        bin_size=5. * pq.ms)
    >>> binned_spiketrain_j = elephant.conversion.BinnedSpikeTrain(
    ...        elephant.spike_train_generation.homogeneous_poisson_process(
    ...            10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms),
    ...        bin_size=5. * pq.ms)

    >>> cc_hist = \
    ...    elephant.spike_train_correlation.cross_correlation_histogram(
    ...        binned_spiketrain_i, binned_spiketrain_j, window=[-30,30],
    ...        border_correction=False,
    ...        binary=False, kernel=None, method='memory')

    >>> plt.bar(left=cc_hist[0].times.magnitude,
    ...         height=cc_hist[0][:, 0].magnitude,
    ...         width=cc_hist[0].sampling_period.magnitude)
    >>> plt.xlabel('time (' + str(cc_hist[0].times.units) + ')')
    >>> plt.ylabel('cross-correlation histogram')
    >>> plt.axis('tight')
    >>> plt.show()

    """

    # Check that the spike trains are binned with the same temporal
    # resolution
    if binned_spiketrain_i.shape[0] != 1 or \
            binned_spiketrain_j.shape[0] != 1:
        raise ValueError("Spike trains must be one dimensional")

    # rescale to the common units
    # this does not change the data - only its representation
    binned_spiketrain_j.rescale(binned_spiketrain_i.units)

    if not np.isclose(binned_spiketrain_i._bin_size,
                      binned_spiketrain_j._bin_size):
        raise ValueError("Bin sizes must be equal")

    bin_size = binned_spiketrain_i._bin_size
    left_edge_min = -binned_spiketrain_i.n_bins + 1
    right_edge_max = binned_spiketrain_j.n_bins - 1

    t_lags_shift = (binned_spiketrain_j._t_start -
                    binned_spiketrain_i._t_start) / bin_size
    if not np.isclose(t_lags_shift, round(t_lags_shift)):
        # For example, if bin_size=1 ms, binned_spiketrain_i.t_start=0 ms, and
        # binned_spiketrain_j.t_start=0.5 ms then there is a global shift in
        # the binning of the spike trains.
        raise ValueError(
            "Binned spiketrains time shift is not multiple of bin_size")
    t_lags_shift = int(round(t_lags_shift))

    # In the examples below we fix st2 and "move" st1.
    # Zero-lag is equal to `max(st1.t_start, st2.t_start)`.
    # Binned spiketrains (t_start and t_stop) with bin_size=1ms:
    # 1) st1=[3, 8] ms, st2=[1, 13] ms
    #    t_start_shift = -2 ms
    #    zero-lag is at 3 ms
    # 2) st1=[1, 7] ms, st2=[2, 9] ms
    #    t_start_shift = 1 ms
    #    zero-lag is at 2 ms
    # 3) st1=[1, 7] ms, st2=[4, 6] ms
    #    t_start_shift = 3 ms
    #    zero-lag is at 4 ms

    # Find left and right edges of unaligned (time-dropped) time signals
    if len(window) == 2 and np.issubdtype(type(window[0]), np.integer) \
            and np.issubdtype(type(window[1]), np.integer):
        # ex. 1) lags range: [w[0] - 2, w[1] - 2] ms
        # ex. 2) lags range: [w[0] + 1, w[1] + 1] ms
        # ex. 3) lags range: [w[0] + 3, w[0] + 3] ms
        if window[0] >= window[1]:
            raise ValueError(
                "Window's left edge ({left}) must be lower than the right "
                "edge ({right})".format(left=window[0], right=window[1]))
        left_edge, right_edge = np.subtract(window, t_lags_shift)
        if left_edge < left_edge_min or right_edge > right_edge_max:
            raise ValueError(
                "The window exceeds the length of the spike trains")
        lags = np.arange(window[0], window[1] + 1, dtype=np.int32)
        cch_mode = 'pad'
    elif window == 'full':
        # cch computed for all the possible entries
        # ex. 1) lags range: [-6, 9] ms
        # ex. 2) lags range: [-4, 7] ms
        # ex. 3) lags range: [-2, 4] ms
        left_edge = left_edge_min
        right_edge = right_edge_max
        lags = np.arange(left_edge + t_lags_shift,
                         right_edge + 1 + t_lags_shift, dtype=np.int32)
        cch_mode = window
    elif window == 'valid':
        lags = _CrossCorrHist.get_valid_lags(binned_spiketrain_i,
                                             binned_spiketrain_j)
        left_edge, right_edge = lags[(0, -1), ]
        cch_mode = window
    else:
        raise ValueError("Invalid window parameter")

    if binary:
        binned_spiketrain_i = binned_spiketrain_i.binarize()
        binned_spiketrain_j = binned_spiketrain_j.binarize()

    cch_builder = _CrossCorrHist(binned_spiketrain_i, binned_spiketrain_j,
                                 window=(left_edge, right_edge))
    if method == 'memory':
        cross_corr = cch_builder.correlate_memory(cch_mode=cch_mode)
    else:
        cross_corr = cch_builder.correlate_speed(cch_mode=cch_mode)

    if border_correction:
        if window == 'valid':
            warnings.warn(
                "Border correction does not have any effect in "
                "'valid' window mode since there are no border effects!")
        else:
            cross_corr = cch_builder.border_correction(cross_corr)
    if kernel is not None:
        cross_corr = cch_builder.kernel_smoothing(cross_corr, kernel=kernel)
    if cross_correlation_coefficient:
        cross_corr = cch_builder.cross_correlation_coefficient(cross_corr)

    normalization = 'normalized' if cross_correlation_coefficient else 'counts'
    annotations = dict(window=window, border_correction=border_correction,
                       binary=binary, kernel=kernel is not None,
                       normalization=normalization)
    annotations = dict(cch_parameters=annotations)

    # Transform the array count into an AnalogSignal
    t_start = pq.Quantity((lags[0] - 0.5) * bin_size,
                          units=binned_spiketrain_i.units, copy=False)
    cch_result = neo.AnalogSignal(
        signal=np.expand_dims(cross_corr, axis=1),
        units=pq.dimensionless,
        t_start=t_start,
        sampling_period=binned_spiketrain_i.bin_size, copy=False,
        **annotations)
    return cch_result, lags
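
A quick numeric check of the lag convention described in the docstring above, using numpy.correlate directly on the toy binary trains from the example (an illustration of the 'speed' path, not the library code itself):

import numpy as np

st_i = np.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0])     # reference neuron
st_j = np.array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0])     # target neuron
counts = np.correlate(st_j, st_i, mode='full')
lags = np.arange(-len(st_i) + 1, len(st_j))
print(lags[counts.argmax()])                            # 3, i.e. the entry sits at lag=+3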
Example #27
def generate_sts(data_type, N=100):
    '''
    Generate a list of parallel spike trains with different statistics.

    The data are composed of background spiking activity plus possibly
    a repeated sequence of synchronous events (SSE).
    The background activity depends on the value of data_type.
    The size and occurrence count of the SSE is specified by sse_params.

    Parameters
    ----------
    data_type : int
        An integer specifying the type of background activity.
        At the moment the following types of background activity are
        supported (note: homog = across neurons; stat = over time):
        0 : 100 indep Poisson with rate 25 Hz
        1 : 100 indep Poisson nonstat-step (10/60/10 Hz)
        2 : 100 indep Poisson heterog (5->25 Hz), stat
        3 : 100 indep Poisson, rate increase with latency variability
    N : int
        Total number of neurons in the model. The default is N=100.

    Returns
    -------
    sts : list of SpikeTrains
        a list of spike trains
    params : dict
        a dictionary of simulation parameters

    '''
    T = 1 * pq.s  # simulation time
    sampl_period = 10 * pq.ms  # sampling period of the rate profile
    params = {'nr_neurons': N, 'simul_time': T}
    # Indep Poisson homog, stat rate 25 Hz
    if data_type == 0:
        # Define a rate profile
        rate = 25 * pq.Hz
        # Generate data
        sts = stg._n_poisson(rate=rate, t_stop=T, n=N)
        # Storing rate parameter
        params['rate'] = rate
    # Indep Poisson,  homog, nonstat-step (10/60/10 Hz)
    elif data_type == 1:
        a0, a1 = 10 * pq.Hz, 60 * pq.Hz  # baseline and transient rates
        t1, t2 = 600 * pq.ms, 700 * pq.ms  # time segment of transient rate
        # Define a rate profile
        times = sampl_period.units * np.arange(
            0,
            T.rescale(sampl_period.units).magnitude, sampl_period.magnitude)
        rate_profile = np.zeros(times.shape)
        rate_profile[np.any([times < t1, times > t2], axis=0)] = a0.magnitude
        rate_profile[np.all([times >= t1, times <= t2], axis=0)] = a1.magnitude
        rate_profile = rate_profile * a0.units
        rate_profile = neo.AnalogSignal(rate_profile,
                                        sampling_period=sampl_period)
        # Generate data
        sts = [
            stg.inhomogeneous_poisson_process(rate_profile) for i in range(N)
        ]
        # Storing rate parameter
        params['rate'] = rate_profile
    # Indep Poisson, heterog (5->25 Hz), stat
    elif data_type == 2:
        rate_min = 5 * pq.Hz  # min rate. Ensures that there is >=1 spike
        rate_max = 25 * pq.Hz  # max rate
        rates = np.linspace(rate_min.magnitude, rate_max.magnitude, N) * pq.Hz
        # Define a rate profile
        # Generate data
        sts = [
            stg.homogeneous_poisson_process(rate=rate, t_stop=T)
            for rate in rates
        ]
        random.shuffle(sts)
        # Storing rate parameter
        params['rate'] = rates
    # Indep Poisson, rate increase sequence
    elif data_type == 3:
        l = 20  # 20 groups of neurons
        w = 5  # of 5 neurons each
        t0 = 50 * pq.ms  # the first of which increases the rate at time t0
        t00 = 500 * pq.ms  # and again at time t00
        ratechange_dur = 5 * pq.ms  # old: 10ms  # for a short period
        a0, a1 = 14 * pq.Hz, 100 * pq.Hz  # from rate a0 to a1
        ratechange_delay = 5 * pq.ms  # old: 10ms; followed with delay by next group
        # Define a rate profile
        times = sampl_period.units * np.arange(
            0,
            T.rescale(sampl_period.units).magnitude, sampl_period.magnitude)
        sts = []
        rate_profiles = []
        for i in range(l * w):
            t1 = t0 + (i // w) * ratechange_delay
            t2 = t1 + ratechange_dur
            t11 = t00 + (i // w) * ratechange_delay
            t22 = t11 + ratechange_dur
            rate_profile = np.zeros(times.shape)
            rate_profile[np.any([times < t1, times > t2], axis=0)] = \
                a0.magnitude
            rate_profile[np.all([times >= t1, times <= t2], axis=0)] = \
                a1.magnitude
            rate_profile[np.all([times >= t11, times <= t22], axis=0)] = \
                a1.magnitude
            rate_profile = rate_profile * a0.units
            rate_profile = neo.AnalogSignal(rate_profile,
                                            sampling_period=sampl_period)
            rate_profiles.append(rate_profile)
            # Generate data
            sts.append(stg.inhomogeneous_poisson_process(rate_profile))
        # Storing rate parameter
        params['rate'] = rate_profiles
    else:
        raise ValueError(
            'data_type %d not supported. Provide int from 0 to 10' % data_type)
    return sts, params
Example #28
def cross_correlation_histogram(binned_st1,
                                binned_st2,
                                window='full',
                                border_correction=False,
                                binary=False,
                                kernel=None,
                                method='speed',
                                cross_corr_coef=False):
    """
    Computes the cross-correlation histogram (CCH) between two binned spike
    trains `binned_st1` and `binned_st2`.

    Parameters
    ----------
    binned_st1, binned_st2 : elephant.conversion.BinnedSpikeTrain
        Binned spike trains to cross-correlate. The two spike trains must have
        same `t_start` and `t_stop`.
    window : {'valid', 'full', list}, optional
        String or list of integers.
        ‘full’: This returns the cross-correlation at each point of overlap,
                with an output shape of (N+M-1,). At the end-points of the
                cross-correlogram, the signals do not overlap completely, and
                boundary effects may be seen.
        ‘valid’: Mode valid returns output of length max(M, N) - min(M, N) + 1.
                 The cross-correlation product is only given for points where
                 the signals overlap completely.
                 Values outside the signal boundary have no effect.
        List of integers (min_lag, max_lag):
              The entries of window are two integers representing the left and
              right extremes (expressed as number of bins) where the
              cross-correlation is computed.
        Default: 'full'
    border_correction : bool, optional
        whether to correct for the border effect. If True, the value of the
        CCH at bin b (for b=-H,-H+1, ...,H, where H is the CCH half-length)
        is multiplied by the correction factor:
                            (H+1)/(H+1-|b|),
        which linearly corrects for loss of bins at the edges.
        Default: False
    binary : bool, optional
        whether to binarize spikes from the same spike train falling in the
        same bin. If True, such spikes are considered as a single spike;
        otherwise they are considered as different spikes.
        Default: False.
    kernel : array or None, optional
        A one dimensional array containing an optional smoothing kernel applied
        to the resulting CCH. The length N of the kernel indicates the
        smoothing window. The smoothing window cannot be larger than the
        maximum lag of the CCH. The kernel is normalized to unit area before
        being applied to the resulting CCH. Popular choices for the kernel are
          * normalized boxcar kernel: numpy.ones(N)
          * hamming: numpy.hamming(N)
          * hanning: numpy.hanning(N)
          * bartlett: numpy.bartlett(N)
        If None is specified, the CCH is not smoothed.
        Default: None
    method : string, optional
        Defines the algorithm to use. "speed" uses numpy.correlate to calculate
        the correlation between two binned spike trains using a non-sparse data
        representation. Due to various optimizations, it is the fastest
        realization. In contrast, the option "memory" uses its own
        implementation to calculate the correlation based on sparse matrices,
        which is more memory efficient but slower than the "speed" option.
        Default: "speed"
    cross_corr_coef : bool, optional
        Normalizes the CCH to obtain the cross-correlation  coefficient
        function ranging from -1 to 1 according to Equation (5.10) in [1]_.
        See Notes.

    Returns
    -------
    cch : neo.AnalogSignal
        Containing the cross-correlation histogram between `binned_st1` and
        `binned_st2`.

        The central bin of the histogram represents correlation at zero
        delay (instantaneous correlation).
        Offset bins correspond to correlations at a delay equivalent
        to the difference between the spike times of `binned_st1` and those of
        `binned_st2`: an entry at positive lags corresponds to a spike in
        `binned_st2` following a spike in `binned_st1` bins to the right, and
        an entry at negative lags corresponds to a spike in `binned_st1`
        following a spike in `binned_st2`.

        To illustrate this definition, consider the two spike trains:
        `binned_st1`: 0 0 0 0 1 0 0 0 0 0 0
        `binned_st2`: 0 0 0 0 0 0 0 1 0 0 0
        Here, the CCH will have an entry of 1 at lag h=+3.

        Consistent with the definition of AnalogSignals, the time axis
        represents the left bin borders of each histogram bin. For example,
        the time axis might be:
        `np.array([-2.5 -1.5 -0.5 0.5 1.5]) * ms`
    bin_ids : np.ndarray
        Contains the IDs of the individual histogram bins, where the central
        bin has ID 0, bins to the left have negative IDs and bins to the right
        have positive IDs, e.g.,:
        `np.array([-3, -2, -1, 0, 1, 2, 3])`

    Notes
    -----
    The Eq. (5.10) in [1]_ is valid for binned spike trains with at most one
    spike per bin. For a general case, refer to the implementation of
    `_covariance_sparse()`.

    References
    ----------
    .. [1] "Analysis of parallel spike trains", 2010, Gruen & Rotter, Vol 7.

    Example
    -------
        Plot the cross-correlation histogram between two Poisson spike trains
        >>> import elephant
        >>> import matplotlib.pyplot as plt
        >>> import quantities as pq

        >>> binned_st1 = elephant.conversion.BinnedSpikeTrain(
        >>>        elephant.spike_train_generation.homogeneous_poisson_process(
        >>>            10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms),
        >>>        binsize=5. * pq.ms)
        >>> binned_st2 = elephant.conversion.BinnedSpikeTrain(
        >>>        elephant.spike_train_generation.homogeneous_poisson_process(
        >>>            10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms),
        >>>        binsize=5. * pq.ms)

        >>> cc_hist = \
        >>>    elephant.spike_train_correlation.cross_correlation_histogram(
        >>>        binned_st1, binned_st2, window=[-30,30],
        >>>        border_correction=False,
        >>>        binary=False, kernel=None, method='memory')

        >>> plt.bar(left=cc_hist[0].times.magnitude,
        >>>         height=cc_hist[0][:, 0].magnitude,
        >>>         width=cc_hist[0].sampling_period.magnitude)
        >>> plt.xlabel('time (' + str(cc_hist[0].times.units) + ')')
        >>> plt.ylabel('cross-correlation histogram')
        >>> plt.axis('tight')
        >>> plt.show()

    Alias
    -----
    `cch`
    """

    # Check that the spike trains are binned with the same temporal
    # resolution
    if not binned_st1.matrix_rows == 1:
        raise ValueError("Spike train must be one dimensional")
    if not binned_st2.matrix_rows == 1:
        raise ValueError("Spike train must be one dimensional")
    if not np.isclose(binned_st1.binsize.simplified.magnitude,
                      binned_st2.binsize.simplified.magnitude):
        raise ValueError("Bin sizes must be equal")

    # Check t_start and t_stop identical (to drop once that the
    # pad functionality wil be available in the BinnedSpikeTrain class)
    if not binned_st1.t_start == binned_st2.t_start:
        raise ValueError("Spike train must have same t start")
    if not binned_st1.t_stop == binned_st2.t_stop:
        raise ValueError("Spike train must have same t stop")

    # The maximum number of bins
    max_num_bins = max(binned_st1.num_bins, binned_st2.num_bins)

    # Set the time window in which the cch is computed
    # Window parameter given in number of bins (integer)
    if isinstance(window[0], int) and isinstance(window[1], int):
        # Check the window parameter values
        if window[0] >= window[1] or window[0] <= -max_num_bins \
                or window[1] >= max_num_bins:
            raise ValueError(
                "The window exceeds the length of the spike trains")
        # Assign left and right edges of the cch
        left_edge, right_edge = window[0], window[1]
        # The mode in which to compute the cch for the speed implementation
        cch_mode = 'pad'
    # Case without explicit window parameter
    elif window == 'full':
        # cch computed for all the possible entries
        # Assign left and right edges of the cch
        right_edge = binned_st2.num_bins - 1
        left_edge = -binned_st1.num_bins + 1
        cch_mode = window
        # cch compute only for the entries that completely overlap
    elif window == 'valid':
        # cch computed only for valid entries
        # Assign left and right edges of the cch
        right_edge = max(binned_st2.num_bins - binned_st1.num_bins, 0)
        left_edge = min(binned_st2.num_bins - binned_st1.num_bins, 0)
        cch_mode = window
    # Check the mode parameter
    else:
        raise ValueError("Invalid window parameter")
    if binary:
        binned_st1 = binned_st1.binarize(copy=True)
        binned_st2 = binned_st2.binarize(copy=True)

    cch_builder = CrossCorrHist(binned_st1,
                                binned_st2,
                                window=(left_edge, right_edge))
    if method == 'memory':
        cross_corr = cch_builder.correlate_memory()
    else:
        cross_corr = cch_builder.correlate_speed(cch_mode=cch_mode)
    bin_ids = np.arange(left_edge, right_edge + 1)
    if border_correction:
        cross_corr = cch_builder.border_correction(cross_corr)
    if kernel is not None:
        cross_corr = cch_builder.kernel_smoothing(cross_corr, kernel=kernel)
    if cross_corr_coef:
        cross_corr = cch_builder.cross_corr_coef(cross_corr)

    # Transform the array count into an AnalogSignal
    cch_result = neo.AnalogSignal(
        signal=cross_corr.reshape(cross_corr.size, 1),
        units=pq.dimensionless,
        t_start=(bin_ids[0] - 0.5) * binned_st1.binsize,
        sampling_period=binned_st1.binsize)
    # Return only the hist_bins bins and counts before and after the
    # central one
    return cch_result, bin_ids
Example #29
import neo
import quantities as pq
import numpy as np
import nixio as nix
from neo.io import NixIO

block = neo.Block()
chn_index = neo.ChannelIndex([0, 1, 2],
                             channel_names=["a", "b", "c"],
                             channel_ids=[1, 2, 3])
block.channel_indexes.append(chn_index)
unit = neo.Unit(name="x", description="contain1st")
chn_index.units.append(unit)

seg = neo.Segment()
asig = neo.AnalogSignal(name="signal",
                        signal=[1.1, 1.2, 1.5],
                        units="mV",
                        sampling_rate=1 * pq.Hz)
seg.analogsignals.append(asig)
asig2 = neo.AnalogSignal(name="signal2",
                         signal=[1.1, 1.2, 2.5],
                         units="mV",
                         sampling_rate=1 * pq.Hz)
seg.analogsignals.append(asig2)
irasig = neo.IrregularlySampledSignal(name="irsignal",
                                      signal=np.random.random((100, 2)),
                                      units="mV",
                                      times=np.cumsum(
                                          np.random.random(100) * pq.s))
seg.irregularlysampledsignals.append(irasig)
event = neo.Event(name="event",
                  times=np.cumsum(np.random.random(10)) * pq.ms)
Example #30
 def get_weights(self):
     signal = neo.AnalogSignal(self._weights, units='nA', sampling_period=self.interval * ms,
                             name="weight")
     signal.channel_index = neo.ChannelIndex(numpy.arange(len(self._weights[0])))
     return signal