def poisson_step_rate(t_start=0 * s,
                       t_stop=100 * s,
                       rate1=5 * Hz,
                       rate2=10 * Hz,
                       n_samples=1000,
                       min_len=1000):
     """
     Returns a poisson process with step rate given by `rate1` and `rate2`
     """
     # TODO: give different start stop for 2nd spiketrain
     minimum = np.inf
     sts = []
     for i in range(n_samples):
         spikes1 = homogeneous_poisson_process(rate1, t_start, t_stop)
         s1_start, s1_stop = spikes1.t_start, spikes1.t_stop
         spikes2 = homogeneous_poisson_process(rate2,
                                               t_start=spikes1[-1],
                                               t_stop=2 * s1_stop,
                                               as_array=True)
         s_concat = np.concatenate((spikes1.magnitude, spikes2))
         sts.append(s_concat)
         if len(s_concat) < minimum:
             minimum = len(s_concat)
     sts = np.array([st[:minimum] for st in sts])
     return sts
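A minimal usage sketch for the function above, assuming `homogeneous_poisson_process` is imported from `elephant.spike_train_generation` and that `numpy as np` and the `s`/`Hz` units from `quantities` are in scope, as in the snippet itself; the variable names below are illustrative only.

# Hedged usage sketch: generate 10 step-rate spike trains and inspect them.
step_trains = poisson_step_rate(t_start=0 * s, t_stop=50 * s,
                                rate1=5 * Hz, rate2=10 * Hz,
                                n_samples=10)
print(step_trains.shape)    # (10, minimum): every sample truncated to the shortest train
print(step_trains[0][:5])   # first spike times of the first sample, in seconds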
Example #2
 def test_low_rates(self):
     spiketrain = stgen.homogeneous_poisson_process(0 * Hz,
                                                    t_stop=1000 * ms)
     self.assertEqual(spiketrain.size, 0)
     # not really a test, just making sure that all code paths are covered
     for i in range(10):
         spiketrain = stgen.homogeneous_poisson_process(1 * Hz,
                                                        t_stop=1000 * ms)
Example #3
 def test_spike_contrast_trace(self):
     np.random.seed(15)
     spike_train_1 = stgen.homogeneous_poisson_process(rate=20 * Hz,
                                                       t_stop=1000. * ms)
     spike_train_2 = stgen.homogeneous_poisson_process(rate=20 * Hz,
                                                       t_stop=1000. * ms)
     synchrony, trace = spc.spike_contrast([spike_train_1, spike_train_2],
                                           return_trace=True)
     self.assertEqual(synchrony, max(trace.synchrony))
     self.assertEqual(len(trace.contrast), len(trace.active_spiketrains))
     self.assertEqual(len(trace.active_spiketrains), len(trace.synchrony))
Example #4
 def test_zero_refractory_period(self):
     rate = 10 * pq.Hz
     t_stop = 20 * pq.s
     np.random.seed(27)
     sp1 = stgen.homogeneous_poisson_process(rate, t_stop=t_stop,
                                             as_array=True)
     np.random.seed(27)
     sp2 = stgen.homogeneous_poisson_process(rate, t_stop=t_stop,
                                             refractory_period=0 * pq.ms,
                                             as_array=True)
     assert_array_almost_equal(sp1, sp2)
Example #5
 def test_spike_contrast_non_overlapping_spiketrains(self):
     np.random.seed(15)
     spike_train_1 = stgen.homogeneous_poisson_process(rate=10 * Hz,
                                                       t_start=0. * ms,
                                                       t_stop=10000. * ms)
     spike_train_2 = stgen.homogeneous_poisson_process(rate=10 * Hz,
                                                       t_start=5000. * ms,
                                                       t_stop=10000. * ms)
     spiketrains = [spike_train_1, spike_train_2]
     synchrony = spike_contrast(spiketrains, t_stop=5000 * ms)
     # the synchrony of non-overlapping spiketrains must be zero
     self.assertEqual(synchrony, 0.)
Example #6
 def test_low_rates(self):
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
         """
         Catch RuntimeWarning: divide by zero encountered in true_divide
         mean_interval = 1 / rate.magnitude, when rate == 0 Hz.
         """
         spiketrain = stgen.homogeneous_poisson_process(0 * Hz,
                                                        t_stop=1000 * ms)
     self.assertEqual(spiketrain.size, 0)
     # not really a test, just making sure that all code paths are covered
     spiketrain = stgen.homogeneous_poisson_process(1 * Hz,
                                                    t_stop=1000 * ms)
Example #7
 def setUp(self):
     self.st00 = SpikeTrain([], units='ms', t_stop=1000.0)
     self.st01 = SpikeTrain([1], units='ms', t_stop=1000.0)
     self.st02 = SpikeTrain([2], units='ms', t_stop=1000.0)
     self.st03 = SpikeTrain([2.9], units='ms', t_stop=1000.0)
     self.st04 = SpikeTrain([3.1], units='ms', t_stop=1000.0)
     self.st05 = SpikeTrain([5], units='ms', t_stop=1000.0)
     self.st06 = SpikeTrain([500], units='ms', t_stop=1000.0)
     self.st07 = SpikeTrain([12, 32], units='ms', t_stop=1000.0)
     self.st08 = SpikeTrain([32, 52], units='ms', t_stop=1000.0)
     self.st09 = SpikeTrain([42], units='ms', t_stop=1000.0)
     self.st10 = SpikeTrain([18, 60], units='ms', t_stop=1000.0)
     self.st11 = SpikeTrain([10, 20, 30, 40], units='ms', t_stop=1000.0)
     self.st12 = SpikeTrain([40, 30, 20, 10], units='ms', t_stop=1000.0)
     self.st13 = SpikeTrain([15, 25, 35, 45], units='ms', t_stop=1000.0)
     self.st14 = SpikeTrain([10, 20, 30, 40, 50], units='ms', t_stop=1000.0)
     self.st15 = SpikeTrain([0.01, 0.02, 0.03, 0.04, 0.05],
                            units='s',
                            t_stop=1000.0)
     self.st16 = SpikeTrain([12, 16, 28, 30, 42], units='ms', t_stop=1000.0)
     self.st21 = stg.homogeneous_poisson_process(50 * Hz, 0 * ms, 1000 * ms)
     self.st22 = stg.homogeneous_poisson_process(40 * Hz, 0 * ms, 1000 * ms)
     self.st23 = stg.homogeneous_poisson_process(30 * Hz, 0 * ms, 1000 * ms)
     self.rd_st_list = [self.st21, self.st22, self.st23]
     self.st31 = SpikeTrain([12.0], units='ms', t_stop=1000.0)
     self.st32 = SpikeTrain([12.0, 12.0], units='ms', t_stop=1000.0)
     self.st33 = SpikeTrain([20.0], units='ms', t_stop=1000.0)
     self.st34 = SpikeTrain([20.0, 20.0], units='ms', t_stop=1000.0)
     self.array1 = np.arange(1, 10)
     self.array2 = np.arange(1.2, 10)
     self.qarray1 = self.array1 * Hz
     self.qarray2 = self.array2 * Hz
     self.tau0 = 0.0 * ms
     self.q0 = np.inf / ms
     self.tau1 = 0.000000001 * ms
     self.q1 = 1.0 / self.tau1
     self.tau2 = 1.0 * ms
     self.q2 = 1.0 / self.tau2
     self.tau3 = 10.0 * ms
     self.q3 = 1.0 / self.tau3
     self.tau4 = 100.0 * ms
     self.q4 = 1.0 / self.tau4
     self.tau5 = 1000000000.0 * ms
     self.q5 = 1.0 / self.tau5
     self.tau6 = np.inf * ms
     self.q6 = 0.0 / ms
     self.tau7 = 0.01 * s
     self.q7 = 1.0 / self.tau7
     self.t = np.linspace(0, 200, 20000001) * ms
Example #8
    def test_spike_contrast_double_duration(self):
        np.random.seed(19)
        spike_train_1 = stgen.homogeneous_poisson_process(rate=10 * Hz,
                                                          t_start=0. * ms,
                                                          t_stop=10000. * ms)
        spike_train_2 = stgen.homogeneous_poisson_process(rate=10 * Hz,
                                                          t_start=0. * ms,
                                                          t_stop=10000. * ms)
        spike_train_3 = stgen.homogeneous_poisson_process(rate=10 * Hz,
                                                          t_start=0. * ms,
                                                          t_stop=10000. * ms)

        spike_trains = [spike_train_1, spike_train_2, spike_train_3]
        synchrony = spike_contrast(spike_trains, t_stop=20000 * ms)
        self.assertEqual(synchrony, 0.5)
def get_default_corrcoef_matrix():
    # Set the random seed explicitly (it is used inside
    # homogeneous_poisson_process) to avoid different seeds when creating the
    # target and result images.
    np.random.seed(0)
    spike_train_1 = homogeneous_poisson_process(rate=10.0 * Hz,
                                                t_start=0.0 * s,
                                                t_stop=10.0 * s)
    spike_train_2 = homogeneous_poisson_process(rate=10.0 * Hz,
                                                t_start=0.0 * s,
                                                t_stop=10.0 * s)
    # the bin size of 0.1 s is rather large, so we might expect a non-zero
    # cross-correlation
    corrcoef_matrix = stcorr.corrcoef(
        BinnedSpikeTrain([spike_train_1, spike_train_2], binsize=0.1 * s))
    return corrcoef_matrix
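A short sketch of how this helper might be used, assuming the same imports as in the snippet (`homogeneous_poisson_process`, `BinnedSpikeTrain`, `stcorr`) are available.

# Hedged usage sketch: the result is a 2x2 correlation-coefficient matrix.
corrcoef_matrix = get_default_corrcoef_matrix()
print(corrcoef_matrix.shape)   # (2, 2)
print(corrcoef_matrix[0, 1])   # cross-correlation between the two Poisson trains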
Example #10
 def test_spike_contrast_trace(self):
     np.random.seed(15)
     spike_train_1 = stgen.homogeneous_poisson_process(rate=20 * Hz,
                                                       t_stop=1000. * ms)
     spike_train_2 = stgen.homogeneous_poisson_process(rate=20 * Hz,
                                                       t_stop=1000. * ms)
     synchrony, trace = spike_contrast([spike_train_1, spike_train_2],
                                       return_trace=True)
     self.assertEqual(synchrony, max(trace.synchrony))
     self.assertEqual(len(trace.contrast), len(trace.active_spiketrains))
     self.assertEqual(len(trace.active_spiketrains), len(trace.synchrony))
     self.assertEqual(len(trace.bin_size), len(trace.synchrony))
     self.assertIsInstance(trace.bin_size, pq.Quantity)
     self.assertEqual(trace.bin_size[0], 500 * pq.ms)
     self.assertAlmostEqual(trace.bin_size[-1], 10.1377798 * pq.ms)
 def setUp(self):
     self.st00 = SpikeTrain([], units='ms', t_stop=1000.0)
     self.st01 = SpikeTrain([1], units='ms', t_stop=1000.0)
     self.st02 = SpikeTrain([2], units='ms', t_stop=1000.0)
     self.st03 = SpikeTrain([2.9], units='ms', t_stop=1000.0)
     self.st04 = SpikeTrain([3.1], units='ms', t_stop=1000.0)
     self.st05 = SpikeTrain([5], units='ms', t_stop=1000.0)
     self.st06 = SpikeTrain([500], units='ms', t_stop=1000.0)
     self.st07 = SpikeTrain([12, 32], units='ms', t_stop=1000.0)
     self.st08 = SpikeTrain([32, 52], units='ms', t_stop=1000.0)
     self.st09 = SpikeTrain([42], units='ms', t_stop=1000.0)
     self.st10 = SpikeTrain([18, 60], units='ms', t_stop=1000.0)
     self.st11 = SpikeTrain([10, 20, 30, 40], units='ms', t_stop=1000.0)
     self.st12 = SpikeTrain([40, 30, 20, 10], units='ms', t_stop=1000.0)
     self.st13 = SpikeTrain([15, 25, 35, 45], units='ms', t_stop=1000.0)
     self.st14 = SpikeTrain([10, 20, 30, 40, 50], units='ms', t_stop=1000.0)
     self.st15 = SpikeTrain([0.01, 0.02, 0.03, 0.04, 0.05],
                            units='s', t_stop=1000.0)
     self.st16 = SpikeTrain([12, 16, 28, 30, 42], units='ms', t_stop=1000.0)
     self.st21 = stg.homogeneous_poisson_process(50*Hz, 0*ms, 1000*ms)
     self.st22 = stg.homogeneous_poisson_process(40*Hz, 0*ms, 1000*ms)
     self.st23 = stg.homogeneous_poisson_process(30*Hz, 0*ms, 1000*ms)
     self.rd_st_list = [self.st21, self.st22, self.st23]
     self.st31 = SpikeTrain([12.0], units='ms', t_stop=1000.0)
     self.st32 = SpikeTrain([12.0, 12.0], units='ms', t_stop=1000.0)
     self.st33 = SpikeTrain([20.0], units='ms', t_stop=1000.0)
     self.st34 = SpikeTrain([20.0, 20.0], units='ms', t_stop=1000.0)
     self.array1 = np.arange(1, 10)
     self.array2 = np.arange(1.2, 10)
     self.qarray1 = self.array1 * Hz
     self.qarray2 = self.array2 * Hz
     self.tau0 = 0.0 * ms
     self.q0 = np.inf / ms
     self.tau1 = 0.000000001 * ms
     self.q1 = 1.0 / self.tau1
     self.tau2 = 1.0 * ms
     self.q2 = 1.0 / self.tau2
     self.tau3 = 10.0 * ms
     self.q3 = 1.0 / self.tau3
     self.tau4 = 100.0 * ms
     self.q4 = 1.0 / self.tau4
     self.tau5 = 1000000000.0 * ms
     self.q5 = 1.0 / self.tau5
     self.tau6 = np.inf * ms
     self.q6 = 0.0 / ms
     self.tau7 = 0.01 * s
     self.q7 = 1.0 / self.tau7
     self.t = np.linspace(0, 200, 20000001) * ms
Example #12
    def test_statistics(self):
        # This is a statistical test that has a non-zero chance of failure
        # during normal operation. Thus, we set the random seed to a value that
        # creates a realization passing the test.
        np.random.seed(seed=12345)
        
        for rate in [123.0*Hz, 0.123*kHz]:
            for t_stop in [2345*ms, 2.345*second]:
                spiketrain = stgen.homogeneous_poisson_process(rate, t_stop=t_stop)
                intervals = isi(spiketrain)

                expected_spike_count = int((rate * t_stop).simplified)
                self.assertLess(pdiff(expected_spike_count, spiketrain.size), 0.2)  # should fail about 1 time in 1000

                expected_mean_isi = (1/rate)
                self.assertLess(pdiff(expected_mean_isi, intervals.mean()), 0.2)

                expected_first_spike = 0*ms
                self.assertLess(spiketrain[0] - expected_first_spike, 7*expected_mean_isi)

                expected_last_spike = t_stop
                self.assertLess(expected_last_spike - spiketrain[-1], 7*expected_mean_isi)

                # Kolmogorov-Smirnov test
                D, p = kstest(intervals.rescale(t_stop.units),
                              "expon",
                              args=(0, expected_mean_isi.rescale(t_stop.units)),  # args are (loc, scale)
                              alternative='two-sided')
                self.assertGreater(p, 0.001)
                self.assertLess(D, 0.12)
Example #13
def hom_poiss(rate):
    """Generate a homogeneous spike train with average frequency given by rate
    parameter.
    """
    rate = rate * pq.Hz

    spike_trains = []
    for x in range(400):
        curr_train = stg.homogeneous_poisson_process(rate, 0.01 * pq.s,
                                                     0.51 * pq.s)
        # We have to make sure that there is sufficient space between spikes;
        # if there is not, the offending spike is shifted forward by 0.1 ms
        # in the loop below.
        spike_trains.append(curr_train)

    array_like = np.array([
        np.around(np.array(x.times) * 1000, decimals=1) for x in spike_trains
    ], dtype=object)  # trains differ in length, so build an object array
    for arr_idx in range(array_like.shape[0]):
        bad_idc = np.argwhere(np.diff(array_like[arr_idx]) < 0.1).flatten()
        bad_idc = bad_idc + 1
        while bad_idc.any():
            for bad_idx in bad_idc:
                array_like[arr_idx][
                    bad_idx] = array_like[arr_idx][bad_idx] + 0.1
            bad_idc = np.argwhere(np.diff(array_like[arr_idx]) < 0.1).flatten()
            bad_idc = bad_idc + 1

    return array_like
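A short usage sketch, assuming `numpy as np`, `quantities as pq`, and `elephant.spike_train_generation as stg` are imported as in the snippet; the rate of 50 Hz is illustrative.

# Hedged usage sketch: 400 trains of spike times rounded to 0.1 ms (in ms),
# with consecutive spikes pushed at least 0.1 ms apart by the loop above.
trains_ms = hom_poiss(50.0)
print(len(trains_ms))      # 400 trains
print(trains_ms[0][:5])    # first spike times of the first train, in ms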
Example #14
    def test_to_spike_trains(self):
        np.random.seed(1)
        bst1 = cv.BinnedSpikeTrain(
            spiketrains=[self.spiketrain_a, self.spiketrain_b],
            bin_size=self.bin_size)
        spiketrains = [
            homogeneous_poisson_process(rate=10 * pq.Hz,
                                        t_start=-1 * pq.s,
                                        t_stop=10 * pq.s)
        ]
        bst2 = cv.BinnedSpikeTrain(spiketrains=spiketrains,
                                   bin_size=300 * pq.ms)
        for bst in (bst1, bst2):
            for spikes in ("random", "left", "center"):
                spiketrains_gen = bst.to_spike_trains(spikes=spikes,
                                                      annotate_bins=True)
                for st, indices in zip(spiketrains_gen, bst.spike_indices):
                    # check sorted
                    self.assertTrue((np.diff(st.magnitude) > 0).all())
                    assert_array_equal(st.array_annotations['bins'], indices)
                    self.assertEqual(st.annotations['bin_size'], bst.bin_size)
                    self.assertEqual(st.t_start, bst.t_start)
                    self.assertEqual(st.t_stop, bst.t_stop)
                bst_same = cv.BinnedSpikeTrain(spiketrains_gen,
                                               bin_size=bst.bin_size)
                self.assertEqual(bst_same, bst)

            # invalid mode
            self.assertRaises(ValueError, bst.to_spike_trains, spikes='right')
Example #15
 def test_compare_with_as_array(self):
     rate = 10 * pq.Hz
     t_stop = 10 * pq.s
     for refractory_period in (None, 3 * pq.ms):
         np.random.seed(27)
         spiketrain = stgen.homogeneous_poisson_process(
             rate=rate, t_stop=t_stop, refractory_period=refractory_period)
         self.assertIsInstance(spiketrain, neo.SpikeTrain)
         np.random.seed(27)
         spiketrain_array = stgen.homogeneous_poisson_process(
             rate=rate, t_stop=t_stop, refractory_period=refractory_period,
             as_array=True)
         # don't check with isinstance: Quantity is a subclass of np.ndarray
         self.assertTrue(isinstance(spiketrain_array, np.ndarray))
         assert_array_almost_equal(spiketrain.times.magnitude,
                                   spiketrain_array)
Example #16
def generate_stp(occ, xi, t_stop, delays, rate):
    '''
    Generate a spatio-temporal pattern (STP). One pattern consists of a
    repeated sequence of spikes with fixed inter-spike intervals (delays).
    The start times of the pattern repetitions are evenly spaced over
    [0, t_stop).
    '''
    # Generating all the first spikes of the repetitions
    s1 = np.arange(0, t_stop.magnitude, t_stop.magnitude / occ)

    # Using matrix algebra to add all the delays
    s1_matr = (s1 * np.ones([xi - 1, occ])).T
    delays_matr = np.ones([occ, 1]) * delays.rescale(
        t_stop.units).magnitude.reshape([1, xi - 1])
    ss = s1_matr + delays_matr

    # Stacking the first and successive spikes
    stp = np.hstack((s1.reshape(occ, 1), ss))

    # Generating the background noise
    noise = [
        list(stg.homogeneous_poisson_process(rate, t_stop=t_stop).magnitude)
        for i in range(xi)
    ]
    # Transform into neo SpikeTrain objects
    stp = [
        neo.core.SpikeTrain((sorted(noise[i] + list(t))) * t_stop.units,
                            t_stop) for i, t in enumerate(stp.T)
    ]
    return stp
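A small usage sketch for `generate_stp`, assuming `numpy as np`, `quantities as pq`, `neo`, and `elephant.spike_train_generation as stg` are imported as above; the pattern parameters are illustrative only.

# Hedged usage sketch: a 3-neuron pattern (xi=3) repeated 5 times (occ=5),
# with fixed delays of 5 ms and 10 ms, embedded in 1 Hz background noise.
delays = np.array([5., 10.]) * pq.ms    # xi - 1 delays between pattern spikes
stp_trains = generate_stp(occ=5, xi=3, t_stop=1 * pq.s,
                          delays=delays, rate=1 * pq.Hz)
print(len(stp_trains))         # 3 neo.SpikeTrain objects, one per pattern neuron
print(stp_trains[0].t_stop)    # 1.0 s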
Example #17
    def test_probability_matrix_symmetric(self):
        np.random.seed(1)
        kernel_width = 9 * pq.ms
        rate = 50 * pq.Hz
        n_spiketrains = 50
        spiketrains = []
        spiketrains_copy = []
        for _ in range(n_spiketrains):
            st = homogeneous_poisson_process(rate, t_stop=100 * pq.ms)
            spiketrains.append(st)
            spiketrains_copy.append(st.copy())

        asset_obj = asset.ASSET(spiketrains, bin_size=self.bin_size)
        asset_obj_symmetric = asset.ASSET(spiketrains,
                                          spiketrains_j=spiketrains_copy,
                                          bin_size=self.bin_size)

        imat = asset_obj.intersection_matrix()
        pmat = asset_obj.probability_matrix_analytical(
            kernel_width=kernel_width)

        imat_symm = asset_obj_symmetric.intersection_matrix()
        pmat_symm = asset_obj_symmetric.probability_matrix_analytical(
            kernel_width=kernel_width)

        assert_array_almost_equal(pmat, pmat_symm)
        assert_array_almost_equal(imat, imat_symm)
        assert_array_almost_equal(asset_obj.x_edges,
                                  asset_obj_symmetric.x_edges)
        assert_array_almost_equal(asset_obj.y_edges,
                                  asset_obj_symmetric.y_edges)
Example #18
def make_test_spike_map(pos_fields,
                        box_size,
                        bin_size,
                        rate,
                        n_step=10**6,
                        step_size=.01):
    from elephant.spike_train_generation import homogeneous_poisson_process
    from scipy.interpolate import interp1d

    def infield(pos, pos_fields, sigma=.1):
        dist = np.sqrt(np.sum((pos - pos_fields)**2, axis=1))
        if any(dist <= sigma):
            return True
        else:
            return False

    t = np.linspace(0, n_step * step_size / 1.5, n_step)  # s / max_speed
    trajectory = random_walk(box_size, step_size, n_step)
    x, y = trajectory.T
    st = homogeneous_poisson_process(rate=rate / pq.s,
                                     t_start=0 * pq.s,
                                     t_stop=t[-1] * pq.s).magnitude

    spike_pos = np.array([interp1d(t, x)(st), interp1d(t, y)(st)])

    spikes = [
        times for times, pos in zip(st, spike_pos.T)
        if infield(pos, pos_fields)
    ]

    return np.array(x), np.array(y), np.array(t), np.array(spikes)
    def test_statistics(self):
        # This is a statistical test that has a non-zero chance of failure
        # during normal operation. Re-run the test to see if the error
        # persists.
        for rate in [123.0*Hz, 0.123*kHz]:
            for t_stop in [2345*ms, 2.345*second]:
                spiketrain = stgen.homogeneous_poisson_process(rate, t_stop=t_stop)
                intervals = isi(spiketrain)

                expected_spike_count = int((rate * t_stop).simplified)
                self.assertLess(pdiff(expected_spike_count, spiketrain.size), 0.2)  # should fail about 1 time in 1000

                expected_mean_isi = (1/rate)
                self.assertLess(pdiff(expected_mean_isi, intervals.mean()), 0.2)

                expected_first_spike = 0*ms
                self.assertLess(spiketrain[0] - expected_first_spike, 7*expected_mean_isi)

                expected_last_spike = t_stop
                self.assertLess(expected_last_spike - spiketrain[-1], 7*expected_mean_isi)

                # Kolmogorov-Smirnov test
                D, p = kstest(intervals.rescale(t_stop.units),
                              "expon",
                              args=(0, expected_mean_isi.rescale(t_stop.units)),  # args are (loc, scale)
                              alternative='two-sided')
                self.assertGreater(p, 0.001)
                self.assertLess(D, 0.12)
 def test_corrcoef_fast_mode(self):
     np.random.seed(27)
     st = homogeneous_poisson_process(rate=10 * pq.Hz, t_stop=10 * pq.s)
     binned_st = conv.BinnedSpikeTrain(st, n_bins=10)
     assert_array_almost_equal(
         sc.correlation_coefficient(binned_st, fast=False),
         sc.correlation_coefficient(binned_st, fast=True))
Example #21
 def test_small_kernel_sigma(self):
     # Test that the instantaneous rate is overestimated when
     # kernel.sigma << sampling_period and center_kernel is True.
     # The setup is set to match the issue 288.
     np.random.seed(9)
     sampling_period = 200 * pq.ms
     sigma = 5 * pq.ms
     rate_expected = 10 * pq.Hz
     spiketrain = homogeneous_poisson_process(rate_expected,
                                              t_start=0 * pq.s,
                                              t_stop=10 * pq.s)
     kernel_types = tuple(
         kern_cls for kern_cls in kernels.__dict__.values()
         if isinstance(kern_cls, type) and issubclass(
             kern_cls, kernels.Kernel) and kern_cls is not kernels.Kernel
         and kern_cls is not kernels.SymmetricKernel)
     for kern_cls, invert in itertools.product(kernel_types, (False, True)):
         kernel = kern_cls(sigma=sigma, invert=invert)
         with self.subTest(kernel=kernel):
             rate = statistics.instantaneous_rate(
                 spiketrain,
                 sampling_period=sampling_period,
                 kernel=kernel,
                 center_kernel=True)
             self.assertGreater(rate.mean(), rate_expected)
Example #22
def LIF_ASC(we, wi, rateE, rateI):
    e_spike_train = np.asarray([
        homogeneous_poisson_process(rate=rateE * Hz,
                                    t_start=0.0 * s,
                                    t_stop=1.0 * s,
                                    as_array=True) for i in range(1)
    ])  #poisson_spike_trains(rate_e, T, 0)
    i_spike_train = np.asarray([
        homogeneous_poisson_process(rate=rateI * Hz,
                                    t_start=0.0 * s,
                                    t_stop=1.0 * s,
                                    as_array=True) for i in range(1)
    ])  #poisson_spike_trains(rate_i, T, 0)
    e_spike_train[:] = np.round(e_spike_train[:] / dt)
    i_spike_train[:] = np.round(i_spike_train[:] / dt)
    spikes = []
    for i in range(1, len(time)):

        dI = np.multiply(-1 * k, I[i - 1])
        I[i, :] = I[i - 1, :] + dI * dt

        dg_e = -g_e[i - 1] / tau_e
        g_e[i] = g_e[i - 1] + dg_e * dt

        dg_i = -g_i[i - 1] / tau_i
        g_i[i] = g_i[i - 1] + dg_i * dt

        dV = 1 / C * (np.sum(I[i - 1, :]) - G * (V[i - 1] - e_L) -
                      (g_e[i - 1] * (V[i - 1])) - (g_i[i - 1] *
                                                   (V[i - 1] - e_L)))
        V[i] = V[i - 1] + dV * dt

        # Check for a spike and update voltage etc.
        if V[i] > Theta_Inf + e_L:
            I[i, :] = R * I[i - 1, :] + deltaI
            V[i] = V_r
            spikes.append(i * dt)

        # check if time has passed a synapse event time.

        if np.isin(i, e_spike_train):
            g_e[i] = g_e[i - 1] + we

        if np.isin(i, i_spike_train):
            g_i[i] = g_i[i - 1] + wi

    return np.asarray(spikes)
Example #23
 def test_spike_contrast_same_signal(self):
     np.random.seed(21)
     spike_train = stgen.homogeneous_poisson_process(rate=10 * Hz,
                                                     t_start=0. * ms,
                                                     t_stop=10000. * ms)
     spike_trains = [spike_train, spike_train]
     synchrony = spike_contrast(spike_trains, min_bin=1 * ms)
     self.assertEqual(synchrony, 1.0)
Example #24
 def test_nondecrease_spike_times(self):
     for refractory_period in (None, 3 * pq.ms):
         np.random.seed(27)
         spiketrain = stgen.homogeneous_poisson_process(
             rate=10 * pq.Hz, t_stop=1000 * pq.s,
             refractory_period=refractory_period)
         diffs = np.diff(spiketrain.times)
         self.assertTrue((diffs >= 0).all())
 def test_buffer_overrun(self):
     np.random.seed(6085)  # this seed should produce a buffer overrun
     t_stop=1000*ms
     rate = 10*Hz
     spiketrain = stgen.homogeneous_poisson_process(rate, t_stop=t_stop)
     expected_last_spike = t_stop
     expected_mean_isi = (1/rate).rescale(ms)
     self.assertLess(expected_last_spike - spiketrain[-1], 4*expected_mean_isi)
Example #26
def generate_poisson_spike_train(rate, tstop, tstart=0., as_array=True):
    from elephant.spike_train_generation import homogeneous_poisson_process
    from quantities import ms, Hz

    return homogeneous_poisson_process(rate * Hz,
                                       t_start=tstart * ms,
                                       t_stop=tstop * ms,
                                       as_array=as_array)
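A minimal sketch of calling the wrapper above; rate and times are passed as plain floats because the function attaches the `Hz`/`ms` units itself.

# Hedged usage sketch.
spike_times = generate_poisson_spike_train(rate=20.0, tstop=500.0)
print(spike_times[:5])     # first spike times as a plain ndarray, in ms
print(spike_times.size)    # roughly rate * duration = 10 spikes on average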
Example #27
 def test_buffer_overrun(self):
     np.random.seed(6085)  # this seed should produce a buffer overrun
     t_stop=1000*ms
     rate = 10*Hz
     spiketrain = stgen.homogeneous_poisson_process(rate, t_stop=t_stop)
     expected_last_spike = t_stop
     expected_mean_isi = (1/rate).rescale(ms)
     self.assertLess(expected_last_spike - spiketrain[-1], 4*expected_mean_isi)
Example #28
 def test_joint_isi_dithering_output(self):
     spiketrain = stg.homogeneous_poisson_process(
         rate=100. * pq.Hz,
         refractory_period=3 * pq.ms,
         t_stop=0.1 * pq.s)
     surrogate_train = surr.JointISI(spiketrain).dithering()[0]
     ground_truth = [0.005571, 0.018363, 0.026825, 0.036336, 0.045193,
                     0.05146, 0.058489, 0.078053]
     assert_array_almost_equal(surrogate_train.magnitude, ground_truth)
Example #29
 def test_mean_firing_rate_typical_use_case(self):
     np.random.seed(92)
     st = homogeneous_poisson_process(rate=100 * pq.Hz, t_stop=100 * pq.s)
     rate1 = statistics.mean_firing_rate(st)
     rate2 = statistics.mean_firing_rate(st,
                                         t_start=st.t_start,
                                         t_stop=st.t_stop)
     self.assertEqual(rate1.units, rate2.units)
     self.assertAlmostEqual(rate1.item(), rate2.item())
Example #30
    def setUp(self):
        def gen_gamma_spike_train(k, theta, t_max):
            x = []
            for i in range(int(3 * t_max / (k * theta))):
                x.append(np.random.gamma(k, theta))
            s = np.cumsum(x)
            return s[s < t_max]

        def gen_test_data(rates, durs, shapes=(1, 1, 1, 1)):
            s = gen_gamma_spike_train(shapes[0], 1. / rates[0], durs[0])
            for i in range(1, 4):
                s_i = gen_gamma_spike_train(shapes[i], 1. / rates[i], durs[i])
                s = np.concatenate([s, s_i + np.sum(durs[:i])])
            return s

        self.n_iters = 10
        self.bin_size = 20 * pq.ms

        # generate data1
        rates_a = (2, 10, 2, 2)
        rates_b = (2, 2, 10, 2)
        durs = (2.5, 2.5, 2.5, 2.5)
        np.random.seed(0)
        n_trials = 100
        self.data0 = []
        for trial in range(n_trials):
            n1 = neo.SpikeTrain(gen_test_data(rates_a, durs), units=1 * pq.s,
                                t_start=0 * pq.s, t_stop=10 * pq.s)
            n2 = neo.SpikeTrain(gen_test_data(rates_a, durs), units=1 * pq.s,
                                t_start=0 * pq.s, t_stop=10 * pq.s)
            n3 = neo.SpikeTrain(gen_test_data(rates_b, durs), units=1 * pq.s,
                                t_start=0 * pq.s, t_stop=10 * pq.s)
            n4 = neo.SpikeTrain(gen_test_data(rates_b, durs), units=1 * pq.s,
                                t_start=0 * pq.s, t_stop=10 * pq.s)
            n5 = neo.SpikeTrain(gen_test_data(rates_a, durs), units=1 * pq.s,
                                t_start=0 * pq.s, t_stop=10 * pq.s)
            n6 = neo.SpikeTrain(gen_test_data(rates_a, durs), units=1 * pq.s,
                                t_start=0 * pq.s, t_stop=10 * pq.s)
            n7 = neo.SpikeTrain(gen_test_data(rates_b, durs), units=1 * pq.s,
                                t_start=0 * pq.s, t_stop=10 * pq.s)
            n8 = neo.SpikeTrain(gen_test_data(rates_b, durs), units=1 * pq.s,
                                t_start=0 * pq.s, t_stop=10 * pq.s)
            self.data0.append([n1, n2, n3, n4, n5, n6, n7, n8])
        self.x_dim = 4

        self.data1 = self.data0[:20]

        # generate data2
        np.random.seed(27)
        self.data2 = []
        n_trials = 10
        n_channels = 20
        for trial in range(n_trials):
            rates = np.random.randint(low=1, high=100, size=n_channels)
            spike_times = [homogeneous_poisson_process(rate=rate * pq.Hz)
                           for rate in rates]
            self.data2.append(spike_times)
    def test_joint_isi_dithering_format(self):

        rate = 100. * pq.Hz
        t_stop = 1. * pq.s
        spiketrain = stg.homogeneous_poisson_process(rate, t_stop=t_stop)
        n_surrogates = 2
        dither = 10 * pq.ms

        # Test fast version
        joint_isi_instance = surr.JointISI(spiketrain, dither=dither)
        surrogate_trains = joint_isi_instance.dithering(
            n_surrogates=n_surrogates)

        self.assertIsInstance(surrogate_trains, list)
        self.assertEqual(len(surrogate_trains), n_surrogates)
        self.assertEqual(joint_isi_instance.method, 'fast')

        for surrogate_train in surrogate_trains:
            self.assertIsInstance(surrogate_train, neo.SpikeTrain)
            self.assertEqual(surrogate_train.units, spiketrain.units)
            self.assertEqual(surrogate_train.t_start, spiketrain.t_start)
            self.assertEqual(surrogate_train.t_stop, spiketrain.t_stop)
            self.assertEqual(len(surrogate_train), len(spiketrain))

        # Test window_version
        joint_isi_instance = surr.JointISI(spiketrain,
                                           method='window',
                                           dither=2 * dither,
                                           num_bins=50)
        surrogate_trains = joint_isi_instance.dithering(
            n_surrogates=n_surrogates)

        self.assertIsInstance(surrogate_trains, list)
        self.assertEqual(len(surrogate_trains), n_surrogates)
        self.assertEqual(joint_isi_instance.method, 'window')

        for surrogate_train in surrogate_trains:
            self.assertIsInstance(surrogate_train, neo.SpikeTrain)
            self.assertEqual(surrogate_train.units, spiketrain.units)
            self.assertEqual(surrogate_train.t_start, spiketrain.t_start)
            self.assertEqual(surrogate_train.t_stop, spiketrain.t_stop)
            self.assertEqual(len(surrogate_train), len(spiketrain))

        # Test surrogate methods wrapper
        surrogate_trains = surr.surrogates(spiketrain,
                                           n=n_surrogates,
                                           surr_method='joint_isi_dithering')
        self.assertIsInstance(surrogate_trains, list)
        self.assertEqual(len(surrogate_trains), n_surrogates)

        for surrogate_train in surrogate_trains:
            self.assertIsInstance(surrogate_train, neo.SpikeTrain)
            self.assertEqual(surrogate_train.units, spiketrain.units)
            self.assertEqual(surrogate_train.t_start, spiketrain.t_start)
            self.assertEqual(surrogate_train.t_stop, spiketrain.t_stop)
            self.assertEqual(len(surrogate_train), len(spiketrain))
Example #32
 def test_t_start_and_t_stop(self):
     rate = 10 * pq.Hz
     t_start = 17 * pq.ms
     t_stop = 2 * pq.s
     for refractory_period in (None, 3 * pq.ms):
         spiketrain = stgen.homogeneous_poisson_process(
             rate=rate, t_start=t_start, t_stop=t_stop,
             refractory_period=refractory_period)
         self.assertEqual(spiketrain.t_start, t_start)
         self.assertEqual(spiketrain.t_stop, t_stop)
Example #33
 def test_t_start_agnostic(self):
     np.random.seed(15)
     t_stop = 10 * second
     spike_train_1 = stgen.homogeneous_poisson_process(rate=10 * Hz,
                                                       t_stop=t_stop)
     spike_train_2 = stgen.homogeneous_poisson_process(rate=10 * Hz,
                                                       t_stop=t_stop)
     spiketrains = [spike_train_1, spike_train_2]
     synchrony_target = spike_contrast(spiketrains)
     # developer sanity check: the reference synchrony should be non-zero
     assert synchrony_target > 0
     t_shift = 20 * second
     spiketrains_shifted = [
         neo.SpikeTrain(st.times + t_shift,
                        t_start=t_shift,
                        t_stop=t_stop + t_shift) for st in spiketrains
     ]
     synchrony = spike_contrast(spiketrains_shifted)
     self.assertAlmostEqual(synchrony, synchrony_target)
 def test_low_rates(self):
     spiketrain = stgen.homogeneous_poisson_process(0*Hz, t_stop=1000*ms)
     self.assertEqual(spiketrain.size, 0)
     # not really a test, just making sure that all code paths are covered
     for i in range(10):
         spiketrain = stgen.homogeneous_poisson_process(1*Hz, t_stop=1000*ms)