Example #1
    def test_corrcoef_binned_same_spiketrains(self):
        '''
        Test if the correlation coefficient between two identical binned spike
        trains evaluates to a 2x2 matrix of ones.
        '''
        # Calculate correlation
        binned_st = conv.BinnedSpikeTrain([self.st_0, self.st_0],
                                          t_start=0 * pq.ms,
                                          t_stop=50. * pq.ms,
                                          binsize=1 * pq.ms)
        result = sc.corrcoef(binned_st, fast=False)
        target = np.ones((2, 2))

        # Check dimensions
        self.assertEqual(len(result), 2)
        # Check result
        assert_array_almost_equal(result, target)
        assert_array_almost_equal(result, sc.corrcoef(binned_st, fast=True))
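Note: these excerpts are collected test methods and snippets built around elephant.conversion.BinnedSpikeTrain, and they assume a shared import header that the excerpts themselves do not show. A plausible reconstruction, inferred from usage in the snippets (module aliases are assumptions), is:

import unittest
import warnings

import neo
import numpy as np
import quantities as pq
from numpy.testing import assert_array_almost_equal, assert_array_equal

import elephant.conversion as conv  # several snippets alias this as cv
import elephant.spike_train_correlation as sc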
Example #2
    def setUp(self):
        # These two arrays must be such that they do not have coincidences
        # spanning two neighboring bins, assuming ms bins [0,1), [1,2), ...
        self.test_array_1d_0 = [
            1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2]
        self.test_array_1d_1 = [
            1.02, 2.71, 18.82, 28.46, 28.79, 43.6]

        # Build spike trains
        self.st_0 = neo.SpikeTrain(
            self.test_array_1d_0, units='ms', t_stop=50.)
        self.st_1 = neo.SpikeTrain(
            self.test_array_1d_1, units='ms', t_stop=50.)

        # And binned counterparts
        self.binned_st = conv.BinnedSpikeTrain(
            [self.st_0, self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
            bin_size=1 * pq.ms)
Example #3
    def test_rescale(self):
        train = neo.SpikeTrain(times=np.array([1.001, 1.002, 1.005]) * pq.s,
                               t_start=1 * pq.s,
                               t_stop=1.01 * pq.s)
        bst = cv.BinnedSpikeTrain(train,
                                  t_start=1 * pq.s,
                                  t_stop=1.01 * pq.s,
                                  bin_size=1 * pq.ms)
        self.assertEqual(bst.units, pq.s)
        self.assertEqual(bst._t_start, 1)  # 1 s
        self.assertEqual(bst._t_stop, 1.01)  # 1.01 s
        self.assertEqual(bst._bin_size, 0.001)  # 0.001 s

        bst.rescale(units='ms')
        self.assertEqual(bst.units, pq.ms)
        self.assertEqual(bst._t_start, 1000)  # 1 s
        self.assertEqual(bst._t_stop, 1010)  # 1.01 s
        self.assertEqual(bst._bin_size, 1)  # 0.001 s
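The expectations above follow from plain quantities arithmetic; a minimal, runnable sketch of the same unit bookkeeping (quantities only, no Elephant) is:

import quantities as pq

bin_size = 1 * pq.ms
print(bin_size.rescale(pq.s).magnitude.item())     # 0.001 -> value stored when units are seconds
print((1 * pq.s).rescale(pq.ms).magnitude.item())  # 1000.0 -> value stored when units are ms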
Example #4
    def setUp(self):
        # These two arrays must be such that they do not have coincidences
        # spanning two neighboring bins, assuming ms bins [0,1), [1,2), ...
        self.test_array_1d_1 = [
            1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2
        ]
        self.test_array_1d_2 = [1.02, 2.71, 18.82, 28.46, 28.79, 43.6]

        # Build spike trains
        self.st_1 = neo.SpikeTrain(self.test_array_1d_1,
                                   units='ms',
                                   t_stop=50.)
        self.st_2 = neo.SpikeTrain(self.test_array_1d_2,
                                   units='ms',
                                   t_stop=50.)

        # And binned counterparts
        self.binned_st1 = conv.BinnedSpikeTrain([self.st_1],
                                                t_start=0 * pq.ms,
                                                t_stop=50. * pq.ms,
                                                binsize=1 * pq.ms)
        self.binned_st2 = conv.BinnedSpikeTrain([self.st_2],
                                                t_start=0 * pq.ms,
                                                t_stop=50. * pq.ms,
                                                binsize=1 * pq.ms)
        self.binned_sts = conv.BinnedSpikeTrain([self.st_1, self.st_2],
                                                t_start=0 * pq.ms,
                                                t_stop=50. * pq.ms,
                                                binsize=1 * pq.ms)

        # Binned sts to check errors raising
        self.st_check_binsize = conv.BinnedSpikeTrain([self.st_1],
                                                      t_start=0 * pq.ms,
                                                      t_stop=50. * pq.ms,
                                                      binsize=5 * pq.ms)
        self.st_check_t_start = conv.BinnedSpikeTrain([self.st_1],
                                                      t_start=1 * pq.ms,
                                                      t_stop=50. * pq.ms,
                                                      binsize=1 * pq.ms)
        self.st_check_t_stop = conv.BinnedSpikeTrain([self.st_1],
                                                     t_start=0 * pq.ms,
                                                     t_stop=40. * pq.ms,
                                                     binsize=1 * pq.ms)
        self.st_check_dimension = conv.BinnedSpikeTrain([self.st_1, self.st_2],
                                                        t_start=0 * pq.ms,
                                                        t_stop=50. * pq.ms,
                                                        binsize=1 * pq.ms)
Example #5
    def test_fpgrowth_fca(self):
        print("fim.so is found.")
        binary_matrix = conv.BinnedSpikeTrain(
            self.patt1, self.binsize).to_sparse_bool_array().tocoo()
        context, transactions, rel_matrix = spade._build_context(
            binary_matrix, self.winlen)
        # mining the data with the C fim module (FP-growth)
        mining_results_fpg = spade._fpgrowth(
            transactions,
            rel_matrix=rel_matrix)
        # mining the data with the Python fast_fca implementation
        mining_results_ffca = spade._fast_fca(context)

        # testing that the outputs are identical
        assert_array_equal(sorted(mining_results_ffca[0][0]), sorted(
            mining_results_fpg[0][0]))
        assert_array_equal(sorted(mining_results_ffca[0][1]), sorted(
            mining_results_fpg[0][1]))
Example #6
    def test_covariance_binned_short_input(self):
        '''
        Test if input list of only one binned spike train yields correct result
        that matches numpy.cov (covariance with itself)
        '''
        # Calculate correlation
        binned_st = conv.BinnedSpikeTrain(
            self.st_0, t_start=0 * pq.ms, t_stop=50. * pq.ms,
            binsize=1 * pq.ms)
        target = sc.covariance(binned_st)

        # Check result unclipped against result calculated by numpy.cov
        mat = binned_st.to_bool_array()
        target_numpy = np.cov(mat)

        # Check result and dimensionality of result.
        # Note: np.cov of a single spike train returns a 0-d array, so the
        # values can be compared as scalars with assertAlmostEqual.
        self.assertEqual(target.ndim, target_numpy.ndim)
        self.assertAlmostEqual(target, target_numpy)
Example #7
 def test_cad_raise_error(self):
     # test error data input format
     self.assertRaises(TypeError, cad.cell_assembly_detection,
                       data=[[1, 2, 3], [3, 4, 5]],
                       maxlag=self.maxlag)
     # test error significance level
     self.assertRaises(ValueError, cad.cell_assembly_detection,
                       data=conv.BinnedSpikeTrain(
                           [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s),
                            neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)],
                           binsize=self.binsize),
                       maxlag=self.maxlag,
                       alpha=-3)
     # test error minimum number of occurrences
     self.assertRaises(ValueError, cad.cell_assembly_detection,
                       data=conv.BinnedSpikeTrain(
                           [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s),
                            neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)],
                           binsize=self.binsize),
                       maxlag=self.maxlag,
                       min_occ=-1)
     # test error minimum number of spikes in a pattern
     self.assertRaises(ValueError, cad.cell_assembly_detection,
                       data=conv.BinnedSpikeTrain(
                           [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s),
                            neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)],
                           binsize=self.binsize),
                       maxlag=self.maxlag,
                       max_spikes=1)
     # test error chunk size for variance computation
     self.assertRaises(ValueError, cad.cell_assembly_detection,
                       data=conv.BinnedSpikeTrain(
                           [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s),
                            neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)],
                           binsize=self.binsize),
                       maxlag=self.maxlag,
                       size_chunks=1)
     # test error maximum lag
     self.assertRaises(ValueError, cad.cell_assembly_detection,
                       data=conv.BinnedSpikeTrain(
                           [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s),
                            neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)],
                           binsize=self.binsize),
                       maxlag=1)
     # test error minimum length spike train
     self.assertRaises(ValueError, cad.cell_assembly_detection,
                       data=conv.BinnedSpikeTrain(
                           [neo.SpikeTrain([1, 2, 3]*pq.ms, t_stop=6*pq.ms),
                            neo.SpikeTrain([3, 4, 5]*pq.ms,
                                           t_stop=6*pq.ms)],
                           binsize=1*pq.ms),
                       maxlag=self.maxlag)
Example #8
def te(mdf1):
    import numpy as np
    import quantities as pq
    import elephant.conversion as conv
    from idtxl.multivariate_te import MultivariateTE
    from idtxl.data import Data
    from idtxl import visualise_graph
    n_procs = 1

    # Note: this settings dict is redefined below before the analysis runs.
    settings = {
        'cmi_estimator': 'JidtDiscreteCMI',
        'n_perm_max_stat': 21,
        'max_lag_target': 5,
        'max_lag_sources': 5,
        'min_lag_sources': 4
    }
    binary_trains = []
    for spiketrain in mdf1.spiketrains:
        x = conv.BinnedSpikeTrain(spiketrain,
                                  binsize=5 * pq.ms,
                                  t_start=0 * pq.s)
        binary_trains.append(x.to_array())
    print(binary_trains)
    dat = Data(np.array(binary_trains), dim_order='spr')
    dat.n_procs = n_procs

    #import sklearn
    #NMF = sklearn.decomposition.NMF(sts)
    #print(NMF)

    settings = {
        'cmi_estimator': 'JidtKraskovCMI',
        'max_lag_sources': 3,
        'max_lag_target': 3,
        'min_lag_sources': 1
    }
    print(dat)
    mte = MultivariateTE()

    # res_single = mte.analyse_single_target(settings=settings, data=dat, target=3)
    res_full = mte.analyse_network(settings=settings, data=dat)

    # generate graph plots
    # plot_selected_vars would need res_single from the commented-out call above
    # g_single = visualise_graph.plot_selected_vars(res_single, mte)
    g_full = visualise_graph.plot_network(res_full)
Example #9
    def test_binned_spiketrain_bin_edges(self):
        a = self.spiketrain_a
        x = cv.BinnedSpikeTrain(a, binsize=1 * pq.s, num_bins=10,
                                t_stop=10. * pq.s)
        # Test all edges
        edges = [float(i) for i in range(11)]
        self.assertTrue(np.array_equal(x.bin_edges, edges))

        # Test left edges
        edges = [float(i) for i in range(10)]
        self.assertTrue(np.array_equal(x.bin_edges[:-1], edges))

        # Test right edges
        edges = [float(i) for i in range(1, 11)]
        self.assertTrue(np.array_equal(x.bin_edges[1:], edges))

        # Test bin centers
        edges = np.arange(0, 10) + 0.5
        self.assertTrue(np.array_equal(x.bin_centers, edges))
Example #10
    def test_empty_spike_train(self):
        '''
        Test whether a warning is raised in case of an empty spike train.
        Also check correctness of the output array.
        '''
        # st_2 is empty
        binned_12 = conv.BinnedSpikeTrain([self.st_1, self.st_2],
                                          binsize=1 * pq.ms)

        # test for a warning
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            ccmat = sc.corrcoef(binned_12)
            self.assertTrue(issubclass(w.pop().category, UserWarning))

        # test for NaNs in the output array
        target = np.zeros((2, 2)) * np.NaN
        target[0, 0] = 1.0
        assert_array_equal(ccmat, target)
Example #11
    def convert_one_population_to_rates(self, recordings_index, trial_index,
                                        brain_area):
        path = self.all_data_path + '/' + self.selected_recordings[
            recordings_index]
        trials = np.load(path + '/' + 'trials.intervals.npy')
        spike_times_lst = self.get_spikes_of_one_population(
            recordings_index, brain_area)

        rates_lst = []
        spk_tr_lst = []
        for spk_tms_one_neuron in spike_times_lst:
            spks_range = np.bitwise_and(
                spk_tms_one_neuron >= trials[trial_index][0],
                spk_tms_one_neuron <= trials[trial_index][1])
            subset = spk_tms_one_neuron[spks_range]

            # Create a neo.SpikeTrain object
            spk_tr = neo.SpikeTrain(subset * pq.s,
                                    t_start=trials[trial_index][0] * pq.s,
                                    t_stop=trials[trial_index][1] * pq.s)
            #plt.eventplot(spk_tr)
            #plt.show()
            kernel = kernels.GaussianKernel(sigma=0.1 * pq.s, invert=True)
            #sampling_rate the same as behavior
            r = instantaneous_rate(spk_tr,
                                   t_start=trials[trial_index][0] * pq.s,
                                   t_stop=trials[trial_index][1] * pq.s,
                                   sampling_period=0.02524578 * pq.s,
                                   kernel=kernel)  #cutoff=5.0)
            binned_spk_tr = conv.BinnedSpikeTrain(
                spk_tr,
                binsize=0.02524578 * pq.s,
                t_start=trials[trial_index][0] * pq.s)
            binned_spk_tr = binned_spk_tr.to_array()
            spk_tr_lst.append(binned_spk_tr)
            rates_lst.append(r.flatten())

        rates_lst = np.array(rates_lst)
        spk_tr_lst = np.array(spk_tr_lst)
        print(spk_tr_lst)
        #print(rates_lst.shape)
        return rates_lst, spk_tr_lst
Example #12
    def test_covariance_binned_short_input(self):
        """
        Test if input list of only one binned spike train yields correct result
        that matches numpy.cov (covariance with itself)
        """
        # Calculate correlation
        binned_st = conv.BinnedSpikeTrain(
            self.st_0, t_start=0 * pq.ms, t_stop=50. * pq.ms,
            bin_size=1 * pq.ms)
        result = sc.covariance(binned_st, binary=True, fast=False)

        # Check result unclipped against result calculated by numpy.cov
        mat = binned_st.to_bool_array()
        target = np.cov(mat)

        # Check result and dimensionality of result
        self.assertEqual(result.ndim, target.ndim)
        assert_array_almost_equal(result, target)
        assert_array_almost_equal(target,
                                  sc.covariance(binned_st, binary=True,
                                                fast=True))
Example #13
    def test_binned_spiketrain_neg_times_list(self):
        a = neo.SpikeTrain(
            [-6.5, 0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s,
            t_start=-7 * pq.s, t_stop=7 * pq.s)
        b = neo.SpikeTrain(
            [-0.1, -0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s,
            t_start=-1 * pq.s, t_stop=8 * pq.s)
        spiketrains = [a, b]

        # not the same t_start and t_stop
        self.assertRaises(ValueError, cv.BinnedSpikeTrain,
                          spiketrains=spiketrains,
                          bin_size=self.bin_size)
        t_start, t_stop = get_common_start_stop_times(spiketrains)
        self.assertEqual(t_start, -1 * pq.s)
        self.assertEqual(t_stop, 7 * pq.s)
        x_bool = cv.BinnedSpikeTrain(spiketrains, bin_size=self.bin_size,
                                     t_start=t_start, t_stop=t_stop)
        y_bool = [[0, 1, 1, 0, 1, 1, 1, 1],
                  [1, 0, 1, 1, 0, 1, 1, 0]]

        assert_array_equal(x_bool.to_bool_array(), y_bool)
Example #14
def st2trans(sts, wndlen, width):
    """
    Turn a list of spike trains into a list of transaction.

    Parameters
    ----------
    sts : list
    List of neo.Spike_trains to be converted
    wndlen : int
    length of sliding window
    width : quantity
    length of the binsize used to bin the data

    Returs
    --------
    trans : list
    List of all transactions, each element of the list contains the attributes
    of the corresponding object
    """
    # Bin the spike trains
    sts_bool = conv.BinnedSpikeTrain(sts, binsize=width).to_bool_array()
    MultiTimer("  st2trans bin spikes")

    # List of all the possible attributes (spikes)
    attributes = np.array(
        [s * wndlen + t for s in range(len(sts)) for t in range(wndlen)])
    trans = []

    # Assign to each object (window) its attributes (spikes)
    for w in range(sts_bool.shape[1] - wndlen + 1):
        currentWindow = sts_bool[:, w:w + wndlen]
        # only keep windows that start with a spike
        if np.add.reduce(currentWindow[:, 0]) == 0:
            continue
        trans.append(attributes[currentWindow.flatten()])
        MultiTimer("  st2trans assign attributes")
    return trans
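For context, here is a self-contained toy run of the same window-to-transaction logic (the MultiTimer calls are dropped and the input matrix is made up):

import numpy as np

wndlen = 2
# 2 spike trains x 5 bins; True marks a spike in that bin
sts_bool = np.array([[True, False, True, False, False],
                     [False, True, False, False, True]])
attributes = np.array([s * wndlen + t
                       for s in range(sts_bool.shape[0])
                       for t in range(wndlen)])
trans = []
for w in range(sts_bool.shape[1] - wndlen + 1):
    window = sts_bool[:, w:w + wndlen]
    # only keep windows whose first bin contains at least one spike
    if np.add.reduce(window[:, 0]) == 0:
        continue
    trans.append(attributes[window.flatten()])
print(trans)  # [array([0, 3]), array([1, 2]), array([0])]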
Example #15
    def test_binned_to_binned(self):
        a = self.spiketrain_a
        x = cv.BinnedSpikeTrain(a, bin_size=1 * pq.s).to_array()
        y = cv.BinnedSpikeTrain(x, bin_size=1 * pq.s, t_start=0 * pq.s)
        assert_array_equal(y.to_array(), x)

        # test with a list
        x = cv.BinnedSpikeTrain([[0, 1, 2, 3]],
                                bin_size=1 * pq.s,
                                t_stop=3 * pq.s).to_array()
        y = cv.BinnedSpikeTrain(x, bin_size=1 * pq.s, t_start=0 * pq.s)
        assert_array_equal(y.to_array(), x)

        # test with a numpy array
        a = np.array([[0, 1, 2, 3], [1, 2, 2.5, 3]])
        x = cv.BinnedSpikeTrain(a, bin_size=1 * pq.s,
                                t_stop=3 * pq.s).to_array()
        y = cv.BinnedSpikeTrain(x, bin_size=1 * pq.s, t_start=0 * pq.s)
        assert_array_equal(y.to_array(), x)

        # Check binary property
        self.assertFalse(y.is_binary)

        # Raise Errors
        # give a strangely shaped matrix as input (not MxN)
        a = np.array([[0, 1, 2, 3], [1, 2, 3]], dtype=object)
        self.assertRaises(ValueError,
                          cv.BinnedSpikeTrain,
                          a,
                          t_start=0 * pq.s,
                          bin_size=1 * pq.s)
        # Give no t_start or t_stop
        a = np.array([[0, 1, 2, 3], [1, 2, 3, 4]])
        self.assertRaises(ValueError,
                          cv.BinnedSpikeTrain,
                          a,
                          bin_size=1 * pq.s)
        # Input format not supported
        a = np.array(([0, 1, 2], [0, 1, 2, 3, 4]), dtype=object)
        self.assertRaises(ValueError,
                          cv.BinnedSpikeTrain,
                          a,
                          bin_size=1 * pq.s)
Example #16
    def test_binned_to_binned(self):
        a = self.spiketrain_a
        x = cv.BinnedSpikeTrain(a, binsize=1 * pq.s).to_array()
        y = cv.BinnedSpikeTrain(x, binsize=1 * pq.s, t_start=0 * pq.s)
        self.assertTrue(np.array_equal(x, y.to_array()))

        # test with a list
        x = cv.BinnedSpikeTrain([[0, 1, 2, 3]],
                                binsize=1 * pq.s,
                                t_stop=3 * pq.s).to_array()
        y = cv.BinnedSpikeTrain(x, binsize=1 * pq.s, t_start=0 * pq.s)
        self.assertTrue(np.array_equal(x, y.to_array()))

        # test with a numpy array
        a = np.array([[0, 1, 2, 3], [1, 2, 2.5, 3]])
        x = cv.BinnedSpikeTrain(a, binsize=1 * pq.s,
                                t_stop=3 * pq.s).to_array()
        y = cv.BinnedSpikeTrain(x, binsize=1 * pq.s, t_start=0 * pq.s)
        self.assertTrue(np.array_equal(x, y.to_array()))

        # Check binary property
        self.assertFalse(y.is_binary)

        # Raise Errors
        # give a strangely shaped matrix as input (not MxN), which should
        # produce a TypeError
        a = np.array([[0, 1, 2, 3], [1, 2, 3]])
        self.assertRaises(TypeError,
                          cv.BinnedSpikeTrain,
                          a,
                          t_start=0 * pq.s,
                          binsize=1 * pq.s)
        # Give no t_start or t_stop
        a = np.array([[0, 1, 2, 3], [1, 2, 3, 4]])
        self.assertRaises(AttributeError,
                          cv.BinnedSpikeTrain,
                          a,
                          binsize=1 * pq.s)
        # Input format not supported
        a = np.array(([0, 1, 2], [0, 1, 2, 3, 4]))
        self.assertRaises(TypeError, cv.BinnedSpikeTrain, a, binsize=1 * pq.s)
Example #17
def jointJ_window_analysis(data,
                           binsize,
                           winsize,
                           winstep,
                           pattern_hash,
                           method='analytic_TrialByTrial',
                           t_start=None,
                           t_stop=None,
                           binary=True,
                           **kwargs):
    """
    Calculates the joint surprise in a sliding window fashion

    Parameters:
    ----------
    data: list of neo.SpikeTrain objects
          list of spike trains in different trials
                                        0-axis --> Trials
                                        1-axis --> Neurons
                                        2-axis --> Spike times
    binsize: Quantity scalar with dimension time
           size of bins for descritizing spike trains
    winsize: Quantity scalar with dimension time
           size of the window of analysis
    winstep: Quantity scalar with dimension time
           size of the window step
    pattern_hash: list of integers
           list of interested patterns in hash values
           (see hash_from_pattern and inverse_hash_from_pattern functions)
    method: string
            method with which the unitary events whould be computed
            'analytic_TrialByTrial' -- > calculate the expectency
            (analytically) on each trial, then sum over all trials.
            'analytic_TrialAverage' -- > calculate the expectency
            by averaging over trials.
            (cf. Gruen et al. 2003)
            'surrogate_TrialByTrial' -- > calculate the distribution 
            of expected coincidences by spike time randomzation in 
            each trial and sum over trials.
            Default is 'analytic_trialByTrial'
    t_start: float or Quantity scalar, optional
             The start time to use for the time points.
             If not specified, retrieved from the `t_start`
             attribute of `spiketrain`.
    t_stop: float or Quantity scalar, optional
            The start time to use for the time points.
            If not specified, retrieved from the `t_stop`
            attribute of `spiketrain`.

    kwargs:
    -------
    n_surr: integer
            number of surrogate to be used
            Default is 100


    Returns:
    -------
    result: dictionary
          Js: list of float
                 JointSurprise of different given patterns within each window
                 shape: different pattern hash --> 0-axis
                        different window --> 1-axis
          indices: list of list of integers
                 list of indices of pattern within each window
                 shape: different pattern hash --> 0-axis
                        different window --> 1-axis
          n_emp: list of integers
                 empirical number of each observed pattern.
                 shape: different pattern hash --> 0-axis
                        different window --> 1-axis
          n_exp: list of floats
                 expeced number of each pattern.
                 shape: different pattern hash --> 0-axis
                        different window --> 1-axis
          rate_avg: list of floats
                 average firing rate of each neuron
                 shape: different pattern hash --> 0-axis
                        different window --> 1-axis

    """
    if not isinstance(data[0][0], neo.SpikeTrain):
        raise ValueError(
            "structure of the data is not correct: 0-axis should be trials, 1-axis units and 2-axis neo spike trains"
        )

    if t_start is None:
        t_start = data[0][0].t_start.rescale('ms')
    if t_stop is None:
        t_stop = data[0][0].t_stop.rescale('ms')

    # position of all windows (left edges)
    t_winpos = _winpos(t_start, t_stop, winsize, winstep, position='left-edge')
    t_winpos_bintime = _bintime(t_winpos, binsize)

    winsize_bintime = _bintime(winsize, binsize)
    winstep_bintime = _bintime(winstep, binsize)

    if winsize_bintime * binsize != winsize:
        warnings.warn("ratio between winsize and binsize is not integer -- "
                      "the actual number for window size is " +
                      str(winsize_bintime * binsize))

    if winstep_bintime * binsize != winstep:
        warnings.warn("ratio between winstep and binsize is not integer -- "
                      "the actual number for window step is " +
                      str(winstep_bintime * binsize))

    num_tr, N = np.shape(data)[:2]

    n_bins = int((t_stop - t_start) / binsize)

    mat_tr_unit_spt = np.zeros((len(data), N, n_bins))
    for tr, sts in enumerate(data):
        bs = conv.BinnedSpikeTrain(sts,
                                   t_start=t_start,
                                   t_stop=t_stop,
                                   binsize=binsize)
        if binary is True:
            mat = bs.to_bool_array()
        else:
            raise ValueError(
                "The method only works on the zero_one matrix at the moment")
        mat_tr_unit_spt[tr] = mat

    num_win = len(t_winpos)
    Js_win, n_exp_win, n_emp_win = (np.zeros(num_win) for _ in range(3))
    rate_avg = np.zeros((num_win, N))
    indices_win = {}
    for i in range(num_tr):
        indices_win['trial' + str(i)] = []

    for i, win_pos in enumerate(t_winpos_bintime):
        mat_win = mat_tr_unit_spt[:, :, win_pos:win_pos + winsize_bintime]
        if method == 'surrogate_TrialByTrial':
            if 'n_surr' in kwargs:
                n_surr = kwargs['n_surr']
            else:
                n_surr = 100
            Js_win[i], rate_avg[i], n_exp_win[i], n_emp_win[
                i], indices_lst = _UE(mat_win,
                                      N,
                                      pattern_hash,
                                      method,
                                      n_surr=n_surr)
        else:
            Js_win[i], rate_avg[i], n_exp_win[i], n_emp_win[
                i], indices_lst = _UE(mat_win, N, pattern_hash, method)
        for j in range(num_tr):
            if len(indices_lst[j][0]) > 0:
                indices_win['trial' + str(j)] = np.append(
                    indices_win['trial' + str(j)], indices_lst[j][0] + win_pos)
    return {
        'Js': Js_win,
        'indices': indices_win,
        'n_emp': n_emp_win,
        'n_exp': n_exp_win,
        'rate_avg': rate_avg / binsize
    }
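A hypothetical usage sketch for the function above (assuming it is exposed as elephant.unitary_event_analysis.jointJ_window_analysis with this older signature, and that hash_from_pattern takes an N argument as in the same Elephant versions; the keywords were renamed bin_size/win_size/win_step in later releases):

import neo
import quantities as pq
import elephant.unitary_event_analysis as ue

# two trials x two neurons of toy spike times
data = [[neo.SpikeTrain([10, 55, 120] * pq.ms, t_stop=200 * pq.ms),
         neo.SpikeTrain([12, 56, 130] * pq.ms, t_stop=200 * pq.ms)],
        [neo.SpikeTrain([30, 80, 150] * pq.ms, t_stop=200 * pq.ms),
         neo.SpikeTrain([32, 81, 150] * pq.ms, t_stop=200 * pq.ms)]]
# hash of the pattern "both neurons fire together"
pattern_hash = [ue.hash_from_pattern([1, 1], N=2)]
result = ue.jointJ_window_analysis(data, binsize=5 * pq.ms,
                                   winsize=100 * pq.ms, winstep=20 * pq.ms,
                                   pattern_hash=pattern_hash)
print(result['Js'])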
Example #18
# Rates
cc['rate'] = [
    elephant.statistics.mean_firing_rate(st).rescale("Hz").magnitude
    for st in sts]

# CV and LV
isis = [elephant.statistics.isi(st) for st in sts]

cc['cv'] = [elephant.statistics.cv(isi) for isi in isis if len(isi) > 1]
cc['lv'] = [elephant.statistics.lv(isi) for isi in isis if len(isi) > 1]


# original corrcoeff
t0 = time.time()
cco = stc.corrcoef(conv.BinnedSpikeTrain(sts, lag_res))
cco_neg = cco * [cco < 0] + [cco > 0]
cco_pos = cco * [cco > 0] + [cco < 0] * np.array([-1])
cc['corr_coeff'] = cco


print('Computed corrcoef')
t1 = time.time()
surr = [elephant.spike_train_surrogates.dither_spike_train(
        st, shift=50. * pq.ms, n=num_surrs) for st in sts]

print('Generated surrogates')
t2 = time.time()
cco_surr = []
for idx_surr in range(num_surrs):
    cco_surr.append(stc.corrcoef(conv.BinnedSpikeTrain([
Example #19
    def test_cross_correlation_histogram(self):
        '''
        Test generic result of a cross-correlation histogram between two binned
        spike trains.
        '''
        # Calculate CCH using Elephant (normal and binary version) with
        # mode equal to 'full' (whole spike trains are correlated)
        cch_clipped, bin_ids_clipped = sc.cross_correlation_histogram(
            self.binned_st1, self.binned_st2, window='full',
            binary=True)
        cch_unclipped, bin_ids_unclipped = sc.cross_correlation_histogram(
            self.binned_st1, self.binned_st2, window='full', binary=False)

        cch_clipped_mem, bin_ids_clipped_mem = sc.cross_correlation_histogram(
            self.binned_st1, self.binned_st2, window='full',
            binary=True, method='memory')
        cch_unclipped_mem, bin_ids_unclipped_mem = \
            sc.cross_correlation_histogram(
                self.binned_st1, self.binned_st2, window='full',
                binary=False, method='memory')
        # Check consistency of the two methods
        assert_array_equal(
            np.squeeze(cch_clipped.magnitude), np.squeeze(
                cch_clipped_mem.magnitude))
        assert_array_equal(
            np.squeeze(cch_clipped.times), np.squeeze(
                cch_clipped_mem.times))
        assert_array_equal(
            np.squeeze(cch_unclipped.magnitude), np.squeeze(
                cch_unclipped_mem.magnitude))
        assert_array_equal(
            np.squeeze(cch_unclipped.times), np.squeeze(
                cch_unclipped_mem.times))
        assert_array_almost_equal(bin_ids_clipped, bin_ids_clipped_mem)
        assert_array_almost_equal(bin_ids_unclipped, bin_ids_unclipped_mem)

        # Check normal correlation. Note: use numpy.correlate to verify the
        # result. numpy's conventions for input array 1 and input array 2 are
        # swapped compared to Elephant!
        mat1 = self.binned_st1.to_array()[0]
        mat2 = self.binned_st2.to_array()[0]
        target_numpy = np.correlate(mat2, mat1, mode='full')
        assert_array_equal(
            target_numpy, np.squeeze(cch_unclipped.magnitude))

        # Check the cross-correlation function for several displacements tau.
        # Note: use Elephant's corrcoef to verify the result.
        tau = [-25.0, 0.0, 13.0]  # in ms
        for t in tau:
            # adjust t_start, t_stop to shift by tau
            t0 = np.min([self.st_1.t_start + t * pq.ms, self.st_2.t_start])
            t1 = np.max([self.st_1.t_stop + t * pq.ms, self.st_2.t_stop])
            st1 = neo.SpikeTrain(self.st_1.magnitude + t, units='ms',
                                 t_start=t0 * pq.ms, t_stop=t1 * pq.ms)
            st2 = neo.SpikeTrain(self.st_2.magnitude, units='ms',
                                 t_start=t0 * pq.ms, t_stop=t1 * pq.ms)
            binned_sts = conv.BinnedSpikeTrain([st1, st2],
                                               binsize=1 * pq.ms,
                                               t_start=t0 * pq.ms,
                                               t_stop=t1 * pq.ms)
            # calculate corrcoef
            corrcoef = sc.corrcoef(binned_sts)[1, 0]

            # expand t_stop to have two spike trains with same length as st1,
            # st2
            st1 = neo.SpikeTrain(self.st_1.magnitude, units='ms',
                                 t_start=self.st_1.t_start,
                                 t_stop=self.st_1.t_stop + np.abs(t) * pq.ms)
            st2 = neo.SpikeTrain(self.st_2.magnitude, units='ms',
                                 t_start=self.st_2.t_start,
                                 t_stop=self.st_2.t_stop + np.abs(t) * pq.ms)
            binned_st1 = conv.BinnedSpikeTrain(
                st1, t_start=0 * pq.ms, t_stop=(50 + np.abs(t)) * pq.ms,
                binsize=1 * pq.ms)
            binned_st2 = conv.BinnedSpikeTrain(
                st2, t_start=0 * pq.ms, t_stop=(50 + np.abs(t)) * pq.ms,
                binsize=1 * pq.ms)
            # calculate CCHcoef and take value at t=tau
            CCHcoef, _ = sc.cch(binned_st1, binned_st2,
                                cross_corr_coef=True)
            left_edge = - binned_st1.num_bins + 1
            tau_bin = int(t / float(binned_st1.binsize.magnitude))
            assert_array_equal(
                corrcoef, CCHcoef[tau_bin - left_edge].magnitude)

        # Check correlation using binary spike trains
        mat1 = np.array(self.binned_st1.to_bool_array()[0], dtype=int)
        mat2 = np.array(self.binned_st2.to_bool_array()[0], dtype=int)
        target_numpy = np.correlate(mat2, mat1, mode='full')
        assert_array_equal(
            target_numpy, np.squeeze(cch_clipped.magnitude))

        # Check the time axis and bin IDs of the resulting AnalogSignal
        assert_array_almost_equal(
            (bin_ids_clipped - 0.5) * self.binned_st1.binsize,
            cch_unclipped.times)
        assert_array_almost_equal(
            (bin_ids_clipped - 0.5) * self.binned_st1.binsize,
            cch_clipped.times)

        # Calculate CCH using Elephant (normal and binary version) with
        # mode equal to 'valid' (only completely overlapping intervals of the
        # spike trains are correlated)
        cch_clipped, bin_ids_clipped = sc.cross_correlation_histogram(
            self.binned_st1, self.binned_st2, window='valid',
            binary=True)
        cch_unclipped, bin_ids_unclipped = sc.cross_correlation_histogram(
            self.binned_st1, self.binned_st2, window='valid',
            binary=False)
        cch_clipped_mem, bin_ids_clipped_mem = sc.cross_correlation_histogram(
            self.binned_st1, self.binned_st2, window='valid',
            binary=True, method='memory')
        cch_unclipped_mem, bin_ids_unclipped_mem = \
            sc.cross_correlation_histogram(
                self.binned_st1, self.binned_st2, window='valid',
                binary=False, method='memory')

        # Check consistency of the two methods
        assert_array_equal(
            np.squeeze(cch_clipped.magnitude), np.squeeze(
                cch_clipped_mem.magnitude))
        assert_array_equal(
            np.squeeze(cch_clipped.times), np.squeeze(
                cch_clipped_mem.times))
        assert_array_equal(
            np.squeeze(cch_unclipped.magnitude), np.squeeze(
                cch_unclipped_mem.magnitude))
        assert_array_equal(
            np.squeeze(cch_unclipped.times), np.squeeze(
                cch_unclipped_mem.times))
        assert_array_equal(bin_ids_clipped, bin_ids_clipped_mem)
        assert_array_equal(bin_ids_unclipped, bin_ids_unclipped_mem)

        # Check normal correlation. Note: use numpy.correlate to verify the
        # result. numpy's conventions for input array 1 and input array 2 are
        # swapped compared to Elephant!
        mat1 = self.binned_st1.to_array()[0]
        mat2 = self.binned_st2.to_array()[0]
        target_numpy = np.correlate(mat2, mat1, mode='valid')
        assert_array_equal(
            target_numpy, np.squeeze(cch_unclipped.magnitude))

        # Check correlation using binary spike trains
        mat1 = np.array(self.binned_st1.to_bool_array()[0], dtype=int)
        mat2 = np.array(self.binned_st2.to_bool_array()[0], dtype=int)
        target_numpy = np.correlate(mat2, mat1, mode='valid')
        assert_array_equal(
            target_numpy, np.squeeze(cch_clipped.magnitude))

        # Check the time axis and bin IDs of the resulting AnalogSignal
        assert_array_equal(
            (bin_ids_clipped - 0.5) * self.binned_st1.binsize,
            cch_unclipped.times)
        assert_array_equal(
            (bin_ids_clipped - 0.5) * self.binned_st1.binsize,
            cch_clipped.times)

        # Check for wrong window parameter setting
        self.assertRaises(
            KeyError, sc.cross_correlation_histogram, self.binned_st1,
            self.binned_st2, window='dsaij')
        self.assertRaises(
            KeyError, sc.cross_correlation_histogram, self.binned_st1,
            self.binned_st2, window='dsaij', method='memory')
Example #20
import matplotlib.pyplot as plt
import elephant.conversion as conv
import elephant.spike_train_generation
import quantities as pq
import numpy as np
import elephant.cell_assembly_detection as cad
np.random.seed(30)
# Generate correlated data and bin it with a binsize of 10ms
sts = elephant.spike_train_generation.cpp(rate=15 * pq.Hz,
                                          A=[0] + [0.95] + [0] * 4 + [0.05],
                                          t_stop=10 * pq.s)
binsize = 10 * pq.ms
spM = conv.BinnedSpikeTrain(sts, binsize=binsize)
# Call of the method
patterns = cad.cell_assembly_detection(spM, maxlag=2)[0]
# plotting
plt.figure()
for neu in patterns['neurons']:
    if neu == 0:
        plt.plot(patterns['times'] * binsize, [neu] * len(patterns['times']),
                 'ro',
                 label='pattern')
    else:
        plt.plot(patterns['times'] * binsize, [neu] * len(patterns['times']),
                 'ro')
# Raster plot of the data
for st_idx, st in enumerate(sts):
    if st_idx == 0:
        plt.plot(st.rescale(pq.ms), [st_idx] * len(st), 'k.', label='spikes')
    else:
        plt.plot(st.rescale(pq.ms), [st_idx] * len(st), 'k.')
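The demo stops before the figure is finalized; a minimal, hypothetical completion (axis labels are guesses, matplotlib is already imported above) would be:

plt.xlabel('time (ms)')
plt.ylabel('neuron id')
plt.legend()
plt.show()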
Example #21
def time_histogram(spiketrains,
                   binsize,
                   t_start=None,
                   t_stop=None,
                   output='counts',
                   binary=False):
    """
    Time Histogram of a list of :attr:`neo.SpikeTrain` objects.

    Parameters
    ----------
    spiketrains : List of neo.SpikeTrain objects
        Spiketrains with a common time axis (same `t_start` and `t_stop`)
    binsize : quantities.Quantity
        Width of the histogram's time bins.
    t_start, t_stop : Quantity (optional)
        Start and stop time of the histogram. Only events in the input
        `spiketrains` falling between `t_start` and `t_stop` (both included)
        are considered in the histogram. If `t_start` and/or `t_stop` are not
        specified, the maximum `t_start` of all `spiketrains` is used as
        `t_start`, and the minimum `t_stop` is used as `t_stop`.
        Default: t_start = t_stop = None
    output : str (optional)
        Normalization of the histogram. Can be one of:
        * 'counts': spike counts at each bin (as integer numbers)
        * 'mean': mean spike counts per spike train
        * 'rate': mean spike rate per spike train. Like 'mean', but the
          counts are additionally normalized by the bin width.
    binary : bool (optional)
        If **True**, indicates whether all spiketrain objects should first
        be binned to a binary representation (using the `BinnedSpikeTrain`
        class in the `conversion` module) and the calculation of the
        histogram is based on this representation.
        Note that the output is not binary, but a histogram of the converted,
        binary representation.
        Default: False

    Returns
    -------
    time_hist : neo.AnalogSignal
        A neo.AnalogSignal object containing the histogram values.
        `AnalogSignal[j]` is the histogram computed between
        `t_start + j * binsize` and `t_start + (j + 1) * binsize`.

    See also
    --------
    elephant.conversion.BinnedSpikeTrain
    """
    min_tstop = 0
    if t_start is None:
        # Find the internal range for t_start, where all spike trains are
        # defined; cut all spike trains taking that time range only
        max_tstart, min_tstop = conv._get_start_stop_from_input(spiketrains)
        t_start = max_tstart
        if not all([max_tstart == t.t_start for t in spiketrains]):
            warnings.warn("Spiketrains have different t_start values -- "
                          "using maximum t_start as t_start.")

    if t_stop is None:
        # Find the internal range for t_stop
        if min_tstop:
            t_stop = min_tstop
            if not all([min_tstop == t.t_stop for t in spiketrains]):
                warnings.warn("Spiketrains have different t_stop values -- "
                              "using minimum t_stop as t_stop.")
        else:
            min_tstop = conv._get_start_stop_from_input(spiketrains)[1]
            t_stop = min_tstop
            if not all([min_tstop == t.t_stop for t in spiketrains]):
                warnings.warn("Spiketrains have different t_stop values -- "
                              "using minimum t_stop as t_stop.")

    sts_cut = [
        st.time_slice(t_start=t_start, t_stop=t_stop) for st in spiketrains
    ]

    # Bin the spike trains and sum across columns
    bs = conv.BinnedSpikeTrain(sts_cut,
                               t_start=t_start,
                               t_stop=t_stop,
                               binsize=binsize)

    if binary:
        bin_hist = bs.to_sparse_bool_array().sum(axis=0)
    else:
        bin_hist = bs.to_sparse_array().sum(axis=0)
    # Flatten array
    bin_hist = np.ravel(bin_hist)
    # Renormalise the histogram
    if output == 'counts':
        # Raw
        bin_hist = bin_hist * pq.dimensionless
    elif output == 'mean':
        # Divide by number of input spike trains
        bin_hist = bin_hist * 1. / len(spiketrains) * pq.dimensionless
    elif output == 'rate':
        # Divide by number of input spike trains and bin width
        bin_hist = bin_hist * 1. / len(spiketrains) / binsize
    else:
        raise ValueError('Parameter output is not valid.')

    return neo.AnalogSignal(signal=bin_hist.reshape(bin_hist.size, 1),
                            sampling_period=binsize,
                            units=bin_hist.units,
                            t_start=t_start)
Example #22
def time_histogram(spiketrains, bin_size, t_start=None, t_stop=None,
                   output='counts', binary=False):
    """
    Time Histogram of a list of `neo.SpikeTrain` objects.

    Parameters
    ----------
    spiketrains : list of neo.SpikeTrain
        `neo.SpikeTrain`s with a common time axis (same `t_start` and `t_stop`)
    bin_size : pq.Quantity
        Width of the histogram's time bins.
    t_start : pq.Quantity, optional
        Start time of the histogram. Only events in `spiketrains` falling
        between `t_start` and `t_stop` (both included) are considered in the
        histogram.
        If None, the maximum `t_start` of all `neo.SpikeTrain`s is used as
        `t_start`.
        Default: None.
    t_stop : pq.Quantity, optional
        Stop time of the histogram. Only events in `spiketrains` falling
        between `t_start` and `t_stop` (both included) are considered in the
        histogram.
        If None, the minimum `t_stop` of all `neo.SpikeTrain`s is used as
        `t_stop`.
        Default: None.
    output : {'counts', 'mean', 'rate'}, optional
        Normalization of the histogram. Can be one of:
        * 'counts': spike counts at each bin (as integer numbers)
        * 'mean': mean spike counts per spike train
        * 'rate': mean spike rate per spike train. Like 'mean', but the
          counts are additionally normalized by the bin width.
        Default: 'counts'.
    binary : bool, optional
        If True, indicates whether all `neo.SpikeTrain` objects should first
        be binned to a binary representation (using the
        `conversion.BinnedSpikeTrain` class) and the calculation of the
        histogram is based on this representation.
        Note that the output is not binary, but a histogram of the converted,
        binary representation.
        Default: False.

    Returns
    -------
    neo.AnalogSignal
        A `neo.AnalogSignal` object containing the histogram values.
        `neo.AnalogSignal[j]` is the histogram computed between
        `t_start + j * bin_size` and `t_start + (j + 1) * bin_size`.

    Raises
    ------
    ValueError
        If `output` is not 'counts', 'mean' or 'rate'.

    Warns
    -----
    UserWarning
        If `t_start` is None and the objects in `spiketrains` have different
        `t_start` values.
        If `t_stop` is None and the objects in `spiketrains` have different
        `t_stop` values.

    See also
    --------
    elephant.conversion.BinnedSpikeTrain

    """
    min_tstop = 0
    if t_start is None:
        # Find the internal range for t_start, where all spike trains are
        # defined; cut all spike trains taking that time range only
        max_tstart, min_tstop = conv._get_start_stop_from_input(spiketrains)
        t_start = max_tstart
        if not all([max_tstart == t.t_start for t in spiketrains]):
            warnings.warn(
                "Spiketrains have different t_start values -- "
                "using maximum t_start as t_start.")

    if t_stop is None:
        # Find the internal range for t_stop
        if not min_tstop:
            min_tstop = conv._get_start_stop_from_input(spiketrains)[1]
        t_stop = min_tstop
        if not all([min_tstop == t.t_stop for t in spiketrains]):
            warnings.warn(
                "Spiketrains have different t_stop values -- "
                "using minimum t_stop as t_stop.")

    sts_cut = [st.time_slice(t_start=t_start, t_stop=t_stop) for st in
               spiketrains]

    # Bin the spike trains and sum across columns
    bs = conv.BinnedSpikeTrain(sts_cut, t_start=t_start, t_stop=t_stop,
                               bin_size=bin_size)

    if binary:
        bin_hist = bs.to_sparse_bool_array().sum(axis=0)
    else:
        bin_hist = bs.to_sparse_array().sum(axis=0)
    # Flatten array
    bin_hist = np.ravel(bin_hist)
    # Renormalise the histogram
    if output == 'counts':
        # Raw
        bin_hist = bin_hist * pq.dimensionless
    elif output == 'mean':
        # Divide by number of input spike trains
        bin_hist = bin_hist * 1. / len(spiketrains) * pq.dimensionless
    elif output == 'rate':
        # Divide by number of input spike trains and bin width
        bin_hist = bin_hist * 1. / len(spiketrains) / bin_size
    else:
        raise ValueError('Parameter output is not valid.')

    return neo.AnalogSignal(signal=np.expand_dims(bin_hist, axis=1),
                            sampling_period=bin_size, units=bin_hist.units,
                            t_start=t_start)
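A brief usage sketch for the function above (assuming it is importable as elephant.statistics.time_histogram; older releases spell the keyword binsize instead of bin_size, as in Example #21):

import neo
import quantities as pq
from elephant.statistics import time_histogram

st1 = neo.SpikeTrain([0.5, 1.2, 2.3, 4.8] * pq.s, t_stop=5 * pq.s)
st2 = neo.SpikeTrain([0.7, 2.4, 3.1] * pq.s, t_stop=5 * pq.s)
hist = time_histogram([st1, st2], bin_size=1 * pq.s, output='rate')
print(hist.magnitude.ravel())  # mean firing rate per train in each 1-s bin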
Example #23
 def test_corrcoef_fast_mode(self):
     np.random.seed(27)
     st = homogeneous_poisson_process(rate=10 * pq.Hz, t_stop=10 * pq.s)
     binned_st = conv.BinnedSpikeTrain(st, num_bins=10)
     assert_array_almost_equal(sc.corrcoef(binned_st, fast=False),
                               sc.corrcoef(binned_st, fast=True))
Example #24
def compute_profiling_time(key, expected_num_spikes, rate, t_stop, n,
                           winlen, binsize, num_rep=10):
    """
    Function computing the profiling time needed to run SPADE on artificial
    poisson data of given rate, recording time, and number of neurons
    Parameters
    ----------
    key: list
        list of keys of the varying variable of the profiling analysis.
        Maximum of three keys, can be either 'neurons', 'time' and
        'rate'.
    expected_num_spikes: int
        expected number of spikes of the generated spike train
    rate: quantity
        rate of the poisson process
    t_stop: quantity
        duration of the spike trains
    n: int
        number of spike trains
    winlen: int
        window length for the SPADE analysis
    binsize: quantity
        binsize for the SPADE analysis
    num_rep: int
        number of repetitions of
    """

    time_fast_fca = 0.
    time_fpgrowth = 0.
    for rep in range(num_rep):
        # Generating artificial data
        data = []
        for i in range(n):
            np.random.seed(0)
            data.append(stg.homogeneous_poisson_process(
                rate=rate, t_start=0*pq.s, t_stop=t_stop))

        # Extracting Closed Frequent Itemset with FP-Growth
        t0 = time.time()
        # Binning the data and clipping (binary matrix)
        binary_matrix = conv.BinnedSpikeTrain(data, binsize).to_bool_array()
        # Computing the context and the binary matrix encoding the relation
        # between objects (window positions) and attributes (spikes,
        # indexed with a number equal to  neuron idx*winlen+bin idx)
        context, transactions, rel_matrix = spade._build_context(binary_matrix,
                                                                 winlen)
        # Applying FP-Growth
        fim_results = [i for i in spade._fpgrowth(
                    transactions,
                    rel_matrix=rel_matrix,
                    winlen=winlen)]
        time_fpgrowth += time.time() - t0

        # Extracting Closed Frequent Itemset with Fast_fca
        t1 = time.time()
        # Binning the data and clipping (binary matrix)
        binary_matrix = conv.BinnedSpikeTrain(data, binsize).to_bool_array()
        # Computing the context and the binary matrix encoding the relation
        # between objects (window positions) and attributes (spikes,
        # indexed with a number equal to  neuron idx*winlen+bin idx)
        context, transactions, rel_matrix = \
            spade._build_context(binary_matrix, winlen)
        # Applying Fast-FCA
        fim_results = spade._fast_fca(context, winlen=winlen)
        time_fast_fca += time.time() - t1

    time_profiles = {'fp_growth': time_fpgrowth/num_rep,
                     'fast_fca': time_fast_fca/num_rep}

    # Storing data
    res_path = '../results/{}/{}/'.format(key, expected_num_spikes)
    # Create the path if it does not already exist
    path_temp = './'
    for folder in split_path(res_path):
        path_temp = path_temp + '/' + folder
        mkdirp(path_temp)

    np.save(res_path + '/profiling_results.npy', {'results': time_profiles,
            'parameters': {'rate': rate, 't_stop': t_stop, 'n': n,
                           'winlen': winlen, 'binsize': binsize}})
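A hypothetical invocation of the profiling helper above (all values are illustrative; the call additionally requires the spade module and the split_path/mkdirp helpers used in the body, and writes its results under ../results/):

compute_profiling_time(key='neurons', expected_num_spikes=1500,
                       rate=10 * pq.Hz, t_stop=15 * pq.s, n=10,
                       winlen=5, binsize=1 * pq.ms, num_rep=2)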
Example #25
PvSpec = fima_stp.pvspec(sts,
                         wndlen,
                         width,
                         dither=dither,
                         n=n_surr,
                         min_z=3,
                         min_c=3)

MultiTimer("Bootstrap 1")
nsSgnt = fima_stp.sspec(PvSpec, alpha, corr='fdr', report='e')

MultiTimer("Bootstrap 2")

# Conversion of data in transaction (input format for FP-growth algorithm)
binned_sts = conv.BinnedSpikeTrain(sts, width).to_array()
context, rel_matrix = fima_stp.buildContext(binned_sts, wndlen)
Trans = fima_stp.st2trans(sts, wndlen, width=width)

MultiTimer("Conversion to transaction")

# Mining the data with FP-growth algorithm
concepts_int = [
    i[0]
    for i in fima_stp.fpgrowth(Trans, target='c', min_z=3, min_c=3, report='a')
]

MultiTimer("FP-growth")

# Computing the stability measure (computationally expensive)
concepts = fima_stp._approximate_stability_extensional(concepts_int,
Example #26
def jointJ_window_analysis(spiketrains, bin_size=5 * pq.ms,
                           win_size=100 * pq.ms, win_step=5 * pq.ms,
                           pattern_hash=None, method='analytic_TrialByTrial',
                           t_start=None, t_stop=None, binary=True,
                           n_surrogates=100):
    """
    Calculates the joint surprise in a sliding window fashion.

    Implementation is based on :cite:`unitary_event_analysis-Gruen99_67`.

    Parameters
    ----------
    spiketrains : list
        A list of spike trains (`neo.SpikeTrain` objects) in different trials:
          * 0-axis --> Trials

          * 1-axis --> Neurons

          * 2-axis --> Spike times
    bin_size : pq.Quantity, optional
        The size of bins for discretizing spike trains.
        Default: 5 ms
    win_size : pq.Quantity, optional
        The size of the window of analysis.
        Default: 100 ms
    win_step : pq.Quantity, optional
        The size of the window step.
        Default: 5 ms
    pattern_hash : int or list of int or None, optional
        A list of patterns of interest in hash values (see `hash_from_pattern`
        and `inverse_hash_from_pattern` functions). If None, all neurons
        participate in the pattern.
        Default: None
    method : str, optional
        The method with which to compute the unitary events:
          * 'analytic_TrialByTrial': calculate the analytical expectancy
            on each trial, then sum over all trials;

          * 'analytic_TrialAverage': calculate the expectancy by averaging over
            trials (cf. Gruen et al. 2003);

          * 'surrogate_TrialByTrial': calculate the distribution of expected
            coincidences by spike time randomization in each trial and sum over
            trials.
        Default: 'analytic_TrialByTrial'
    t_start, t_stop : float or pq.Quantity, optional
        The start and stop times to use for the time points.
        If not specified, retrieved from the `t_start` and `t_stop` attributes
        of the input spiketrains.
    binary : bool, optional
        Binarize the binned spike train objects (True) or not. Only the binary
        matrices are supported at the moment.
        Default: True
    n_surrogates : int, optional
        The number of surrogates to be used.
        Default: 100

    Returns
    -------
    dict
        The values of the following keys have the shape of

          * different window --> 0-axis
          * different pattern hash --> 1-axis

        'Js': list of float
          JointSurprise of different given patterns within each window.
        'indices': list of list of int
          A list of indices of pattern within each window.
        'n_emp': list of int
          The empirical number of each observed pattern.
        'n_exp': list of float
          The expected number of each pattern.
        'rate_avg': list of float
          The average firing rate of each neuron.

        Additionally, 'input_parameters' key stores the input parameters.

    Raises
    ------
    ValueError
        If `spiketrains` is not in the format specified above.
    NotImplementedError
        If `binary` is not True. The method works only with binary matrices at
        the moment.

    Warns
    -----
    UserWarning
        The ratio between `win_size` or `win_step` and `bin_size` is not an
        integer.

    """
    if not isinstance(spiketrains[0][0], neo.SpikeTrain):
        raise ValueError(
            "structure of the data is not correct: 0-axis should be trials, "
            "1-axis units and 2-axis neo spike trains")

    if t_start is None:
        t_start = spiketrains[0][0].t_start
    if t_stop is None:
        t_stop = spiketrains[0][0].t_stop

    n_trials = len(spiketrains)
    n_neurons = len(spiketrains[0])
    if pattern_hash is None:
        pattern = [1] * n_neurons
        pattern_hash = hash_from_pattern(pattern)
    if np.issubdtype(type(pattern_hash), np.integer):
        pattern_hash = [int(pattern_hash)]

    # position of all windows (left edges)
    t_winpos = _winpos(t_start, t_stop, win_size, win_step,
                       position='left-edge')
    t_winpos_bintime = _bintime(t_winpos, bin_size)

    winsize_bintime = _bintime(win_size, bin_size)
    winstep_bintime = _bintime(win_step, bin_size)

    if winsize_bintime * bin_size != win_size:
        warnings.warn(f"The ratio between the win_size ({win_size}) and the "
                      f"bin_size ({bin_size}) is not an integer")

    if winstep_bintime * bin_size != win_step:
        warnings.warn(f"The ratio between the win_step ({win_step}) and the "
                      f"bin_size ({bin_size}) is not an integer")

    input_parameters = dict(pattern_hash=pattern_hash, bin_size=bin_size,
                            win_size=win_size, win_step=win_step,
                            method=method, t_start=t_start, t_stop=t_stop,
                            n_surrogates=n_surrogates)

    n_bins = int(((t_stop - t_start) / bin_size).simplified.item())

    mat_tr_unit_spt = np.zeros((len(spiketrains), n_neurons, n_bins),
                               dtype=np.int32)
    for trial, sts in enumerate(spiketrains):
        bs = conv.BinnedSpikeTrain(list(sts), t_start=t_start, t_stop=t_stop,
                                   bin_size=bin_size)
        if not binary:
            raise NotImplementedError(
                "The method works only with binary matrices at the moment")
        mat_tr_unit_spt[trial] = bs.to_bool_array()

    n_windows = len(t_winpos)
    n_hashes = len(pattern_hash)
    Js_win, n_exp_win, n_emp_win = np.zeros((3, n_windows, n_hashes),
                                            dtype=np.float32)
    rate_avg = np.zeros((n_windows, n_hashes, n_neurons), dtype=np.float32)
    indices_win = defaultdict(list)

    for i, win_pos in enumerate(t_winpos_bintime):
        mat_win = mat_tr_unit_spt[:, :, win_pos:win_pos + winsize_bintime]
        Js_win[i], rate_avg[i], n_exp_win[i], n_emp_win[
            i], indices_lst = _UE(mat_win, pattern_hash=pattern_hash,
                                  method=method, n_surrogates=n_surrogates)
        for j in range(n_trials):
            if len(indices_lst[j][0]) > 0:
                indices_win[f"trial{j}"].append(indices_lst[j][0] + win_pos)
    for key in indices_win.keys():
        indices_win[key] = np.hstack(indices_win[key])
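    # rate_avg from _UE is an average spike count per bin; dividing by
    # bin_size converts it to a firing rate.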
    return {'Js': Js_win, 'indices': indices_win, 'n_emp': n_emp_win,
            'n_exp': n_exp_win, 'rate_avg': rate_avg / bin_size,
            'input_parameters': input_parameters}
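
# A minimal usage sketch of the function above. It assumes the function is
# importable as elephant.unitary_event_analysis.jointJ_window_analysis (as
# in recent Elephant releases); the randomly generated spike trains are
# purely illustrative.
import numpy as np
import quantities as pq
import neo
from elephant.unitary_event_analysis import jointJ_window_analysis

np.random.seed(0)
# two trials x two neurons, spike times drawn uniformly in [0, 1000) ms
trials = [[neo.SpikeTrain(np.sort(np.random.uniform(0, 1000, 20)) * pq.ms,
                          t_stop=1000 * pq.ms)
           for _ in range(2)]
          for _ in range(2)]
ue = jointJ_window_analysis(trials, bin_size=5 * pq.ms,
                            win_size=100 * pq.ms, win_step=20 * pq.ms)
print(ue['Js'].shape)  # (n_windows, n_pattern_hashes)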
Example #27
    def setUp(self):
        np.random.seed(0)
        # Spade parameters
        self.bin_size = 1 * pq.ms
        self.winlen = 10
        self.n_subset = 10
        self.n_surr = 10
        self.alpha = 0.05
        self.stability_thresh = [0.1, 0.1]
        self.psr_param = [0, 0, 0]
        self.min_occ = 4
        self.min_spikes = 4
        self.max_occ = 4
        self.max_spikes = 4
        self.min_neu = 4
        # Test data parameters
        # CPP parameters
        self.n_neu = 100
        self.amplitude = [0] * self.n_neu + [1]
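        # Note: with this amplitude distribution, the probability that a
        # CPP event has amplitude n_neu is 1, i.e. every background event
        # is a spike synchronous across all 100 neurons.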
        self.cpp = stg.cpp(rate=3 * pq.Hz,
                           amplitude_distribution=self.amplitude,
                           t_stop=5 * pq.s)
        # Number of patterns' occurrences
        self.n_occ1 = 10
        self.n_occ2 = 12
        self.n_occ3 = 15
        # Patterns lags
        self.lags1 = [2]
        self.lags2 = [1, 3]
        self.lags3 = [1, 2, 4, 5, 7]
        # Length of the spiketrain
        self.t_stop = 3000
        # Patterns times
        self.patt1_times = neo.SpikeTrain(
            np.arange(0, 1000, 1000 // self.n_occ1) * pq.ms,
            t_stop=self.t_stop * pq.ms)
        self.patt2_times = neo.SpikeTrain(
            np.arange(1000, 2000, 1000 // self.n_occ2)[:-1] * pq.ms,
            t_stop=self.t_stop * pq.ms)
        self.patt3_times = neo.SpikeTrain(
            np.arange(2000, 3000, 1000 // self.n_occ3)[:-1] * pq.ms,
            t_stop=self.t_stop * pq.ms)
        # Patterns
        self.patt1 = [self.patt1_times] + [
            neo.SpikeTrain(self.patt1_times.view(pq.Quantity) + lag * pq.ms,
                           t_stop=self.t_stop * pq.ms) for lag in self.lags1
        ]
        self.patt2 = [self.patt2_times] + [
            neo.SpikeTrain(self.patt2_times.view(pq.Quantity) + lag * pq.ms,
                           t_stop=self.t_stop * pq.ms) for lag in self.lags2
        ]
        self.patt3 = [self.patt3_times] + [
            neo.SpikeTrain(self.patt3_times.view(pq.Quantity) + lag * pq.ms,
                           t_stop=self.t_stop * pq.ms) for lag in self.lags3
        ]
        # Data
        self.msip = self.patt1 + self.patt2 + self.patt3
        # Expected results
        self.n_spk1 = len(self.lags1) + 1
        self.n_spk2 = len(self.lags2) + 1
        self.n_spk3 = len(self.lags3) + 1
        self.elements1 = list(range(self.n_spk1))
        self.elements2 = list(range(self.n_spk2))
        self.elements3 = list(range(self.n_spk3))
        self.elements_msip = [
            self.elements1,
            list(range(self.n_spk1, self.n_spk1 + self.n_spk2)),
            list(
                range(self.n_spk1 + self.n_spk2,
                      self.n_spk1 + self.n_spk2 + self.n_spk3))
        ]
        self.occ1 = np.unique(
            conv.BinnedSpikeTrain(self.patt1_times,
                                  self.bin_size).spike_indices[0])
        self.occ2 = np.unique(
            conv.BinnedSpikeTrain(self.patt2_times,
                                  self.bin_size).spike_indices[0])
        self.occ3 = np.unique(
            conv.BinnedSpikeTrain(self.patt3_times,
                                  self.bin_size).spike_indices[0])
        self.occ_msip = [list(self.occ1), list(self.occ2), list(self.occ3)]
        self.lags_msip = [self.lags1, self.lags2, self.lags3]
        self.patt_psr = self.patt3 + [self.patt3[-1][:3]]
Example #28
def psf(sts,
        wndlen,
        width,
        dither,
        alpha,
        min_z=2,
        min_c=2,
        compute_stability=True,
        filter_concepts=True,
        n=100,
        corr='f',
        n_samples=100,
        verbose=False):
    '''
    Performs pattern spectrum filtering (PSF) on a list of parallel spike
    trains.

    INPUT:
    sts [list]
        list of neo.core.SpikeTrain objects, interpreted as parallel
        spike trains, or list of (ID, train) pairs. The IDs must be
        hashable. If not specified, they are set to integers 0,1,2,...
    wndlen [int]
        length, in bins, of the sliding window used to detect patterns
    width [quantity.Quantity]
        width of the time window used to determine spike synchrony
    dither [quantity.Quantity]
        for methods shifting spike times randomly around their original
        time (spike dithering, train shifting) or replacing them randomly
        within a certain window (spike jittering), dither represents the
        size of that shift / window. For other methods, dither is ignored.
    alpha [float]
        significance level of the statistical test
    min_z [int. Default: 2]
        minimum size for a set of synchronous spikes to be considered
        a pattern
    min_c [int. Default: 2]
        minimum support for patterns to be considered frequent
    compute_stability [bool. Default: True]
        if True, the stability of all the concepts is computed; the output
        then depends on the choice of the parameter filter_concepts.
        If False, only the significant concepts (pattern spectrum
        filtering) are returned
    filter_concepts [bool. Default: True]
        ignored if compute_stability is False. Otherwise, if True, only
        concepts with stability larger than 0.3 are returned and the
        concepts are filtered using the pattern spectrum; if False, all
        the concepts are returned
    n [int. Default: 100]
        amount of surrogates to generate to compute the p-value spectrum.
        Should be large (n >= 1000 recommended for 100 spike trains in
        *sts*)
    corr [str. Default: 'f']
        statistical correction to be applied:
        ''  : no statistical correction
        'f'|'fdr' : false discovery rate
        'b'|'bonf': Bonferroni correction
    n_samples [int. Default: 100]
        number of samples used to approximate the stability of each concept
    verbose [bool. Default: False]
        whether to print the status of the analysis; might be helpful for
        large n (the analysis can take a while!)

    OUTPUT:
        when compute_stability and filter_concepts are both True, returns
        a quadruple containing:
        * all the concepts with intensional or extensional stability >= 0.3
        * the significant patterns according to PSF
        * the p-value spectrum computed on surrogate data
        * the list of non-significant signatures inferred from the spectrum
        otherwise, returns the triplet (concepts, p-value spectrum,
        non-significant signatures). On MPI tasks other than rank 0, None
        is returned.

    '''
    # Compute the p-value spectrum, and compute non-significant signatures
    if verbose is True:
        print('psf(): compute p-value spectrum...')


#    if use_mpi:
#        PvSpec = pvspec_mpi(
#        sts, wndlen, width,  shift=shift, n=n, min=min, min_c=min_c)
#    else:
    t0 = time.time()
    PvSpec = pvspec(sts,
                    wndlen,
                    width,
                    dither=dither,
                    n=n,
                    min_z=min_z,
                    min_c=min_c)
    t1 = time.time()
    print('pvspec time', t1 - t0)
    comm = MPI.COMM_WORLD  # create MPI communicator
    rank = comm.Get_rank()  # get rank of current MPI task
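    # Only the root MPI task (rank 0) assembles and returns the results
    # below; all other ranks fall through and return None.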
    # Compute transactions and CFISs of *sts*
    if verbose is True:
        print('psf(): run FIM on input data...')
    binned_sts = conv.BinnedSpikeTrain(sts, width).to_array()
    context, rel_matrix = buildContext(binned_sts, wndlen)
    Trans = st2trans(sts, wndlen, width=width)
    print('Done conv')
    concepts_int = [
        i[0] for i in fpgrowth(
            Trans, target='c', min_z=min_z, min_c=min_c, report='a')
    ]
    t2 = time.time()
    print('time fpgrowth data', t2 - t1)
    if compute_stability:
        # Computing the approximated stability of all the concepts
        concepts = _approximate_stability_extensional(concepts_int, rel_matrix,
                                                      wndlen, n_samples)
        t3 = time.time()
        print('approx stability time', t3 - t2)
        if rank == 0:
            if len(concepts) != len(concepts_int):
                raise ValueError(
                    'the approximate stability computation returned fewer '
                    'concepts than were given as input')
            nsSgnt = sspec(PvSpec, alpha, corr=corr, report='e')
            if filter_concepts is True:
                concepts_stab = list(filter(conceptFilter, concepts))
                # Extract significant CFISs with pattern spectrum filtering
                concepts_psf = [
                    c for c in concepts if (len(c[0]), len(c[1])) not in nsSgnt
                ]
                # Return concepts, p-val spectrum and non-significant signature
                if verbose is True:
                    print('psf(): done')
                t4 = time.time()
                print('time filtering', t4 - t3)
                return concepts_stab, concepts_psf, PvSpec, nsSgnt
            else:
                return concepts, PvSpec, nsSgnt
        else:
            pass
    else:
        if rank == 0:
            nsSgnt = sspec(PvSpec, alpha, corr=corr, report='e')
            concepts = []
            for intent in concepts_int:
                concepts.append(
                    (set(intent),
                     set(
                         np.where(
                             np.prod(rel_matrix[:, intent], axis=1) == 1)[0])))

            if filter_concepts is True:
                # Extract significant CFISs with pattern spectrum filtering
                concepts = [
                    c for c in concepts if (len(c[0]), len(c[1])) not in nsSgnt
                ]
            # Return concepts, p-val spectrum and non-significant signature
            if verbose is True:
                print('psf(): done')
            return concepts, PvSpec, nsSgnt
        else:
            pass
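
# The `corr` options in psf() name two standard multiple-comparison
# corrections. Below is a minimal plain-NumPy sketch of both (it is not the
# module's own sspec helper, and the name `significant_mask` is
# illustrative): Bonferroni rejects p < alpha/m, while Benjamini-Hochberg
# FDR rejects the k smallest p-values, with k the largest rank such that
# p_(k) <= k * alpha / m.
import numpy as np

def significant_mask(pvalues, alpha=0.05, corr='f'):
    p = np.asarray(pvalues, dtype=float)
    m = p.size
    if corr in ('b', 'bonf'):            # Bonferroni correction
        return p < alpha / m
    if corr in ('f', 'fdr'):             # Benjamini-Hochberg FDR
        order = np.argsort(p)
        passed = p[order] <= alpha * np.arange(1, m + 1) / m
        k = passed.nonzero()[0].max() + 1 if passed.any() else 0
        mask = np.zeros(m, dtype=bool)
        mask[order[:k]] = True
        return mask
    return p < alpha                     # no correction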
Example #29
    def test_binned_sparsity(self):
        train = neo.SpikeTrain(np.arange(10), t_stop=10 * pq.s, units=pq.s)
        bst = cv.BinnedSpikeTrain(train, num_bins=100)
        self.assertAlmostEqual(bst.sparsity, 0.1)
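
# A plain-NumPy sketch of what the test above checks, assuming `sparsity`
# is the fraction of nonzero entries of the binned matrix: the ten spikes
# at 0..9 s fall into ten distinct 0.1-s bins out of 100, so the sparsity
# is 10 / (1 * 100) = 0.1.
import numpy as np

mat = np.zeros((1, 100))                  # one spike train, 100 bins
mat[0, np.arange(10) * 10] = 1            # spikes at 0..9 s -> bins 0, 10, ..., 90
print(np.count_nonzero(mat) / mat.size)   # 0.1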
Example #30
def jointJ_window_analysis(data,
                           binsize,
                           winsize,
                           winstep,
                           pattern_hash,
                           method='analytic_TrialByTrial',
                           t_start=None,
                           t_stop=None,
                           binary=True,
                           n_surr=100):
    """
    Calculates the joint surprise in a sliding window fashion.

    Implementation is based on :cite:`unitary_event_analysis-Gruen99_67`.

    Parameters
    ----------
    data : list
        A list of spike trains (`neo.SpikeTrain` objects) in different trials:
            0-axis --> Trials

            1-axis --> Neurons

            2-axis --> Spike times
    binsize : pq.Quantity
        The size of bins for discretizing spike trains.
    winsize : pq.Quantity
        The size of the window of analysis.
    winstep : pq.Quantity
        The size of the window step.
    pattern_hash : list of int
        A list of patterns of interest, expressed as hash values
        (see `hash_from_pattern` and `inverse_hash_from_pattern` functions).
    method : str
        The method with which the unitary events should be computed:
          'analytic_TrialByTrial' --> calculate the expectancy
          (analytically) on each trial, then sum over all trials.

          'analytic_TrialAverage' --> calculate the expectancy
          by averaging over trials (cf. Gruen et al. 2003).

          'surrogate_TrialByTrial' --> calculate the distribution
          of expected coincidences by spike time randomization in
          each trial and sum over trials.
        Default is 'analytic_TrialByTrial'.
    t_start : float or pq.Quantity, optional
        The start time to use for the time points.
        If not specified, retrieved from the `t_start` attribute of
        spiketrains.
    t_stop : float or pq.Quantity, optional
        The stop time to use for the time points.
        If not specified, retrieved from the `t_stop` attribute of
        spiketrains.
    n_surr : int, optional
        The number of surrogates to be used.
        Default is 100.

    Returns
    -------
    dict
        The value of each key has the shape of
          different pattern hash --> 0-axis

          different window --> 1-axis
        Js: list of float
          JointSurprise of different given patterns within each window.
        indices: list of list of int
          A list of the indices of the patterns within each window.
        n_emp: list of int
          The empirical number of each observed pattern.
        n_exp: list of float
          The expected number of each pattern.
        rate_avg: list of float
          The average firing rate of each neuron.


    Raises
    ------
    ValueError
        If `data` is not in the format specified above.
    NotImplementedError
        If `binary` is not True. The method works only with binary matrices at
        the moment.

    Warns
    -----
    UserWarning
        The ratio between `winsize` or `winstep` and `binsize` is not an
        integer.

    """
    if not isinstance(data[0][0], neo.SpikeTrain):
        raise ValueError(
            "structure of the data is not correct: 0-axis should be trials, "
            "1-axis units and 2-axis neo spike trains")

    if t_start is None:
        t_start = data[0][0].t_start.rescale('ms')
    if t_stop is None:
        t_stop = data[0][0].t_stop.rescale('ms')

    # position of all windows (left edges)
    t_winpos = _winpos(t_start, t_stop, winsize, winstep, position='left-edge')
    t_winpos_bintime = _bintime(t_winpos, binsize)

    winsize_bintime = _bintime(winsize, binsize)
    winstep_bintime = _bintime(winstep, binsize)

    if winsize_bintime * binsize != winsize:
        warnings.warn(
            "The ratio between winsize ({winsize}) and binsize ({binsize}) is "
            "not an integer".format(winsize=winsize, binsize=binsize))

    if winstep_bintime * binsize != winstep:
        warnings.warn(
            "The ratio between winstep ({winstep}) and binsize ({binsize}) is "
            "not an integer".format(winstep=winstep, binsize=binsize))

    num_tr, N = np.shape(data)[:2]

    n_bins = int((t_stop - t_start) / binsize)

    mat_tr_unit_spt = np.zeros((len(data), N, n_bins))
    for tr, sts in enumerate(data):
        sts = list(sts)
        bs = conv.BinnedSpikeTrain(sts,
                                   t_start=t_start,
                                   t_stop=t_stop,
                                   binsize=binsize)
        if binary is True:
            mat = bs.to_bool_array()
        else:
            raise NotImplementedError(
                "The method works only with binary matrices at the moment")
        mat_tr_unit_spt[tr] = mat

    num_win = len(t_winpos)
    Js_win, n_exp_win, n_emp_win = (np.zeros(num_win) for _ in range(3))
    rate_avg = np.zeros((num_win, N))
    indices_win = {}
    for i in range(num_tr):
        indices_win['trial' + str(i)] = []

    for i, win_pos in enumerate(t_winpos_bintime):
        mat_win = mat_tr_unit_spt[:, :, win_pos:win_pos + winsize_bintime]
        if method == 'surrogate_TrialByTrial':
            Js_win[i], rate_avg[i], n_exp_win[i], n_emp_win[
                i], indices_lst = _UE(mat_win,
                                      pattern_hash,
                                      method,
                                      n_surr=n_surr)
        else:
            Js_win[i], rate_avg[i], n_exp_win[i], n_emp_win[
                i], indices_lst = _UE(mat_win, pattern_hash, method)
        for j in range(num_tr):
            if len(indices_lst[j][0]) > 0:
                indices_win['trial' + str(j)] = np.append(
                    indices_win['trial' + str(j)], indices_lst[j][0] + win_pos)
    return {
        'Js': Js_win,
        'indices': indices_win,
        'n_emp': n_emp_win,
        'n_exp': n_exp_win,
        'rate_avg': rate_avg / binsize
    }
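
# Unlike the newer version above, this legacy signature requires
# `pattern_hash` explicitly. A pure-NumPy sketch of the hashing convention
# referenced in the docstring (a binary firing pattern read as a base-2
# number; Elephant's own helper is hash_from_pattern in
# elephant.unitary_event_analysis):
import numpy as np

def hash_from_pattern(pattern, base=2):
    pattern = np.asarray(pattern)
    n_neurons = pattern.shape[-1]
    # the first neuron maps to the most significant digit
    weights = base ** np.arange(n_neurons - 1, -1, -1)
    return int(np.dot(pattern, weights))

print(hash_from_pattern([1, 1]))     # full coincidence of 2 neurons -> 3
print(hash_from_pattern([1, 0, 1]))  # neurons 0 and 2 fire together -> 5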