Code example #1
    def test_multitaper_psd_against_nitime(self):
        """
        This test assesses the match between this implementation of the
        multitaper PSD and nitime (0.8), using a predefined time series
        generated by an autoregressive model.

        Please follow the link below for more details:
        https://gin.g-node.org/INM-6/elephant-data/src/master/unittest/spectral/multitaper_psd
        """
        data_url = r"https://web.gin.g-node.org/INM-6/elephant-data/raw/master/unittest/spectral/multitaper_psd/data"  # noqa

        files_to_download = [
            ("time_series.npy", "ff43797e2ac94613f510b20a31e2e80e"),
            ("psd_nitime.npy", "89d1f53957e66c786049ea425b53c0e8")
        ]

        for filename, checksum in files_to_download:
            download(url=f"{data_url}/{filename}", checksum=checksum)

        time_series = np.load(ELEPHANT_TMP_DIR / 'time_series.npy')
        psd_nitime = np.load(ELEPHANT_TMP_DIR / 'psd_nitime.npy')

        freqs, psd_multitaper = elephant.spectral.multitaper_psd(
            signal=time_series, fs=0.1, nw=4, num_tapers=8)

        np.testing.assert_allclose(psd_multitaper,
                                   psd_nitime,
                                   rtol=0.3,
                                   atol=0.1)
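
For orientation, here is a minimal standalone sketch of the same elephant.spectral.multitaper_psd call on synthetic data; the white-noise input and its length are illustrative assumptions, not the AR-model time series stored on GIN, while the keyword arguments mirror the test above.

import numpy as np
import elephant.spectral

# Illustrative input only: white noise sampled at 0.1 Hz, i.e. the same
# sampling rate and keyword arguments as used in the test above.
rng = np.random.default_rng(0)
time_series = rng.standard_normal(4096)

freqs, psd = elephant.spectral.multitaper_psd(
    signal=time_series,   # NumPy array (neo.AnalogSignal input also works)
    fs=0.1,               # sampling frequency
    nw=4,                 # time-bandwidth product of the Slepian tapers
    num_tapers=8)         # number of tapers averaged into the estimate

print(freqs.shape, psd.shape)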
Code example #2
    def test_Riehle_et_al_97_UE(self):
        url = "http://raw.githubusercontent.com/ReScience-Archives/Rostami-" \
              "Ito-Denker-Gruen-2017/master/data"
        files_to_download = (("extracted_data.npy",
                              "c4903666ce8a8a31274d6b11238a5ac3"),
                             ("winny131_23.gdf",
                              "cc2958f7b4fb14dbab71e17bba49bd10"))
        for filename, checksum in files_to_download:
            # The files will be downloaded to ELEPHANT_TMP_DIR
            download(url=f"{url}/{filename}", checksum=checksum)

        # load spike data of figure 2 of Riehle et al. 1997
        spiketrain = self.load_gdf2Neo(ELEPHANT_TMP_DIR / "winny131_23.gdf",
                                       trigger='RS_4',
                                       t_pre=1799 * pq.ms,
                                       t_post=300 * pq.ms)

        # calculating UE ...
        winsize = 100 * pq.ms
        bin_size = 5 * pq.ms
        winstep = 5 * pq.ms
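        # pattern hash 3 encodes the binary pattern '11', i.e. a coincidence
        # of both units of the recorded pair within one bin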
        pattern_hash = [3]
        t_start = spiketrain[0][0].t_start
        t_stop = spiketrain[0][0].t_stop
        t_winpos = ue._winpos(t_start, t_stop, winsize, winstep)
        significance_level = 0.05

        UE = ue.jointJ_window_analysis(spiketrain,
                                       pattern_hash=pattern_hash,
                                       bin_size=bin_size,
                                       win_size=winsize,
                                       win_step=winstep,
                                       method='analytic_TrialAverage')
        # load extracted data from figure 2 of Riehle et al. 1997
        extracted_data = np.load(ELEPHANT_TMP_DIR / 'extracted_data.npy',
                                 encoding='latin1',
                                 allow_pickle=True).item()
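        # convert the significance level into the joint-surprise threshold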
        Js_sig = ue.jointJ(significance_level)
        sig_idx_win = np.where(UE['Js'] >= Js_sig)[0]
        diff_UE_rep = []
        y_cnt = 0
        for trial_id in range(len(spiketrain)):
            trial_id_str = "trial{}".format(trial_id)
            indices_unique = np.unique(UE['indices'][trial_id_str])
            if len(indices_unique) > 0:
                # choose only the significant coincidences
                indices_unique_significant = []
                for j in sig_idx_win:
                    significant = indices_unique[np.where(
                        (indices_unique * bin_size >= t_winpos[j])
                        & (indices_unique * bin_size < t_winpos[j] + winsize))]
                    indices_unique_significant.extend(significant)
                x_tmp = np.unique(indices_unique_significant) * \
                    bin_size.magnitude
                if len(x_tmp) > 0:
                    ue_trial = np.sort(extracted_data['ue'][y_cnt])
                    diff_UE_rep = np.append(diff_UE_rep, x_tmp - ue_trial)
                    y_cnt += 1
        np.testing.assert_array_less(np.abs(diff_UE_rep), 0.3)
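
As a self-contained illustration of the same API, the sketch below runs jointJ_window_analysis and jointJ on toy data; it assumes that a plain list of trials, each a list of two neo.SpikeTrain objects sharing t_start and t_stop, is accepted (as implied by the test above), and the random spike times are purely illustrative.

import numpy as np
import neo
import quantities as pq
import elephant.unitary_event_analysis as ue

# Illustrative data only: 10 trials of 2 units firing at random times
# within [0, 1000] ms (independent, so few unitary events are expected).
rng = np.random.default_rng(0)
trials = [
    [neo.SpikeTrain(np.sort(rng.uniform(0, 1000, size=30)) * pq.ms,
                    t_start=0 * pq.ms, t_stop=1000 * pq.ms)
     for _ in range(2)]
    for _ in range(10)
]

UE = ue.jointJ_window_analysis(trials,
                               pattern_hash=[3],
                               bin_size=5 * pq.ms,
                               win_size=100 * pq.ms,
                               win_step=50 * pq.ms,
                               method='analytic_TrialAverage')

# windows whose joint surprise exceeds the threshold for p = 0.05
Js_sig = ue.jointJ(0.05)
print(np.where(UE['Js'] >= Js_sig)[0])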
Code example #3
    def test_spike_contrast_with_Izhikevich_network_auto(self):
        # This test reproduces Test data 3 (Izhikevich network), fig. 3,
        # of Manuel Ciba et al., 2018.
        # The data is a dictionary of simulations of different networks.
        # Each simulation of a network is a dictionary with two keys:
        # 'spiketrains' and the ground truth 'synchrony'.
        # The default time unit is seconds. Each simulation lasted 2 seconds,
        # starting from 0.

        izhikevich_url = r"https://web.gin.g-node.org/INM-6/" \
                         r"elephant-data/raw/master/" \
                         r"dataset-3/Data_Izhikevich_network.zip"
        filepath_zip = download(url=izhikevich_url,
                                checksum="70e848500c1d9c6403b66de8c741d849")
        unzip(filepath_zip)
        filepath = filepath_zip.replace(".zip", ".json")
        with open(filepath) as read_file:
            data = json.load(read_file)

        # for the sake of compute time, take the first 5 networks
        networks_subset = tuple(data.values())[:5]

        for network_simulations in networks_subset:
            for simulation in network_simulations.values():
                synchrony_true = simulation['synchrony']
                spiketrains = [
                    neo.SpikeTrain(st,
                                   t_start=0 * second,
                                   t_stop=2 * second,
                                   units=second)
                    for st in simulation['spiketrains']
                ]
                synchrony = spike_contrast(spiketrains)
                self.assertAlmostEqual(synchrony, synchrony_true, places=2)
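
For completeness, a minimal sketch of the spike_contrast call used above on hand-made spike trains; the import paths (elephant.spike_train_synchrony, quantities) are assumed counterparts of the unqualified names in the test, and the random spike times only illustrate the expected input format.

import numpy as np
import neo
from quantities import second
from elephant.spike_train_synchrony import spike_contrast

# Illustrative input only: three spike trains over a 2-second recording,
# with spike times given in seconds as in the Izhikevich dataset above.
rng = np.random.default_rng(0)
spiketrains = [
    neo.SpikeTrain(np.sort(rng.uniform(0, 2, size=40)),
                   t_start=0 * second,
                   t_stop=2 * second,
                   units=second)
    for _ in range(3)
]

# scalar synchrony value (higher means more synchronous firing)
print(spike_contrast(spiketrains))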