Example #1
def test_notch_filters(method, filter_length, line_freq, tol):
    """Test notch filters."""
    # let's use an ugly, prime sfreq for fun
    rng = np.random.RandomState(0)
    sfreq = 487
    sig_len_secs = 21
    t = np.arange(0, int(round(sig_len_secs * sfreq))) / sfreq

    # make a "signal"
    a = rng.randn(int(sig_len_secs * sfreq))
    orig_power = np.sqrt(np.mean(a**2))
    # make line noise
    a += np.sum([np.sin(2 * np.pi * f * t) for f in line_freqs], axis=0)

    # only allow None line_freqs with 'spectrum_fit' mode
    for kind in ('fir', 'iir'):
        with pytest.raises(ValueError, match='freqs=None can only be used wi'):
            notch_filter(a, sfreq, None, kind)
    with catch_logging() as log_file:
        b = notch_filter(a,
                         sfreq,
                         line_freq,
                         filter_length,
                         method=method,
                         verbose=True)
    if line_freq is None:
        out = [
            line.strip().split(':')[0]
            for line in log_file.getvalue().split('\n') if line.startswith(' ')
        ]
        assert len(out) == 4, 'Detected frequencies not logged properly'
        out = np.array(out, float)
        assert_array_almost_equal(out, line_freqs)
    new_power = np.sqrt(sum_squared(b) / b.size)
    assert_almost_equal(new_power, orig_power, tol)
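The `@pytest.mark.parametrize` decorator and the module-level `line_freqs` this test depends on were not captured in the snippet above. A plausible reconstruction, with parameter sets inferred from the loop-based variants in Examples #4-#6 rather than taken from the shipped test, might look like:

line_freqs = np.arange(60, 241, 60)  # 60, 120, 180, 240 Hz line harmonics (assumed values)

@pytest.mark.parametrize('method, filter_length, line_freq, tol', [
    ('spectrum_fit', 'auto', None, 2),
    ('spectrum_fit', 'auto', line_freqs, 1),
    ('fir', 'auto', line_freqs, 1),
    ('fir', 8192, line_freqs, 1),
    ('iir', 'auto', line_freqs, 1),
])
def test_notch_filters(method, filter_length, line_freq, tol):
    ...  # body as shown above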
Example #2
def file_to_nparray(fname,
                    device,
                    sfreq=100.0,
                    verbose=False,
                    scaling_factor=None,
                    notch=None):
    """
	Create a mne raw instance from csv file.
	"""
    # get channel names
    # in MNE, this means you must configure it with two arrays:
    # 1) an array of channel names as strings
    # 2) a corresponding array of channel types; in our case, all channels are type 'eeg'
    ch_names = getChannelNames(device)
    ch_type = ['eeg'] * len(ch_names)

    # add one more channel called 'class_label' as type 'stim'
    # this type tells MNE to treat this channel of data as a class label
    ch_names.extend(['class_label'])
    ch_type.extend(['stim'])

    # Read EEG file
    data = pd.read_table(fname, header=None, names=ch_names)

    # sometimes (rarely) you might need to scale the data because your device
    # recorded with an incorrect order of magnitude
    if scaling_factor is not None:
        data.loc[:, :] *= scaling_factor
        data.loc[:, 'class_label'] /= scaling_factor
        #print data

    raw_data = np.array(data[ch_names], dtype=np.float64).T

    if notch is not None:
        if notch == 60:
            print("Applying notch filter at 60 Hz")
            # notch filter params (60 Hz in the US, 50 Hz in Europe);
            # assign the result back, since notch_filter returns a filtered copy
            raw_data = notch_filter(raw_data,
                                    Fs=sfreq,
                                    freqs=[60],
                                    filter_length=raw_data.shape[1] - 1,
                                    phase='zero',
                                    fir_window='hamming')

    filtered_data = raw_data.astype(int)

    # create and populate MNE info structure
    info = create_info(ch_names, sfreq=sfreq, ch_types=ch_type)
    info['filename'] = fname

    # return the data and the info needed to build an MNE Raw object
    return [filtered_data, info]
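For context, a minimal sketch of how the returned `[data, info]` pair could be wrapped into an MNE `Raw` object (the file name and device name are hypothetical, and this assumes an MNE version that still accepts the `info['filename']` assignment above):

import mne

# hypothetical recording and device name
filtered_data, info = file_to_nparray('recording.csv', device='openbci', sfreq=250.0, notch=60)
raw = mne.io.RawArray(filtered_data, info)  # expects a (n_channels, n_samples) array
raw.plot(scalings='auto')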
Example #3
def fig_gamma_to_hfo(po, pos, sig1, sig2, sig3):
    global freq, time_spec, spec_mtrx
    ax1 = py.subplot(gs[pos[2]:pos[3], pos[0]:pos[1]])
    set_axis(ax1, -0.05, 1.05, letter=po)

    tps = [[2, 5], [2, 5], [0.4, 4]]
    sigs = [sig1, sig2, sig3]
    dur = 10
    minute = int(60 / dur)
    for i, (sig, tp) in enumerate(zip(sigs, tps)):
        sig = notch_filter(sig, Fs, np.arange(50, 450, 50))
        freq_g, time_spec, spec_mtrx1 = spectrogram(sig, Fs, nperseg=Fs * dur, noverlap=0)
        cp = np.max(spec_mtrx1[30 * dur:65 * dur], axis=0)
        cp2 = np.max(spec_mtrx1[80 * dur:180 * dur], axis=0)
        if i == 0:
            ax1.plot([0, 1], [cp[int(tp[0] * minute)], cp[tp[1] * minute]], '-o',
                     color='navy', label='Gamma 30-65 Hz')
            ax1.plot([0, 1], [cp2[int(tp[0] * minute)], cp2[tp[1] * minute]], '-o',
                     color='indianred', label='KX HFO')
        else:
            ax1.plot([0, 1], [cp[int(tp[0] * minute)], cp[tp[1] * minute]], '-o', color='navy')
            ax1.plot([0, 1], [cp2[int(tp[0] * minute)], cp2[tp[1] * minute]], '-o', color='indianred')
    # ax1.plot([hfo_power[0], hfo_power[30], hfo_power[-1]])
    ax1.legend(loc='lower right', bbox_to_anchor=(1.4, 1), ncol=1, frameon=True, fontsize=12)
    py.xticks([0, 1], ['bef. KX', 'after KX'])
    py.xlim(-0.5, 1.5)
    py.yscale('log')
    py.ylabel('power $mV^2$')
    ax1.spines['right'].set_visible(False)
    ax1.spines['top'].set_visible(False)
Example #4
def test_notch_filters():
    """Test notch filters."""
    # let's use an ugly, prime sfreq for fun
    sfreq = 487.0
    sig_len_secs = 20
    t = np.arange(0, int(sig_len_secs * sfreq)) / sfreq
    freqs = np.arange(60, 241, 60)

    # make a "signal"
    a = rng.randn(int(sig_len_secs * sfreq))
    orig_power = np.sqrt(np.mean(a ** 2))
    # make line noise
    a += np.sum([np.sin(2 * np.pi * f * t) for f in freqs], axis=0)

    # only allow None line_freqs with 'spectrum_fit' mode
    assert_raises(ValueError, notch_filter, a, sfreq, None, 'fft')
    assert_raises(ValueError, notch_filter, a, sfreq, None, 'iir')
    methods = ['spectrum_fit', 'spectrum_fit', 'fft', 'fft', 'iir']
    filter_lengths = ['auto', 'auto', 'auto', 8192, 'auto']
    line_freqs = [None, freqs, freqs, freqs, freqs]
    tols = [2, 1, 1, 1]
    for meth, lf, fl, tol in zip(methods, line_freqs, filter_lengths, tols):
        with catch_logging() as log_file:
            with warnings.catch_warnings(record=True):
                b = notch_filter(a, sfreq, lf, fl, method=meth, verbose=True)
        if lf is None:
            out = log_file.getvalue().split('\n')[:-1]
            if len(out) != 2 and len(out) != 3:  # force_serial: len(out) == 3
                raise ValueError('Detected frequencies not logged properly')
            out = np.fromstring(out[-1], sep=', ')
            assert_array_almost_equal(out, freqs)
        new_power = np.sqrt(sum_squared(b) / b.size)
        assert_almost_equal(new_power, orig_power, tol)
Example #5
def test_notch_filters():
    """Test notch filters
    """
    # let's use an ugly, prime sfreq for fun
    sfreq = 487.0
    sig_len_secs = 20
    t = np.arange(0, int(sig_len_secs * sfreq)) / sfreq
    freqs = np.arange(60, 241, 60)

    # make a "signal"
    rng = np.random.RandomState(0)
    a = rng.randn(int(sig_len_secs * sfreq))
    orig_power = np.sqrt(np.mean(a ** 2))
    # make line noise
    a += np.sum([np.sin(2 * np.pi * f * t) for f in freqs], axis=0)

    # only allow None line_freqs with 'spectrum_fit' mode
    assert_raises(ValueError, notch_filter, a, sfreq, None, "fft")
    assert_raises(ValueError, notch_filter, a, sfreq, None, "iir")
    methods = ["spectrum_fit", "spectrum_fit", "fft", "fft", "iir"]
    filter_lengths = [None, None, None, 8192, None]
    line_freqs = [None, freqs, freqs, freqs, freqs]
    tols = [2, 1, 1, 1]
    for meth, lf, fl, tol in zip(methods, line_freqs, filter_lengths, tols):
        with catch_logging() as log_file:
            b = notch_filter(a, sfreq, lf, filter_length=fl, method=meth, verbose="INFO")

        if lf is None:
            out = log_file.getvalue().split("\n")[:-1]
            if len(out) != 2:
                raise ValueError("Detected frequencies not logged properly")
            out = np.fromstring(out[1], sep=", ")
            assert_array_almost_equal(out, freqs)
        new_power = np.sqrt(sum_squared(b) / b.size)
        assert_almost_equal(new_power, orig_power, tol)
Example #6
def test_notch_filters():
    """Test notch filters."""
    # let's use an ugly, prime sfreq for fun
    sfreq = 487.0
    sig_len_secs = 20
    t = np.arange(0, int(sig_len_secs * sfreq)) / sfreq
    freqs = np.arange(60, 241, 60)

    # make a "signal"
    a = rng.randn(int(sig_len_secs * sfreq))
    orig_power = np.sqrt(np.mean(a ** 2))
    # make line noise
    a += np.sum([np.sin(2 * np.pi * f * t) for f in freqs], axis=0)

    # only allow None line_freqs with 'spectrum_fit' mode
    assert_raises(ValueError, notch_filter, a, sfreq, None, 'fft')
    assert_raises(ValueError, notch_filter, a, sfreq, None, 'iir')
    methods = ['spectrum_fit', 'spectrum_fit', 'fft', 'fft', 'iir']
    filter_lengths = ['auto', 'auto', 'auto', 8192, 'auto']
    line_freqs = [None, freqs, freqs, freqs, freqs]
    tols = [2, 1, 1, 1]
    for meth, lf, fl, tol in zip(methods, line_freqs, filter_lengths, tols):
        with catch_logging() as log_file:
            with warnings.catch_warnings(record=True):
                b = notch_filter(a, sfreq, lf, fl, method=meth, verbose=True)
        if lf is None:
            out = log_file.getvalue().split('\n')[:-1]
            if len(out) != 2 and len(out) != 3:  # force_serial: len(out) == 3
                raise ValueError('Detected frequencies not logged properly')
            out = np.fromstring(out[-1], sep=', ')
            assert_array_almost_equal(out, freqs)
        new_power = np.sqrt(sum_squared(b) / b.size)
        assert_almost_equal(new_power, orig_power, tol)
Example #7
def notch_filtfilt(data, fs, fs_cut, fs_band, verbose=False):
    # Fp1 = fs_cut - fs_band / 2 and Fs2 = fs_cut + fs_band / 2
    return notch_filter(x=data,
                        Fs=fs,
                        freqs=fs_cut,
                        trans_bandwidth=fs_band,
                        verbose=verbose)
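A minimal usage sketch on synthetic data (the 250 Hz sampling rate, 50 Hz mains frequency and 2 Hz transition band are arbitrary choices):

import numpy as np

fs = 250.0
t = np.arange(int(10 * fs)) / fs
x = np.random.randn(t.size) + np.sin(2 * np.pi * 50 * t)  # white noise plus 50 Hz mains
x_clean = notch_filtfilt(x, fs=fs, fs_cut=50.0, fs_band=2.0)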
Example #8
def test_filter_auto():
    """Test filter auto parameters."""
    # test that our overlap-add filtering doesn't introduce strange
    # artifacts (from mne_analyze mailing list 2015/06/25)
    N = 300
    sfreq = 100.
    lp = 10.
    sine_freq = 1.
    x = np.ones(N)
    t = np.arange(N) / sfreq
    x += np.sin(2 * np.pi * sine_freq * t)
    x_orig = x.copy()
    for pad in ('reflect_limited', 'reflect', 'edge'):
        for fir_design in ('firwin2', 'firwin'):
            kwargs = dict(fir_design=fir_design, pad=pad)
            x = x_orig.copy()
            x_filt = filter_data(x, sfreq, None, lp, **kwargs)
            assert_array_equal(x, x_orig)
            n_edge = 10
            assert_allclose(x[n_edge:-n_edge],
                            x_filt[n_edge:-n_edge],
                            atol=1e-2)
            assert_array_equal(x_filt,
                               filter_data(x, sfreq, None, lp, None, **kwargs))
            assert_array_equal(x, x_orig)
            assert_array_equal(x_filt, filter_data(x, sfreq, None, lp,
                                                   **kwargs))
            assert_array_equal(x, x_orig)
            assert_array_equal(
                x_filt, filter_data(x, sfreq, None, lp, copy=False, **kwargs))
            assert_array_equal(x, x_filt)

    # degenerate conditions
    pytest.raises(ValueError, filter_data, x, -sfreq, 1, 10)
    pytest.raises(ValueError, filter_data, x, sfreq, 1, sfreq * 0.75)
    with pytest.raises(ValueError, match='Data to be filtered must be real'):
        filter_data(x.astype(np.float32), sfreq, None, 10)
    with pytest.raises(ValueError, match='Data to be filtered must be real'):
        filter_data([1j], 1000., None, 40.)
    with pytest.raises(TypeError, match='instance of ndarray'):
        filter_data('foo', 1000., None, 40.)
    # gh-10258
    raw = RawArray([[0.]], create_info(1, 1000., 'eeg'))
    with pytest.raises(TypeError, match=r'.*copy\(\)\.filter\(\.\.\.\)` in.*'):
        filter_data(raw, 1000., None, 40.)
    with pytest.raises(TypeError, match=r'.*copy\(\)\.notch_filter\(\.\.\..*'):
        notch_filter(raw, 1000., [60.])
Example #9
def fig_power(po,
              pos,
              sig_loc,
              start=400,
              stop=405,
              Title='',
              asteriks=[90, 200]):
    global freq
    from mpl_toolkits.axes_grid1.inset_locator import inset_axes
    ax1 = py.subplot(gs[pos[2]:pos[3], pos[0]:pos[1]])
    set_axis(ax1, -0.1, 1.05, letter=po)
    labels = ['Olf. bulb', 'Thalamus', 'Visual ctrx']

    for i in range(2, -1, -1):
        sig_loc[i] = notch_filter(sig_loc[i], Fs, np.arange(50, 450, 50))
        freq, sp = welch(sig_loc[i], Fs, nperseg=1 * Fs)
        ax1.plot(freq, sp, lw=2, label=labels[i])
    py.legend(loc=2, fontsize=15)
    ax1.text(asteriks[0], asteriks[1], '*', fontsize=20)
    ax1.text(160, 440, '?', fontsize=20)
    py.xlim(0, 260)
    py.ylim(0, 500)
    py.ylabel('power ($mv^2$)')
    ax1.spines['right'].set_visible(False)
    ax1.spines['top'].set_visible(False)
    py.xlabel('Frequency (Hz)')
    if po != 'B3':
        ax_inset = inset_axes(ax1, width="23%", height=1.0, loc=1)  # rename to avoid shadowing the imported inset_axes
        for n in range(3, sig_loc.shape[0]):
            sig_loc[n] = notch_filter(sig_loc[n], Fs, np.arange(50, 450, 50))
            freq, sp = welch(sig_loc[n], Fs, nperseg=10 * Fs)
            py.plot(freq, sp, lw=0.7, color='green')
        py.ylim(0, 220)
        py.xlim(0, 260)
        py.xticks(fontsize=11)
        py.yticks(fontsize=11)
        # py.ylim(.1, 10e4)
        # py.yscale('log')
        # py.text(200,100, 'Olf. bulb propofol')
        py.xlabel('Frequency (Hz)')
    if po == 'B1':
        ax1.legend(loc='lower right',
                   bbox_to_anchor=(1.2, 1.1),
                   ncol=3,
                   frameon=True,
                   fontsize=15)
Example #10
def pilot_plot():
    py.figure()
    import seaborn as sns
    clrs = sns.color_palette('husl', n_colors=50)
    for i in range(32):
        cat3_order[i] = notch_filter(cat3_order[i], Fs, np.arange(50, 350, 50))
        freq, sp = welch(cat3_order[i], Fs, nperseg=10 * Fs)
        py.plot(freq[50:3000], sp[50:3000], label=str(i), color=clrs[i])
    py.legend()
Example #11
def notch_filter(dat, Fs=250, freqs=60):
    # apply a notch filter at 60 Hz only; the OpenBCI default
    # sampling rate is 250 Hz
    # `dat` is an array
    # https://www.martinos.org/mne/stable/generated/mne.filter.notch_filter.html
    # `notch_filter` is also available as a method on raw data objects,
    # https://www.martinos.org/mne/stable/generated/mne.io.RawArray.html?highlight=notch_filter#mne.io.RawArray.notch_filter
    # e.g.: dat.notch_filter(60)
    return mf.notch_filter(dat, Fs, freqs)
Example #12
def run(x, fs, n_jobs):
    plt.figure()
    plt.plot(x.T)
    plt.figure()
    plt.psd(x, Fs=fs)
    x2 = notch_filter(x, fs, np.arange(60, 241, 60), notch_widths=5, filter_length='auto', copy=True, n_jobs=n_jobs)
    plt.figure()
    plt.plot(x2.T)
    plt.figure()
    plt.psd(x2, Fs=fs)
    plt.show()
Example #13
def bandstop_filter(X, y, sfreq, bandwidth, freqs_to_notch):
    """Apply a band-stop filter with desired bandwidth at the desired frequency
    position.

    Suggested e.g. in [1]_ and [2]_

    Parameters
    ----------
    X : torch.Tensor
        EEG input example or batch.
    y : torch.Tensor
        EEG labels for the example or batch.
    sfreq : float
        Sampling frequency of the signals to be filtered.
    bandwidth : float
        Bandwidth of the filter, i.e. distance between the low and high cut
        frequencies.
    freqs_to_notch : array-like | None
        Array of floats of size ``(batch_size,)`` containing the center of the
        frequency band to filter out for each sample in the batch. Frequencies
        should be greater than ``bandwidth/2 + transition`` and lower than
        ``sfreq/2 - bandwidth/2 - transition`` (where ``transition = 1 Hz``).

    Returns
    -------
    torch.Tensor
        Transformed inputs.
    torch.Tensor
        Transformed labels.

    References
    ----------
    .. [1] Cheng, J. Y., Goh, H., Dogrusoz, K., Tuzel, O., & Azemi, E. (2020).
       Subject-aware contrastive learning for biosignals. arXiv preprint
       arXiv:2007.04871.
    .. [2] Mohsenvand, M. N., Izadi, M. R., & Maes, P. (2020). Contrastive
       Representation Learning for Electroencephalogram Classification. In
       Machine Learning for Health (pp. 238-253). PMLR.
    """
    if bandwidth == 0:
        return X, y
    transformed_X = X.clone()
    for c, (sample, notched_freq) in enumerate(
            zip(transformed_X, freqs_to_notch)):
        sample = sample.cpu().numpy().astype(np.float64)
        transformed_X[c] = torch.as_tensor(notch_filter(
            sample,
            Fs=sfreq,
            freqs=notched_freq,
            method='fir',
            notch_widths=bandwidth,
            verbose=False
        ))
    return transformed_X, y
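A minimal usage sketch on a random batch (shapes, sampling rate and bandwidth are illustrative; assumes `torch`, `numpy` and `mne.filter.notch_filter` are importable):

import numpy as np
import torch

batch_size, n_channels, n_times = 8, 22, 1000
sfreq = 250.0
X = torch.randn(batch_size, n_channels, n_times)
y = torch.zeros(batch_size, dtype=torch.long)
# one notch centre per sample, kept away from the band edges as the docstring requires
freqs_to_notch = np.random.uniform(low=5.0, high=sfreq / 2 - 5.0, size=batch_size)
X_aug, y_aug = bandstop_filter(X, y, sfreq, bandwidth=5.0, freqs_to_notch=freqs_to_notch)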
Example #14
def fig_power(po, pos, sig_loc, start=400, stop=405, Title='', asteriks=[0, 0]):
    global freq
    from mpl_toolkits.axes_grid1.inset_locator import inset_axes
    ax1 = py.subplot(gs[pos[2]:pos[3], pos[0]:pos[1]])
    py.title(Title, fontsize=15)
    set_axis(ax1, -0.1, 1.05, letter=po)
    labels = ['Olf. bulb', 'Thalamus', 'Visual ctrx']
    for i in range(2, -1, -1):
        sig_loc[i] = notch_filter(sig_loc[i], Fs, np.arange(50, 450, 50))
        freq, sp = welch(sig_loc[i], Fs, nperseg=1 * Fs)
        ax1.plot(freq, sp, lw=4, label=labels[i])
        # ax1.text(asteriks[0], asteriks[1], '*', fontsize=20)
        py.arrow(asteriks[0], asteriks[1], 0, -12, length_includes_head=True,
                 clip_on=False, head_width=2, head_length=4)
    # py.ylim(.1, 10e4)
    py.ylim(0, 220)
    py.xlim(0, 155)
    py.ylabel('power $mV^2$')
    ax1.spines['right'].set_visible(False)
    ax1.spines['top'].set_visible(False)
    py.xlabel('Frequency (Hz)')
    if po != 'B1':
        # rename to avoid shadowing the imported inset_axes function
        ax_inset = inset_axes(ax1, width="23%", height=1.0, loc=1)
        for n in range(3, sig_loc.shape[0]):
            sig_loc[n] = notch_filter(sig_loc[n], Fs, np.arange(50, 450, 50))
            freq, sp = welch(sig_loc[n], Fs, nperseg=1 * Fs)
            py.plot(freq, sp, lw=2, color='green')
        py.ylim(0, 220)
        py.xlim(0, 155)
        py.xticks(fontsize=11)
        py.yticks(fontsize=11)
        # py.ylim(.1, 10e4)
        # py.yscale('log')
        # py.text(200, 100, 'Olf. bulb propofol')
        py.xlabel('Frequency (Hz)')
    if po == 'B1':
        ket = mpatches.Patch(color='green', label='Olf. bulb')
        kx = mpatches.Patch(color='orange', label='Thalamus LGN')
        gam = mpatches.Patch(color='blue', label='Visual cortex')
        ax1.legend(loc='lower right', bbox_to_anchor=(1.2, .7),
                   handles=[ket, kx, gam], ncol=1, frameon=True, fontsize=15)
Example #15
    def process(self, data):
        if self.type == 'low-pass':
            return low_pass_filter(data, **self.params)
        elif self.type == 'high-pass':
            return high_pass_filter(data, **self.params)
        elif self.type == 'band-pass':
            return band_pass_filter(data, **self.params)
        elif self.type == 'band-stop':
            return band_stop_filter(data, **self.params)
        elif self.type == 'notch':
            return notch_filter(data, **self.params)
        else:
            raise ValueError('Unsupported filter type: {}'.format(self.type))
Example #16
def test_notch_filters():
    """Test notch filters
    """
    tempdir = _TempDir()
    log_file = op.join(tempdir, 'temp_log.txt')
    # let's use an ugly, prime sfreq for fun
    sfreq = 487.0
    sig_len_secs = 20
    t = np.arange(0, int(sig_len_secs * sfreq)) / sfreq
    freqs = np.arange(60, 241, 60)

    # make a "signal"
    rng = np.random.RandomState(0)
    a = rng.randn(int(sig_len_secs * sfreq))
    orig_power = np.sqrt(np.mean(a**2))
    # make line noise
    a += np.sum([np.sin(2 * np.pi * f * t) for f in freqs], axis=0)

    # only allow None line_freqs with 'spectrum_fit' mode
    assert_raises(ValueError, notch_filter, a, sfreq, None, 'fft')
    assert_raises(ValueError, notch_filter, a, sfreq, None, 'iir')
    methods = ['spectrum_fit', 'spectrum_fit', 'fft', 'fft', 'iir']
    filter_lengths = [None, None, None, 8192, None]
    line_freqs = [None, freqs, freqs, freqs, freqs]
    tols = [2, 1, 1, 1]
    for meth, lf, fl, tol in zip(methods, line_freqs, filter_lengths, tols):
        if lf is None:
            set_log_file(log_file, overwrite=True)

        b = notch_filter(a,
                         sfreq,
                         lf,
                         filter_length=fl,
                         method=meth,
                         verbose='INFO')

        if lf is None:
            set_log_file()
            with open(log_file) as fid:
                out = fid.readlines()
            if len(out) != 2:
                raise ValueError('Detected frequencies not logged properly')
            out = np.fromstring(out[1], sep=', ')
            assert_array_almost_equal(out, freqs)
        new_power = np.sqrt(sum_squared(b) / b.size)
        assert_almost_equal(new_power, orig_power, tol)
Example #17
    def process(self, data):
        # fix for new MNE requirements
        import numpy as np
        data = np.asarray(data, dtype=np.float64)

        if self.type == 'low-pass':
            return low_pass_filter(data, **self.params)
        elif self.type == 'high-pass':
            return high_pass_filter(data, **self.params)
        elif self.type == 'band-pass':
            return band_pass_filter(data, **self.params)
        elif self.type == 'band-stop':
            return band_stop_filter(data, **self.params)
        elif self.type == 'notch':
            return notch_filter(data, **self.params)
        else:
            raise ValueError('Unsupported filter type: {}'.format(self.type))
Example #18
    def process(self, data):
        # fix for new MNE requirements
        import numpy as np
        data = np.asarray(data, dtype=np.float64)

        if self.type == 'low-pass':
            return low_pass_filter(data, **self.params)
        elif self.type == 'high-pass':
            return high_pass_filter(data, **self.params)
        elif self.type == 'band-pass':
            return band_pass_filter(data, **self.params)
        elif self.type == 'band-stop':
            return band_stop_filter(data, **self.params)
        elif self.type == 'notch':
            return notch_filter(data, **self.params)
        else:
            raise ValueError('Unsupported filter type: {}'.format(self.type))
Example #19
def test_notch_filters():
    """Test notch filters
    """
    tempdir = _TempDir()
    log_file = op.join(tempdir, 'temp_log.txt')
    # let's use an ugly, prime sfreq for fun
    sfreq = 487.0
    sig_len_secs = 20
    t = np.arange(0, int(sig_len_secs * sfreq)) / sfreq
    freqs = np.arange(60, 241, 60)

    # make a "signal"
    rng = np.random.RandomState(0)
    a = rng.randn(int(sig_len_secs * sfreq))
    orig_power = np.sqrt(np.mean(a ** 2))
    # make line noise
    a += np.sum([np.sin(2 * np.pi * f * t) for f in freqs], axis=0)

    # only allow None line_freqs with 'spectrum_fit' mode
    assert_raises(ValueError, notch_filter, a, sfreq, None, 'fft')
    assert_raises(ValueError, notch_filter, a, sfreq, None, 'iir')
    methods = ['spectrum_fit', 'spectrum_fit', 'fft', 'fft', 'iir']
    filter_lengths = [None, None, None, 8192, None]
    line_freqs = [None, freqs, freqs, freqs, freqs]
    tols = [2, 1, 1, 1]
    for meth, lf, fl, tol in zip(methods, line_freqs, filter_lengths, tols):
        if lf is None:
            set_log_file(log_file, overwrite=True)

        b = notch_filter(a, sfreq, lf, filter_length=fl, method=meth,
                         verbose='INFO')

        if lf is None:
            set_log_file()
            with open(log_file) as fid:
                out = fid.readlines()
            if len(out) != 2:
                raise ValueError('Detected frequencies not logged properly')
            out = np.fromstring(out[1], sep=', ')
            assert_array_almost_equal(out, freqs)
        new_power = np.sqrt(sum_squared(b) / b.size)
        assert_almost_equal(new_power, orig_power, tol)
Example #20
def test_notch_filters():
    """Test notch filters
    """
    # let's use an ugly, prime Fs for fun
    Fs = 487.0
    sig_len_secs = 20
    t = np.arange(0, sig_len_secs * Fs) / Fs
    freqs = np.arange(60, 241, 60)

    # make a "signal"
    rng = np.random.RandomState(0)
    a = rng.randn(int(sig_len_secs * Fs))  # cast to int; randn does not accept float sizes
    orig_power = np.sqrt(np.mean(a ** 2))
    # make line noise
    a += np.sum([np.sin(2 * np.pi * f * t) for f in freqs], axis=0)

    # only allow None line_freqs with 'spectrum_fit' mode
    assert_raises(ValueError, notch_filter, a, Fs, None, 'fft')
    assert_raises(ValueError, notch_filter, a, Fs, None, 'iir')
    methods = ['spectrum_fit', 'spectrum_fit', 'fft', 'fft', 'iir']
    filter_lengths = [None, None, None, 8192, None]
    line_freqs = [None, freqs, freqs, freqs, freqs]
    tols = [2, 1, 1, 1]
    for meth, lf, fl, tol in zip(methods, line_freqs, filter_lengths, tols):
        if lf is None:
            set_log_file(log_file)

        b = notch_filter(a, Fs, lf, filter_length=fl, method=meth,
                         verbose='INFO')

        if lf is None:
            set_log_file()
            out = open(log_file).readlines()
            if len(out) != 2:
                raise ValueError('Detected frequencies not logged properly')
            out = np.fromstring(out[1], sep=', ')
            assert_array_almost_equal(out, freqs)
        new_power = np.sqrt(np.mean(b ** 2))
        assert_almost_equal(new_power, orig_power, tol)
Example #21
def segment_EEG(EEG,
                epoch_time,
                Fs,
                newFs,
                NW,
                amplitude_thres=500,
                notch_freq=None,
                bandpass_freq=None,
                start_end_remove_window_num=0,
                to_remove_mean=False,
                n_jobs=1,
                subject_file_name=''):  #
    """Segment EEG signals.
    """
    std_thres1 = 0.02
    std_thres2 = 0.1
    flat_seconds = 2

    if to_remove_mean:
        EEG = EEG - np.mean(EEG, axis=1, keepdims=True)
    epoch_size = int(round(epoch_time * Fs))
    flat_length = int(round(flat_seconds * Fs))

    ## filtering

    EEG = notch_filter(EEG, Fs, notch_freq, n_jobs=-1,
                       verbose='error')  # EEG is still the continuous (#ch, #samples) array here
    EEG = filter_data(detrend(EEG, axis=1),
                      Fs,
                      bandpass_freq[0],
                      bandpass_freq[1],
                      n_jobs=-1,
                      verbose='error')

    ## segment

    start_ids = np.arange(0, EEG.shape[1] - epoch_size + 1, epoch_size)
    if start_end_remove_window_num > 0:
        start_ids = start_ids[
            start_end_remove_window_num:-start_end_remove_window_num]
    seg_masks = [seg_mask_explanation[0]] * len(start_ids)
    EEG_segs = EEG[:, list(map(lambda x: np.arange(x, x + epoch_size),
                               start_ids))].transpose(1, 0, 2)  # (#window, #ch, epoch_size+2padding)

    ## resampling

    mne_epochs = mne.EpochsArray(detrend(EEG_segs, axis=2),
                                 mne.create_info(ch_names=list(
                                     map(str, range(EEG_segs.shape[1]))),
                                                 sfreq=Fs,
                                                 ch_types='eeg'),
                                 verbose=False)
    if newFs != Fs:
        Fs = newFs
        mne_epochs.resample(Fs, n_jobs=n_jobs)
        EEG_segs = mne_epochs.get_data()
        epoch_size = int(round(epoch_time * Fs))
        flat_length = int(round(flat_seconds * Fs))

    ## calculate spectrogram

    BW = NW * 2. / epoch_time
    specs, freq = mne.time_frequency.psd_multitaper(mne_epochs,
                                                    fmin=bandpass_freq[0],
                                                    fmax=bandpass_freq[1],
                                                    adaptive=False,
                                                    low_bias=False,
                                                    n_jobs=n_jobs,
                                                    verbose='ERROR',
                                                    bandwidth=BW,
                                                    normalization='full')

    ## mark artifacts

    # nan in signal
    nan2d = np.any(np.isnan(EEG_segs), axis=2)
    nan1d = np.where(np.any(nan2d, axis=1))[0]
    for i in nan1d:
        seg_masks[i] = seg_mask_explanation[4]

    # flat signal
    short_segs = EEG_segs.reshape(EEG_segs.shape[0], EEG_segs.shape[1],
                                  EEG_segs.shape[2] // flat_length,
                                  flat_length)
    flat2d = np.any(detrend(short_segs, axis=3).std(axis=3) <= std_thres1,
                    axis=2)
    flat2d = np.logical_or(flat2d, np.std(EEG_segs, axis=2) <= std_thres2)
    flat1d = np.where(np.any(flat2d, axis=1))[0]
    for i in flat1d:
        seg_masks[i] = seg_mask_explanation[6]

    # big amplitude
    amplitude_large2d = np.any(np.abs(EEG_segs) > amplitude_thres, axis=2)
    amplitude_large1d = np.where(np.any(amplitude_large2d, axis=1))[0]
    for i in amplitude_large1d:
        seg_masks[i] = seg_mask_explanation[5]

    return EEG_segs, start_ids, seg_masks, specs, freq
Example #22
# The averages for each condition are computed.
evoked_std = epochs_standard.average()
evoked_dev = epochs_deviant.average()
del epochs_standard, epochs_deviant

###############################################################################
# A typical preprocessing step is the removal of the power line artifact (50 Hz
# or 60 Hz). Here we notch filter the data at 60, 120 and 180 Hz to remove the
# original 60 Hz artifact and its harmonics. Normally this would be done on the
# raw data (with :func:`mne.io.Raw.filter`), but to reduce the memory
# consumption of this tutorial, we do it at the evoked stage.
if use_precomputed:
    sfreq = evoked_std.info['sfreq']
    notches = [60, 120, 180]
    for evoked in (evoked_std, evoked_dev):
        evoked.data[:] = notch_filter(evoked.data, sfreq, notches)
        evoked.data[:] = filter_data(evoked.data, sfreq, None, 100)

###############################################################################
# Here we plot the ERF of the standard and deviant conditions. In both
# conditions we can see the P50 and N100 responses. The mismatch negativity is
# visible only in the deviant condition around 100-200 ms. P200 is also visible
# around 170 ms in both conditions, but it is much stronger in the standard
# condition. P300 is visible in the deviant condition only (decision making in
# preparation for the button press). You can view the topographies for a
# certain time span by clicking and holding the left mouse button to paint an
# area.
evoked_std.plot(window_title='Standard', gfp=True)
evoked_dev.plot(window_title='Deviant', gfp=True)

###############################################################################
# Show activations as topography figures.
Example #23
evoked_std = epochs_standard.average()
evoked_dev = epochs_deviant.average()
del epochs_standard, epochs_deviant

###############################################################################
# A typical preprocessing step is the removal of the power line artifact (50 Hz
# or 60 Hz). Here we notch filter the data at 60, 120 and 180 Hz to remove the
# original 60 Hz artifact and its harmonics. Normally this would be done on the
# raw data (with :func:`mne.io.Raw.filter`), but to reduce the memory
# consumption of this tutorial, we do it at the evoked stage.
if use_precomputed:
    sfreq = evoked_std.info['sfreq']
    nchan = evoked_std.info['nchan']
    notches = [60, 120, 180]
    for ch_idx in range(nchan):
        evoked_std.data[ch_idx] = notch_filter(evoked_std.data[ch_idx], sfreq,
                                               notches, verbose='ERROR')
        evoked_dev.data[ch_idx] = notch_filter(evoked_dev.data[ch_idx], sfreq,
                                               notches, verbose='ERROR')
        evoked_std.data[ch_idx] = low_pass_filter(evoked_std.data[ch_idx],
                                                  sfreq, 100, verbose='ERROR')
        evoked_dev.data[ch_idx] = low_pass_filter(evoked_dev.data[ch_idx],
                                                  sfreq, 100, verbose='ERROR')

###############################################################################
# Here we plot the ERF of the standard and deviant conditions. In both
# conditions we can see the P50 and N100 responses. The mismatch negativity is
# visible only in the deviant condition around 100-200 ms. P200 is also visible
# around 170 ms in both conditions, but it is much stronger in the standard
# condition. P300 is visible in the deviant condition only (decision making in
# preparation for the button press). You can view the topographies for a
# certain time span by clicking and holding the left mouse button to paint an
# area.
evoked_std.plot(window_title='Standard', gfp=True)
evoked_dev.plot(window_title='Deviant', gfp=True)

###############################################################################
# Show activations as topography figures.