Example #1
def cli(matfiles, savename, rec_type, infosrc):
    """
    Convert Brainstorm epochs to an mne.Epochs object.
    """
    if infosrc:
        if rec_type == 'ds':
            from mne.io import read_raw_ctf as read_raw
        elif rec_type == 'fif':
            from mne.io import Raw as read_raw
        with nostdout():
            raw_with_info = read_raw(infosrc)

    isFirst = True
    for fname in matfiles:
        with nostdout():
            mat_epoch = sio.loadmat(fname)
            # click.echo(mat_epoch)
        if isFirst:
            data = mat_epoch['F']
            times = mat_epoch['Time']
            # print times[0,-1]
            isFirst = False
        else:
            data = np.dstack((data, mat_epoch['F']))
        # click.echo(data.shape)
    data = data.transpose((2,0,1))


    n_channels = data.shape[1]
    sfreq = times.shape[1] / (times[0,-1] + times[0,1])
    
    
    if infosrc:
        if rec_type == 'ds':
            from mne.io import read_raw_ctf as read_raw
        elif rec_type == 'fif':
            from mne.io import Raw as read_raw

        with nostdout():
            raw_with_info = read_raw(infosrc)
        good_info = raw_with_info.info
        # click.echo(len(good_info['ch_names']))

        ch_types = [channel_type(good_info, idx) for idx in range(n_channels)]

        # click.echo(len(ch_types))

        info = create_info(ch_names=good_info['ch_names'], sfreq=sfreq, ch_types=ch_types)
    else:
        ch_types='mag'
        info = create_info(n_channels, sfreq, ch_types)

    with nostdout():
        epochs = EpochsArray(data, info)
    epochs.save(savename)
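# A brief, hedged illustration (not part of the original source) of what the
# sfreq line above computes: for a Brainstorm-style 1 x n_samples Time row
# starting at 0, times[0, -1] is the last sample time and times[0, 1] is the
# sample step, so their sum approximates the spanned duration and the ratio
# recovers the sampling rate. The values below are synthetic.
import numpy as np

times = np.arange(0, 1.0, 0.004)[np.newaxis, :]        # 250 samples, 4 ms steps
sfreq = times.shape[1] / (times[0, -1] + times[0, 1])   # -> 250.0 Hz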
Example #2
def test_picks_by_channels():
    """Test creating pick_lists."""
    rng = np.random.RandomState(909)

    test_data = rng.random_sample((4, 2000))
    ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]]
    ch_types = ['grad', 'mag', 'mag', 'eeg']
    sfreq = 250.0
    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
    _assert_channel_types(info)
    raw = RawArray(test_data, info)

    pick_list = _picks_by_type(raw.info)
    assert_equal(len(pick_list), 3)
    assert_equal(pick_list[0][0], 'mag')
    pick_list2 = _picks_by_type(raw.info, meg_combined=False)
    assert_equal(len(pick_list), len(pick_list2))
    assert_equal(pick_list2[0][0], 'mag')

    pick_list2 = _picks_by_type(raw.info, meg_combined=True)
    assert_equal(len(pick_list), len(pick_list2) + 1)
    assert_equal(pick_list2[0][0], 'meg')

    test_data = rng.random_sample((4, 2000))
    ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]]
    ch_types = ['mag', 'mag', 'mag', 'mag']
    sfreq = 250.0
    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
    raw = RawArray(test_data, info)
    # This acts as a set, not an order
    assert_array_equal(pick_channels(info['ch_names'], ['MEG 002', 'MEG 001']),
                       [0, 1])

    # Make sure checks for list input work.
    pytest.raises(ValueError, pick_channels, ch_names, 'MEG 001')
    pytest.raises(ValueError, pick_channels, ch_names, ['MEG 001'], 'hi')

    pick_list = _picks_by_type(raw.info)
    assert_equal(len(pick_list), 1)
    assert_equal(pick_list[0][0], 'mag')
    pick_list2 = _picks_by_type(raw.info, meg_combined=True)
    assert_equal(len(pick_list), len(pick_list2))
    assert_equal(pick_list2[0][0], 'mag')

    # pick_types type check
    pytest.raises(ValueError, raw.pick_types, eeg='string')

    # duplicate check
    names = ['MEG 002', 'MEG 002']
    assert len(pick_channels(raw.info['ch_names'], names)) == 1
    assert len(raw.copy().pick_channels(names)[0][0]) == 1
Example #3
def _generate_coherence_data():
    """Create an epochs object with coherence at 22Hz between channels 1 and 3.

    A base 10 Hz sine wave is generated for all channels, but with different
    phases, which means no actual coherence. A  22Hz sine wave is laid on top
    for channels 1 and 3, with the same phase, so there is coherence between
    these channels.
    """
    ch_names = ['CH1', 'CH2', 'CH3']
    sfreq = 50.
    info = mne.create_info(ch_names, sfreq, 'eeg')
    tstep = 1. / sfreq
    n_samples = int(10 * sfreq)  # 10 seconds of data
    times = np.arange(n_samples) * tstep
    events = np.array([[0, 1, 1]])  # one event

    # Phases for the signals
    phases = np.arange(info['nchan']) * 0.3 * np.pi

    # Generate 10 Hz sine waves with different phases
    signal = np.vstack([np.sin(times * 2 * np.pi * 10 + phase)
                        for phase in phases])

    data = np.zeros((1, info['nchan'], n_samples))
    data[0, :, :] = signal

    # Generate 22Hz sine wave at the first and last electrodes with the same
    # phase.
    signal = np.sin(times * 2 * np.pi * 22)
    data[0, [0, -1], :] += signal

    return mne.EpochsArray(data, info, events, baseline=(0, times[-1]))
def test_apply_function_verbose():
    """Test apply function verbosity
    """
    n_chan = 2
    n_times = 3
    ch_names = [str(ii) for ii in range(n_chan)]
    raw = RawArray(np.zeros((n_chan, n_times)),
                   create_info(ch_names, 1., 'mag'))
    # test return types in both code paths (parallel / 1 job)
    assert_raises(TypeError, raw.apply_function, bad_1,
                  None, None, 1)
    assert_raises(ValueError, raw.apply_function, bad_2,
                  None, None, 1)
    assert_raises(TypeError, raw.apply_function, bad_1,
                  None, None, 2)
    assert_raises(ValueError, raw.apply_function, bad_2,
                  None, None, 2)

    # check our arguments
    tempdir = _TempDir()
    test_name = op.join(tempdir, 'test.log')
    set_log_file(test_name)
    try:
        raw.apply_function(printer, None, None, 1, verbose=False)
        with open(test_name) as fid:
            assert_equal(len(fid.readlines()), 0)
        raw.apply_function(printer, None, None, 1, verbose=True)
        with open(test_name) as fid:
            assert_equal(len(fid.readlines()), n_chan)
    finally:
        set_log_file(None)
Example #5
def test_pick_seeg_ecog():
    """Test picking with sEEG and ECoG
    """
    names = 'A1 A2 Fz O OTp1 OTp2 E1 OTp3 E2 E3'.split()
    types = 'mag mag eeg eeg seeg seeg ecog seeg ecog ecog'.split()
    info = create_info(names, 1024., types)
    idx = channel_indices_by_type(info)
    assert_array_equal(idx['mag'], [0, 1])
    assert_array_equal(idx['eeg'], [2, 3])
    assert_array_equal(idx['seeg'], [4, 5, 7])
    assert_array_equal(idx['ecog'], [6, 8, 9])
    assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 7])
    for i, t in enumerate(types):
        assert_equal(channel_type(info, i), types[i])
    raw = RawArray(np.zeros((len(names), 10)), info)
    events = np.array([[1, 0, 0], [2, 0, 0]])
    epochs = Epochs(raw, events, {'event': 0}, -1e-5, 1e-5, add_eeg_ref=False)
    evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True))
    e_seeg = evoked.copy().pick_types(meg=False, seeg=True)
    for l, r in zip(e_seeg.ch_names, [names[4], names[5], names[7]]):
        assert_equal(l, r)
    # Deal with constant debacle
    raw = read_raw_fif(op.join(io_dir, 'tests', 'data',
                               'test_chpi_raw_sss.fif'), add_eeg_ref=False)
    assert_equal(len(pick_types(raw.info, meg=False, seeg=True, ecog=True)), 0)
def test_multitaper_psd():
    """ Test multi-taper PSD computation """

    import nitime as ni
    n_times = 1000
    n_channels = 5
    data = np.random.RandomState(0).randn(n_channels, n_times)
    sfreq = 500
    info = create_info(n_channels, sfreq, 'eeg')
    raw = RawArray(data, info)
    assert_raises(ValueError, psd_multitaper, raw, sfreq, normalization='foo')
    ni_5 = (LooseVersion(ni.__version__) >= LooseVersion('0.5'))
    norm = 'full' if ni_5 else 'length'

    for adaptive, n_jobs in zip((False, True, True), (1, 1, 2)):
        psd, freqs = psd_multitaper(raw, adaptive=adaptive,
                                    n_jobs=n_jobs,
                                    normalization=norm)
        freqs_ni, psd_ni, _ = ni.algorithms.spectral.multi_taper_psd(
            data, sfreq, adaptive=adaptive, jackknife=False)

        # for some reason nitime returns n_times + 1 frequency points
        # causing the value at 0 to be different
        assert_array_almost_equal(psd[:, 1:], psd_ni[:, 1:-1], decimal=3)
        assert_array_almost_equal(freqs, freqs_ni[:-1])
Example #7
File: io.py Project: choldgraf/ecogtools
    def to_mne(self, states=True):
        """Convert data into an MNE Raw object.

        Parameters
        ----------
        states : bool
            Whether to include state channels in the output.

        Returns
        -------
        raw : instance of MNE Raw
            The data in MNE Raw format.
        """
        if len(self.ch_names) == 0:
            ch_names = ['ch_{}'.format(ii)
                        for ii in range(self.data.shape[0])]
        ch_types = ['eeg'] * len(ch_names)
        if states is True:
            state_names = ['state_{}'.format(ii)
                           for ii in range(self.states.shape[0])]
            state_types = ['misc'] * len(state_names)
            data = np.vstack([self.data, self.states])
        else:
            state_names = state_types = []
            data = self.data
        info = mne.create_info(ch_names + state_names, self.sfreq,
                               ch_types + state_types)
        raw = mne.io.RawArray(data, info)
        return raw
Example #8
def test_add_channels():
    """Test tfr splitting / re-appending channel types."""
    data = np.zeros((6, 2, 3))
    times = np.array([.1, .2, .3])
    freqs = np.array([.10, .20])
    info = mne.create_info(
        ['MEG 001', 'MEG 002', 'MEG 003', 'EEG 001', 'EEG 002', 'STIM 001'],
        1000., ['mag', 'mag', 'mag', 'eeg', 'eeg', 'stim'])
    tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
                     nave=20, comment='test', method='crazy-tfr')
    tfr_eeg = tfr.copy().pick_types(meg=False, eeg=True)
    tfr_meg = tfr.copy().pick_types(meg=True)
    tfr_stim = tfr.copy().pick_types(meg=False, stim=True)
    tfr_eeg_meg = tfr.copy().pick_types(meg=True, eeg=True)
    tfr_new = tfr_meg.copy().add_channels([tfr_eeg, tfr_stim])
    assert all(ch in tfr_new.ch_names
               for ch in tfr_stim.ch_names + tfr_meg.ch_names)
    tfr_new = tfr_meg.copy().add_channels([tfr_eeg])

    assert all(ch in tfr_new.ch_names
               for ch in tfr.ch_names if ch != 'STIM 001')
    assert_array_equal(tfr_new.data, tfr_eeg_meg.data)
    assert all(ch not in tfr_new.ch_names for ch in tfr_stim.ch_names)

    # Now test errors
    tfr_badsf = tfr_eeg.copy()
    tfr_badsf.info['sfreq'] = 3.1415927
    tfr_eeg = tfr_eeg.crop(-.1, .1)

    pytest.raises(RuntimeError, tfr_meg.add_channels, [tfr_badsf])
    pytest.raises(AssertionError, tfr_meg.add_channels, [tfr_eeg])
    pytest.raises(ValueError, tfr_meg.add_channels, [tfr_meg])
    pytest.raises(TypeError, tfr_meg.add_channels, tfr_badsf)
Example #9
def test_mne_c_design():
    """Test MNE-C filter design."""
    tempdir = _TempDir()
    temp_fname = op.join(tempdir, 'test_raw.fif')
    out_fname = op.join(tempdir, 'test_c_raw.fif')
    x = np.zeros((1, 10001))
    x[0, 5000] = 1.
    time_sl = slice(5000 - 4096, 5000 + 4097)
    sfreq = 1000.
    RawArray(x, create_info(1, sfreq, 'eeg')).save(temp_fname)

    tols = dict(rtol=1e-4, atol=1e-4)
    cmd = ('mne_process_raw', '--projoff', '--raw', temp_fname,
           '--save', out_fname)
    run_subprocess(cmd)
    h = design_mne_c_filter(sfreq, None, 40)
    h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
    assert_allclose(h, h_c, **tols)

    run_subprocess(cmd + ('--highpass', '5', '--highpassw', '2.5'))
    h = design_mne_c_filter(sfreq, 5, 40, 2.5)
    h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
    assert_allclose(h, h_c, **tols)

    run_subprocess(cmd + ('--lowpass', '1000', '--highpass', '10'))
    h = design_mne_c_filter(sfreq, 10, None, verbose=True)
    h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
    assert_allclose(h, h_c, **tols)
Example #10
def test_orig_units():
    """Test the error handling for original units."""
    # Should work fine
    info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg')
    BaseRaw(info, last_samps=[1], orig_units={'Cz': 'nV'})

    # Should complain that channel Cz does not have a corresponding original
    # unit.
    with pytest.raises(ValueError, match='has no associated original unit.'):
        info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg')
        BaseRaw(info, last_samps=[1], orig_units={'not_Cz': 'nV'})

    # Test that a non-dict orig_units argument raises a ValueError
    with pytest.raises(ValueError, match='orig_units must be of type dict'):
        info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg')
        BaseRaw(info, last_samps=[1], orig_units=True)
Example #11
def test_bad_channels():
    """Test exception when unsupported channels are used."""
    chs = [i for i in _kind_dict]
    data_chs = _DATA_CH_TYPES_SPLIT + ['eog']
    chs_bad = list(set(chs) - set(data_chs))
    info = create_info(len(chs), 500, chs)
    data = np.random.rand(len(chs), 50)
    raw = RawArray(data, info)
    data = np.random.rand(100, len(chs), 50)
    epochs = EpochsArray(data, info)

    n_components = 0.9
    ica = ICA(n_components=n_components, method='fastica')

    for inst in [raw, epochs]:
        for ch in chs_bad:
            # Test case for only bad channels
            picks_bad1 = pick_types(inst.info, meg=False,
                                    **{str(ch): True})
            # Test case for good and bad channels
            picks_bad2 = pick_types(inst.info, meg=True,
                                    **{str(ch): True})
            assert_raises(ValueError, ica.fit, inst, picks=picks_bad1)
            assert_raises(ValueError, ica.fit, inst, picks=picks_bad2)
        assert_raises(ValueError, ica.fit, inst, picks=[])
Example #12
def test_filter_picks():
    """Test filtering default channel picks"""
    ch_types = ['mag', 'grad', 'eeg', 'seeg', 'misc', 'stim']
    info = create_info(ch_names=ch_types, ch_types=ch_types, sfreq=256)
    raw = RawArray(data=np.zeros((len(ch_types), 1000)), info=info)

    # -- Deal with meg mag grad exception
    ch_types = ('misc', 'stim', 'meg', 'eeg', 'seeg')

    # -- Filter data channels
    for ch_type in ('mag', 'grad', 'eeg', 'seeg'):
        picks = dict((ch, ch == ch_type) for ch in ch_types)
        picks['meg'] = ch_type if ch_type in ('mag', 'grad') else False
        raw_ = raw.pick_types(copy=True, **picks)
        # Avoid RuntimeWarning due to Attenuation
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            raw_.filter(10, 30)
            assert_true(len(w) == 1)

    # -- Error if no data channel
    for ch_type in ('misc', 'stim'):
        picks = dict((ch, ch == ch_type) for ch in ch_types)
        raw_ = raw.pick_types(copy=True, **picks)
        assert_raises(RuntimeError, raw_.filter, 10, 30)
def concatenate_epochs( epoch1, epoch2, ch_name, reversal = False ):
    epoch1_arr = epoch1.get_data()
    epoch2_arr = epoch2.get_data()
    comb_arr = np.concatenate( [epoch1_arr, epoch2_arr] )

    comb_arr_sum = np.zeros(len(comb_arr[0][0]))
    for idx in range(0, len(comb_arr)):
        temp = comb_arr[idx][0]
        comb_arr_sum = comb_arr_sum + temp

    comb_arr_avg = np.array( [comb_arr_sum/len(comb_arr)] ) # get type 2d array

    if reversal:
        comb_arr_avg = comb_arr_avg * (-1)

    info_comb = mne.create_info(
        ch_names = [ch_name]
        , sfreq = epoch1.info['sfreq']
        , ch_types = 'eeg'
        )

    evoked = mne.EvokedArray(comb_arr_avg, info_comb, tmin=epoch1.tmin)
    # evoked.plot()

    return evoked
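# A hedged usage sketch (not from the original source): two synthetic
# single-channel EpochsArray objects are combined with the helper above;
# 'EEG1-avg' is an illustrative channel name.
import numpy as np
import mne

_info = mne.create_info(['EEG1'], sfreq=100., ch_types='eeg')
ep1 = mne.EpochsArray(np.random.randn(3, 1, 50), _info)
ep2 = mne.EpochsArray(np.random.randn(2, 1, 50), _info)
evoked = concatenate_epochs(ep1, ep2, ch_name='EEG1-avg', reversal=True)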
Example #14
def test_continuous_regression_with_overlap():
    """Test regression with overlap correction."""
    signal = np.zeros(100000)
    times = [1000, 2500, 3000, 5000, 5250, 7000, 7250, 8000]
    events = np.zeros((len(times), 3), int)
    events[:, 2] = 1
    events[:, 0] = times
    signal[events[:, 0]] = 1.
    effect = hann(101)
    signal = np.convolve(signal, effect)[:len(signal)]
    raw = RawArray(signal[np.newaxis, :], mne.create_info(1, 100, 'eeg'))

    assert_allclose(effect, linear_regression_raw(
        raw, events, {1: 1}, tmin=0)[1].data.flatten())

    # test that sklearn solvers can be used
    from sklearn.linear_model.ridge import ridge_regression

    def solver(X, y):
        return ridge_regression(X, y, alpha=0.)
    assert_allclose(effect, linear_regression_raw(
        raw, events, tmin=0, solver=solver)['1'].data.flatten())

    # test bad solvers
    def solT(X, y):
        return ridge_regression(X, y, alpha=0.).T
    assert_raises(ValueError, linear_regression_raw, raw, events, solver=solT)
    assert_raises(ValueError, linear_regression_raw, raw, events, solver='err')
    assert_raises(TypeError, linear_regression_raw, raw, events, solver=0)
Example #15
def test_multitaper_psd():
    """Test multi-taper PSD computation."""
    import nitime as ni
    for n_times in (100, 101):
        n_channels = 5
        data = np.random.RandomState(0).randn(n_channels, n_times)
        sfreq = 500
        info = create_info(n_channels, sfreq, 'eeg')
        raw = RawArray(data, info)
        pytest.raises(ValueError, psd_multitaper, raw, sfreq,
                      normalization='foo')
        ni_5 = (LooseVersion(ni.__version__) >= LooseVersion('0.5'))
        norm = 'full' if ni_5 else 'length'
        for adaptive, n_jobs in zip((False, True, True), (1, 1, 2)):
            psd, freqs = psd_multitaper(raw, adaptive=adaptive,
                                        n_jobs=n_jobs,
                                        normalization=norm)
            with warnings.catch_warnings(record=True):  # nitime integers
                freqs_ni, psd_ni, _ = ni.algorithms.spectral.multi_taper_psd(
                    data, sfreq, adaptive=adaptive, jackknife=False)
            assert_array_almost_equal(psd, psd_ni, decimal=4)
            if n_times % 2 == 0:
                # nitime's frequency definitions must be incorrect,
                # they give the same values for 100 and 101 samples
                assert_array_almost_equal(freqs, freqs_ni)
        with pytest.raises(ValueError, match='use a value of at least'):
            psd_multitaper(raw, bandwidth=4.9)
Example #16
def test_snr():
    """Test trial to trial coherence"""
    raw = mne.io.Raw(raw_fname)
    sfreq = int(raw.info['sfreq'])
    data, times = raw[0, :5 * sfreq]

    # Create fake epochs from copies of the raw + noise
    n_epochs = 40
    noise_amp = .01 * data.max()
    data = np.tile(data, [n_epochs, 1, 1])
    data += noise_amp * rng.randn(*data.shape)
    info = mne.create_info(['ch1'], raw.info['sfreq'], 'eeg')
    ev = np.vstack([np.arange(n_epochs),
                    np.zeros(n_epochs),
                    np.ones(n_epochs)]).T.astype(int)
    epochs = mne.epochs.EpochsArray(data, info, ev)

    # Test CC
    cc = snr_epochs(epochs, kind='corr')
    assert_true((cc > .99).all())

    # Test coherence
    coh, freqs = snr_epochs(epochs, fmin=2, kind='coh')
    assert_true((coh.mean(-1) > .99).all())

    # Test random signal
    data_rand = 10*rng.randn(*data.shape)
    epochs_rand = mne.epochs.EpochsArray(data_rand, info, ev)
    cc = snr_epochs(epochs_rand, kind='corr')
    assert_true(cc.mean() < .02)

    # Test incorrect inputs
    assert_raises(ValueError, snr_epochs, epochs, kind='foo')
Example #17
def write_mnefiff(data, filename):
    """Export data to MNE using FIFF format.

    Parameters
    ----------
    data : instance of ChanTime
        data with only one trial
    filename : path to file
        file to export to (include '.mat')

    Notes
    -----
    It cannot store data larger than 2 GB.
    The data is assumed to have only EEG electrodes.
    It overwrites a file if it exists.
    """
    from mne import create_info, set_log_level
    from mne.io import RawArray

    set_log_level(WARNING)

    TRIAL = 0
    info = create_info(list(data.axis['chan'][TRIAL]), data.s_freq, ['eeg', ] *
                       data.number_of('chan')[TRIAL])

    UNITS = 1e-6  # mne wants data in uV
    fiff = RawArray(data.data[0] * UNITS, info)

    if data.attr['chan']:
        fiff.set_channel_positions(data.attr['chan'].return_xyz(),
                                   data.attr['chan'].return_label())

    fiff.save(filename, overwrite=True)
Example #18
def creat_mne_raw_object(fname,read_events):
    # Read EEG file
    data = pd.read_csv(fname)
    
    # get channel names
    ch_names = list(data.columns[1:])
    
    # read EEG standard montage from mne
    montage = read_montage('standard_1005',ch_names)

    ch_type = ['eeg']*len(ch_names)
    data = 1e-6*np.array(data[ch_names]).T
    
    if read_events:
        # events file
        ev_fname = fname.replace('_data','_events')
        # read event file
        events = pd.read_csv(ev_fname)
        events_names = events.columns[1:]
        events_data = np.array(events[events_names]).T
        
        # define channel types: the first are EEG, the last 6 are stimulation channels
        ch_type.extend(['stim']*6)
        ch_names.extend(events_names)
        # concatenate event file and data
        data = np.concatenate((data,events_data))
        
    # create and populate MNE info structure
    info = create_info(ch_names,sfreq=500.0, ch_types=ch_type, montage=montage)
    info['filename'] = fname
    
    # create raw object 
    raw = RawArray(data,info,verbose=False)
    
    return raw
Example #19
def test_pick_bio():
    """Test picking BIO channels."""
    names = 'A1 A2 Fz O BIO1 BIO2 BIO3'.split()
    types = 'mag mag eeg eeg bio bio bio'.split()
    info = create_info(names, 1024., types)
    picks_by_type = [('mag', [0, 1]), ('eeg', [2, 3]), ('bio', [4, 5, 6])]
    assert_indexing(info, picks_by_type, all_data=False)
Example #20
def _read_evoked(fname, sensor_mode, info):
    """ helper to read evokeds """
    data = scio.loadmat(fname, squeeze_me=True)['data']
    ch_names = [ch for ch in data['label'].tolist()]

    times = data['time'].tolist()
    sfreq = 1. / np.diff(times)[0]
    if info is None:
        info = create_info(
            ch_names, ch_types=[sensor_mode] * len(ch_names),
            sfreq=sfreq)
    else:
        info = _hcp_pick_info(info, ch_names)
        info['sfreq'] = sfreq
    orig_labels = list(data['grad'].tolist()['label'].tolist())
    sel = [orig_labels.index(ch) for ch in ch_names]
    pos = data['grad'].tolist()['chanpos'].tolist()[sel]
    out = list()
    comment = ('_'.join(fname.split('/')[-1].split('_')[2:])
                  .replace('.mat', '')
                  .replace('_eravg_', '_')
                  .replace('[', '')
                  .replace(']', ''))
    for key, kind in (('var', 'standard_error'), ('avg', 'average')):
        evoked = EvokedArray(
            data=data[key].tolist(), info=info, tmin=min(times),
            kind=kind, comment=comment)
        evoked._set_channel_positions(pos, ch_names)
        out.append(evoked)
    return out
Example #21
def file_to_nparray(fname, sfreq=100.0, verbose=False):
    """
	Create a mne raw instance from csv file.
	"""
    # get channel names
    # in MNE, this means you must configure it with two arrays:
    # 1) an array of channel names as strings
    # 2) corresponding array of channel types. in our case, all channels are type 'eeg'
    ch_names = getChannelNames()
    ch_type = ["eeg"] * len(ch_names)

    # add one more channel called 'class_label' as type 'stim'
    ch_names.extend(["class_label"])
    ch_type.extend(["stim"])

    # Read EEG file
    data = pd.read_table(fname, header=None, names=ch_names)
    raw_data = np.array(data[ch_names]).T
    # print raw_data.shape

    # create and populate MNE info structure
    info = create_info(ch_names, sfreq=sfreq, ch_types=ch_type)
    info["filename"] = fname

    # create raw object
    return [raw_data, info]
Example #22
 def compute_bpm(self, y):
     raw = RawArray(np.array([y]),
                    create_info(['channel_0'], self.sampling_frequency,
                                ch_types=['grad']))
     ecg_epochs = mne.preprocessing.find_ecg_events(raw)
     bpm = ecg_epochs[2]
     return bpm
Example #23
def test_raw_reject():
    """Test raw data getter with annotation reject."""
    info = create_info(['a', 'b', 'c', 'd', 'e'], 100, ch_types='eeg')
    raw = RawArray(np.ones((5, 15000)), info)
    with warnings.catch_warnings(record=True):  # one outside range
        raw.annotations = Annotations([2, 100, 105, 148], [2, 8, 5, 8], 'BAD')
    data = raw.get_data([0, 1, 3, 4], 100, 11200, 'omit')
    assert_array_equal(data.shape, (4, 9900))

    # with orig_time and complete overlap
    raw = read_raw_fif(fif_fname)
    raw.annotations = Annotations([44, 47, 48], [1, 3, 1], 'BAD',
                                  raw.info['meas_date'])
    data, times = raw.get_data(range(10), 0, 6000, 'omit', True)
    assert_array_equal(data.shape, (10, 4799))
    assert_equal(times[-1], raw.times[5999])
    assert_array_equal(data[:, -100:], raw[:10, 5900:6000][0])

    data, times = raw.get_data(range(10), 0, 6000, 'NaN', True)
    assert_array_equal(data.shape, (10, 6000))
    assert_equal(times[-1], raw.times[5999])
    assert_true(np.isnan(data[:, 313:613]).all())  # 1s -2s
    assert_true(not np.isnan(data[:, 614]).any())
    assert_array_equal(data[:, -100:], raw[:10, 5900:6000][0])
    assert_array_equal(raw.get_data(), raw[:][0])

    # Test _sync_onset
    times = [10, -88, 190]
    onsets = _sync_onset(raw, times)
    assert_array_almost_equal(onsets, times - raw.first_samp /
                              raw.info['sfreq'])
    assert_array_almost_equal(times, _sync_onset(raw, onsets, True))
Example #24
def test_chunk_duration():
    """Test chunk_duration."""
    # create dummy raw
    raw = RawArray(data=np.empty([10, 10], dtype=np.float64),
                   info=create_info(ch_names=10, sfreq=1.),
                   first_samp=0)
    raw.info['meas_date'] = 0
    raw.set_annotations(Annotations(description='foo', onset=[0],
                                    duration=[10], orig_time=None))

    # expected_events = [[0, 0, 1], [0, 0, 1], [1, 0, 1], [1, 0, 1], ..
    #                    [9, 0, 1], [9, 0, 1]]
    expected_events = np.atleast_2d(np.repeat(range(10), repeats=2)).T
    expected_events = np.insert(expected_events, 1, 0, axis=1)
    expected_events = np.insert(expected_events, 2, 1, axis=1)

    events, events_id = events_from_annotations(raw, chunk_duration=.5,
                                                use_rounding=False)
    assert_array_equal(events, expected_events)

    # test chunk durations that do not fit equally in annotation duration
    expected_events = np.zeros((3, 3))
    expected_events[:, -1] = 1
    expected_events[:, 0] = np.arange(0, 9, step=3)
    events, events_id = events_from_annotations(raw, chunk_duration=3.)
    assert_array_equal(events, expected_events)
def creat_mne_raw_object(fname, read_events = True):

    print ("loading data from %s" %fname)
    data = pd.read_csv(fname)

    ch_names = list(data.columns[1:])

    montage = read_montage('standard_1005', ch_names)
    ch_type = ['eeg']*len(ch_names)
    data = 1e-6*np.array(data[ch_names]).T
    
    if read_events:
        ev_fname = fname.replace('_data', '_events')
        print (ev_fname)
        events = pd.read_csv(ev_fname)
        events_names = events.columns[1:]
        events_data = np.array(events[events_names]).T

        ch_type.extend(['stim']*6)
        ch_names.extend(events_names)
        data = np.concatenate((data, events_data))
        
    info = create_info(ch_names, sfreq=500.0, ch_types=ch_type, montage=montage)
    info['filename'] = fname
    raw = RawArray(data, info, verbose=True)

    return raw
Example #26
    def get_data(self, series, change=False):
        # Get the lift EEG data
        ws = self._series_data(series)
        lifts = self.lift_info()

        montage = mne.channels.read_montage('standard_1005', ws.names.eeg)

        ev_names = self.all_event_names()

        ch_names = list(ws.names.eeg) + ev_names

        X = []
        y = []
        # loop through the windows, creating a raw object for each one
        for win_num, window in enumerate(ws.win):
            events = np.zeros((window.eeg.shape[0], len(ev_names)))
            row = lifts[(lifts['Run'] == series) & (lifts['Lift'] == win_num + 1)]
            if change and np.isnan(row.iloc[0]['PrevW']):
                continue

            for ev_num, ev_name in enumerate(ev_names):
                t = row.iloc[0]['t' + ev_name]
                event_rows = np.abs(window.eeg_t - t) <= 0.15 + 1e-10
                events[event_rows, ev_num] = 1
            data = np.concatenate((1e-6 * window.eeg.T, events.T))
            ch_type = ['eeg'] * len(ws.names.eeg) + ['stim'] * len(ev_names)

            info = mne.create_info(ch_names, sfreq=500., ch_types=ch_type, montage=montage)
            # Return a dictionary, in case we want to incorporate non-EEG data, e.g. previous weight
            X.append({'eeg': mne.io.RawArray(data, info, verbose=False)})
            #Cast weight to integer, so that we don't have floating point problems
            yval = row.iloc[0]['CurW']==row.iloc[0]['PrevW'] if change else row.iloc[0]['CurW']
            y.append(yval)

        return X, y
Example #27
def _raw_annot(meas_date, orig_time):
    info = create_info(ch_names=10, sfreq=10.)
    raw = RawArray(data=np.empty((10, 10)), info=info, first_samp=10)
    raw.info['meas_date'] = meas_date
    annot = Annotations([.5], [.2], ['dummy'], orig_time)
    raw.set_annotations(annotations=annot)
    return raw
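# A hedged usage sketch (not from the original source); with the MNE versions
# targeted here, meas_date could still be assigned as a plain timestamp.
raw = _raw_annot(meas_date=1, orig_time=1.5)
print(raw.annotations.orig_time)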
Example #28
def test_annotation_property_deprecation_warning():
    """Test that assigning annotations warns and nowhere else."""
    with pytest.warns(None) as w:
        raw = RawArray(np.random.rand(1, 1), create_info(1, 1))
    assert len(w) is 0
    with pytest.warns(DeprecationWarning, match='by assignment is deprecated'):
        raw.annotations = None
Example #29
def create_random_epochs(nep, nchan, ntime, sfreq,
                         nclasses=2, ch_types='eeg'):
    data = np.random.randn(nep*nclasses, nchan, ntime*sfreq)
    ev = create_random_events(nep, ntime*sfreq, nclasses)
    info = mne.create_info([str(i) for i in range(nchan)], sfreq, ch_types)
    ep = mne.epochs.EpochsArray(data, info, ev)
    return ep
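# A hedged usage sketch (not from the original source); it assumes the
# companion helper create_random_events used above is importable from the
# same module.
ep = create_random_epochs(nep=10, nchan=4, ntime=2, sfreq=100)
print(ep)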
Example #30
def test_add_channels():
    """Test tfr splitting / re-appending channel types
    """
    data = np.zeros((6, 2, 3))
    times = np.array([0.1, 0.2, 0.3])
    freqs = np.array([0.10, 0.20])
    info = mne.create_info(
        ["MEG 001", "MEG 002", "MEG 003", "EEG 001", "EEG 002", "STIM 001"],
        1000.0,
        ["mag", "mag", "mag", "eeg", "eeg", "stim"],
    )
    tfr = AverageTFR(info, data=data, times=times, freqs=freqs, nave=20, comment="test", method="crazy-tfr")
    tfr_eeg = tfr.pick_types(meg=False, eeg=True, copy=True)
    tfr_meg = tfr.pick_types(meg=True, copy=True)
    tfr_stim = tfr.pick_types(meg=False, stim=True, copy=True)
    tfr_eeg_meg = tfr.pick_types(meg=True, eeg=True, copy=True)
    tfr_new = tfr_meg.add_channels([tfr_eeg, tfr_stim], copy=True)
    assert_true(all(ch in tfr_new.ch_names for ch in tfr_stim.ch_names + tfr_meg.ch_names))
    tfr_new = tfr_meg.add_channels([tfr_eeg], copy=True)

    assert_true(all(ch in tfr_new.ch_names
                    for ch in tfr.ch_names if ch != 'STIM 001'))
    assert_array_equal(tfr_new.data, tfr_eeg_meg.data)
    assert_true(all(ch not in tfr_new.ch_names for ch in tfr_stim.ch_names))

    # Now test errors
    tfr_badsf = tfr_eeg.copy()
    tfr_badsf.info["sfreq"] = 3.1415927
    tfr_eeg = tfr_eeg.crop(-0.1, 0.1)

    assert_raises(RuntimeError, tfr_meg.add_channels, [tfr_badsf])
    assert_raises(AssertionError, tfr_meg.add_channels, [tfr_eeg])
    assert_raises(ValueError, tfr_meg.add_channels, [tfr_meg])
    assert_raises(AssertionError, tfr_meg.add_channels, tfr_badsf)
Example #31
def test_plot_misc_auto():
    """Test plotting of data with misc auto scaling."""
    data = np.random.RandomState(0).randn(1, 1000)
    raw = RawArray(data, create_info(1, 1000., 'misc'))
    raw.plot()
    plt.close('all')
Example #32
def get_PB_data(subject, trial, config):
    """Get the force and position data of a subject and a trial.

    Parameters
    ----------
    subject : str
        A string of subject ID e.g. 7707.
    trial : str
        A trial, e.g. HighFine.
    config : yaml
        The configuration file.
    Returns
    -------
    raw : mne.io.RawArray
        A raw object containing the force and position data as misc channels.
    time_data : array
        The corresponding time stamps.

    """
    # path of the files
    if subject in config['subjects2']:
        PB_path = Path(__file__).parents[2] / config[
            'exp2_data_path'] / subject / trial / 'PB.csv'
        EMG_path = Path(__file__).parents[2] / config[
            'exp2_data_path'] / subject / trial / 'EMG.csv'
    else:
        if trial not in config['comb_trials']:
            PB_path = get_trial_path(subject, trial, config, 'PB')
            EMG_path = get_trial_path(subject, trial, config, 'EMG')

    # read the data
    PB_data = np.genfromtxt(
        PB_path,
        dtype=float,
        delimiter=',',
        unpack=True,
        usecols=[16, 17, 19, 20],  # [13, 14]-Fx,Fy; [16,17]-Mx,My
        skip_footer=config['skip_footer'],
        skip_header=config['skip_header'])
    time_data = np.genfromtxt(PB_path,
                              dtype=str,
                              delimiter=',',
                              unpack=True,
                              usecols=0,
                              skip_footer=config['skip_footer'],
                              skip_header=config['skip_header'])

    # Moments are used in Amir's experiment. The moment-to-force conversion scalars provided in the experiment are [-10.0, -10.0].
    PB_data[0:2, :] = np.multiply(
        PB_data[0:2, :],
        np.array([-10, 10
                  ]).reshape(2,
                             1))  # only when 16, 17 are used instead of 13, 14

    # Calculate the forces in the tangential and the normal directions
    PB_data = tangential_normal_force_components(PB_data)

    PB_data = PCA_force_components(PB_data, wind_len=4)

    # get the actual trial start and end time based on the PB and MYO data
    if subject in config['subjects2']:
        trial_start, trial_end, _ = get_trial_time_exp2(
            subject, trial, PB_path, EMG_path, config)
        time_data, sfreq = convert_time(time_data)

        indices = np.all([time_data >= trial_start, time_data <= trial_end],
                         axis=0)

        time_data = time_data[indices[:, 0]]
        PB_data = PB_data[:, indices[:, 0]]

    else:
        trial_start, trial_end, _ = get_trial_time(subject, trial, config)
        time_data, sfreq = convert_time(time_data)

    # creating an mne object
    info = mne.create_info(
        ch_names=['Fx', 'Fy', 'X', 'Y', 'Ft', 'Fn', 'F_pca1', 'F_pca2'],
        sfreq=sfreq,
        ch_types=['misc'] * 8)
    raw = mne.io.RawArray(PB_data, info, verbose=False)

    return raw, time_data
Example #33
def get_raw_emg_exp2(subject, trial, config):
    """Get the raw emg data for a subject and trail from experiment 2

    Parameters
    ----------
    subject : str
        A string of subject ID e.g. 7707.
    trial : str
        A trial, e.g. HighFine.
    config : yaml
        The configuration file.

    Returns
    -------
    mne object
        A raw mne object.

    """
    # path of the PB file
    # if subject in config['subjects2']:
    #     trial_path = Path(__file__).parents[2] / config['exp2_data_path'] / subject / trial / 'PB.csv'
    # else:
    #     trial_path = get_trial_path(subject, trial, config, 'PB')

    # path of the files
    # if subject in config['subjects2']:
    PB_path = Path(__file__).parents[2] / config[
        'exp2_data_path'] / subject / trial / 'PB.csv'
    EMG_path = Path(__file__).parents[2] / config[
        'exp2_data_path'] / subject / trial / 'EMG.csv'
    # else:
    #     PB_path  = get_trial_path(subject, trial, config, 'PB')
    #     EMG_path = get_trial_path(subject, trial, config, 'EMG')

    # path of the EMG file
    filepath = Path(
        __file__).parents[2] / config['exp2_data_path'] / subject / trial

    if filepath.exists():
        for file in filepath.iterdir():
            if (file.name.split('.')[0] == 'EMG'):
                # Get the time from EMG.csv
                time = np.genfromtxt(file,
                                     dtype=None,
                                     delimiter=',',
                                     unpack=True,
                                     skip_footer=config['skip_footer'],
                                     skip_header=config['skip_header'],
                                     usecols=0,
                                     encoding=None)
                # Get the EMG data
                EMG_data = np.genfromtxt(file,
                                         dtype=float,
                                         delimiter=',',
                                         unpack=True,
                                         skip_footer=config['skip_footer'],
                                         skip_header=config['skip_header'],
                                         usecols=np.arange(1, 9),
                                         encoding=None)

                # get the actual trial start and end time based on the PB and MYO data
                if subject in config['subjects2']:
                    trial_start, trial_end, _ = get_trial_time_exp2(
                        subject, trial, PB_path, EMG_path, config)
                else:
                    trial_start, trial_end, _ = get_trial_time(
                        subject, trial, config)

                time_EMG, sfreq = convert_time(time)

                indices = np.all(
                    [time_EMG >= trial_start, time_EMG <= trial_end], axis=0)

                time_EMG = time_EMG[indices[:, 0]]
                EMG_data = EMG_data[:, indices[:, 0]]

                # Create mne raw object
                info = mne.create_info(ch_names=[
                    'emg_1', 'emg_2', 'emg_3', 'emg_4', 'emg_5', 'emg_6',
                    'emg_7', 'emg_8'
                ],
                                       ch_types=['misc'] * EMG_data.shape[0],
                                       sfreq=sfreq)

                # Create mne raw file
                raw = mne.io.RawArray(EMG_data, info, verbose=False)

                # Additional information
                raw.info['subject_info'] = subject

                return raw, [trial_start[:], trial_end[:]]
    else:
        return [], []
Example #34
def load_muse_csv_as_raw(filename,
                         sfreq=256.,
                         ch_ind=[0, 1, 2, 3],
                         stim_ind=5,
                         replace_ch_names=None):
    """Load CSV files into a Raw object.

    Args:
        filename (str or list): path or paths to CSV files to load

    Keyword Args:
        subject_nb (int or str): subject number. If 'all', load all
            subjects.
        session_nb (int or str): session number. If 'all', load all
            sessions.
        sfreq (float): EEG sampling frequency
        ch_ind (list): indices of the EEG channels to keep
        stim_ind (int): index of the stim channel
        replace_ch_names (dict or None): dictionary containing a mapping to
            rename channels. Useful when an external electrode was used.

    Returns:
        (mne.io.array.array.RawArray): loaded EEG
    """
    n_channel = len(ch_ind)

    raw = []
    for fname in filename:
        # read the file
        data = pd.read_csv(fname, index_col=0)

        # name of each channel
        ch_names = list(data.columns)[0:n_channel] + ['Stim']

        if replace_ch_names is not None:
            ch_names = [
                c if c not in replace_ch_names.keys() else replace_ch_names[c]
                for c in ch_names
            ]

        # type of each channel
        ch_types = ['eeg'] * n_channel + ['stim']
        montage = read_montage('standard_1005')

        # get data and exclude Aux channel
        data = data.values[:, ch_ind + [stim_ind]].T

        # convert to Volts (from uVolts)
        data[:-1] *= 1e-6

        # create MNE object
        info = create_info(ch_names=ch_names,
                           ch_types=ch_types,
                           sfreq=sfreq,
                           montage=montage)
        raw.append(RawArray(data=data, info=info))

    # concatenate all raw objects
    raws = concatenate_raws(raw)

    return raws
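# A hedged usage sketch (not from the original source); the file name is a
# placeholder for a recorded Muse CSV session.
raw = load_muse_csv_as_raw(['subject1_session1.csv'], sfreq=256., stim_ind=5)
raw.filter(1., 30.)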
Example #35
def annotate_motion_artifacts(raw,
                              pos,
                              disp_thr=0.01,
                              velo_thr=0.03,
                              gof_thr=0.99,
                              return_stat_raw=False):
    """Find and annotate periods of high HPI velocity and high HPI distance."""
    annot = Annotations([], [], [])

    info = raw.info
    # grab initial cHPI locations
    # point sorted in hpi_results are in mne device coords
    chpi_locs_dev = sorted([d for d in info['hpi_results'][-1]['dig_points']],
                           key=lambda x: x['ident'])
    chpi_locs_dev = np.array([d['r'] for d in chpi_locs_dev])
    # chpi_locs_dev[0] -> LPA
    # chpi_locs_dev[1] -> NASION
    # chpi_locs_dev[2] -> RPA
    chpi_static_head = apply_trans(info['dev_head_t'], chpi_locs_dev)

    time = pos[:, 0]
    n_hpi = chpi_static_head.shape[0]
    quats = pos[:, 1:7]
    chpi_moving_head = np.array(
        [_apply_quat(quat, chpi_locs_dev, move=True) for quat in quats])

    # compute displacements
    hpi_disp = chpi_moving_head - np.tile(chpi_static_head, (len(time), 1, 1))
    hpi_disp = np.sqrt((hpi_disp**2).sum(axis=-1))
    # compute velocities
    hpi_velo = chpi_moving_head[1:, :, :] - chpi_moving_head[:-1, :, :]
    hpi_velo = np.sqrt((hpi_velo**2).sum(axis=-1))
    hpi_velo /= np.tile(time[1:] - time[:-1], (n_hpi, 1)).transpose()
    hpi_velo = np.concatenate((np.zeros((1, n_hpi)), hpi_velo), axis=0)

    if disp_thr is not None:
        art_mask = hpi_disp > disp_thr
        annot += _annotations_from_mask(time, art_mask,
                                        'Bad-motion-dist>%0.3f' % disp_thr)
    if velo_thr is not None:
        art_mask = hpi_velo > velo_thr
        annot += _annotations_from_mask(time, art_mask,
                                        'Bad-motion-velo>%0.3f' % velo_thr)

    if gof_thr is not None:
        art_mask = pos[:, 7] <= gof_thr
        annot += _annotations_from_mask(time, art_mask,
                                        'Bad-chpi_gof>%0.3f' % gof_thr)

    tmp = 1000 * hpi_disp.max(axis=0)
    _fmt = '\tHPI00 - %0.1f'
    for i in range(1, n_hpi):
        _fmt += '\n\tHPI%02d' % (i) + ' - %0.1f'
    logger.info('CHPI MAX Displacements (mm):')
    logger.info(_fmt % tuple(tmp))
    tmp = 1000 * hpi_velo.max(axis=0)
    logger.info('CHPI Velocity Displacements (mm/sec):')
    logger.info(_fmt % tuple(tmp))

    raw_hpi = None
    if return_stat_raw:
        n_times = len(raw.times)
        # build full time data arrays
        start_idx = raw.time_as_index(time, use_rounding=True)
        end_idx = raw.time_as_index(np.append(time[1:], raw.times[-1]),
                                    use_rounding=True)
        data_pos = np.zeros((2 * n_hpi, n_times))
        for t_0, t_1, disp_val, velo_val in zip(start_idx, end_idx, hpi_disp,
                                                hpi_velo):
            t_slice = slice(t_0, t_1)
            data_pos[:n_hpi, t_slice] = np.tile(disp_val, (t_1 - t_0, 1)).T
            data_pos[n_hpi:, t_slice] = np.tile(velo_val, (t_1 - t_0, 1)).T

        ch_names = []
        ch_names_ = []
        for i in range(n_hpi):
            ch_names.append('HPI%02d_disp_pos' % i)
            ch_names_.append('HPI%02d_velo_pos' % i)
        ch_names.extend(ch_names_)

        # build raw object!
        info = create_info(ch_names=ch_names,
                           ch_types=np.repeat('misc', len(ch_names)),
                           sfreq=raw.info['sfreq'])
        raw_hpi = RawArray(data_pos, info)

    return annot, raw_hpi
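# A hedged usage sketch (not from the original source); the file names are
# placeholders and assume continuous HPI was recorded so that head positions
# could be estimated beforehand (e.g. with the mne.chpi tools).
import mne

raw = mne.io.read_raw_fif('raw_with_chpi.fif', allow_maxshield='yes')
pos = mne.chpi.read_head_pos('raw_with_chpi_head_pos.pos')
annot, raw_stat = annotate_motion_artifacts(raw, pos, return_stat_raw=True)
raw.set_annotations(annot)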
Example #36
                                   pick_ori=None)
        raw_stc = apply_inverse_raw(raw,
                                    inv,
                                    lambda2=1.0 / (2**snr_raw),
                                    method=source_method,
                                    label=label,
                                    pick_ori=None)
        ch_names.append(label.name)
        evoked_stcs.append(evoked_stc.data.mean(0))
        raw_stcs.append(raw_stc.data.mean(0))
    evoked_stcs = np.array(evoked_stcs)
    raw_stcs = np.array(raw_stcs)

# Create info for raw_source and evoked_source
info_raw = mne.create_info(ch_names=ch_names,
                           ch_types=['stim'] + ['mag'] * len(labels),
                           sfreq=600)
info_evoked = mne.create_info(ch_names=ch_names[1:],
                              ch_types=['mag'] * len(labels),
                              sfreq=600)
raw_source = mne.io.RawArray(raw_stcs, info_raw)
evoked_source = mne.EvokedArray(evoked_stcs, info_evoked)

# save raw_source and evoked_source
results_folder = op.join(path_data, subject, 'source_signal')
if not op.isdir(results_folder):
    os.mkdir(results_folder)
fname = op.join(results_folder, '%s_raw_source.fif' % subject)
raw_source.save(fname, overwrite=True)
fname = op.join(results_folder, '%s_evoked_source.fif' % subject)
evoked_source.save(fname)
Example #37
eval_events_path = "/arquivos/Documents/eeg_data/doutorado_cleison/true_labels/A01E.csv"

# raw = mne.io.read_raw_eeglab(data_train_path)

data, fs = loadBiosig(data_train_path)
data = nanCleaner(data)

ch_names = ['Fz', 'EEG1', 'EEG2', 'EEG3', 'EEG4', 'EEG5', 'EEG6', 'EEG-C3', 'EEG7', 'EEG-Cz', 'EEG8', 
'EEG-C4', 'EEG9', 'EEG10', 'EEG11', 'EEG12', 'EEG13', 'EEG14', 'EEG15', 'EEG-Pz', 'EEG16', 'EEG17', 'EOG1', 'EOG2', 'EOG3' ]

sfreq = fs

ch_types = ['eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 
'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eog', 'eog', 'eog' ]

info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)

raw = mne.io.RawArray(data, info)

events_list = readEvents(train_events_path)

info_stim = mne.create_info(ch_names=['stim_clean'], sfreq=raw.info['sfreq'], ch_types=['stim'])
info_stim['buffer_size_sec'] = raw.info['buffer_size_sec']
data_dum = np.zeros([1, data.shape[1]])
raw_stim = mne.io.RawArray(data_dum, info=info_stim)
raw.add_channels([raw_stim])

raw.add_events(events_list, stim_channel = None)

# Processing begins here:
tmin, tmax = -1.5, 3.5 # time before event, time after event
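# A hedged sketch (not from the original source) of the epoching step that
# would typically follow; event_id is left as None so that every event code
# present on the stim channel is used.
events = mne.find_events(raw, stim_channel='stim_clean')
epochs = mne.Epochs(raw, events, event_id=None, tmin=tmin, tmax=tmax,
                    baseline=None, preload=True)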
Example #38
def test_io():
    """Test TFR IO capacities."""
    from pandas import DataFrame
    tempdir = _TempDir()
    fname = op.join(tempdir, 'test-tfr.h5')
    data = np.zeros((3, 2, 3))
    times = np.array([.1, .2, .3])
    freqs = np.array([.10, .20])

    info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
                           ['mag', 'mag', 'mag'])
    tfr = AverageTFR(info,
                     data=data,
                     times=times,
                     freqs=freqs,
                     nave=20,
                     comment='test',
                     method='crazy-tfr')
    tfr.save(fname)
    tfr2 = read_tfrs(fname, condition='test')

    assert_array_equal(tfr.data, tfr2.data)
    assert_array_equal(tfr.times, tfr2.times)
    assert_array_equal(tfr.freqs, tfr2.freqs)
    assert_equal(tfr.comment, tfr2.comment)
    assert_equal(tfr.nave, tfr2.nave)

    pytest.raises(IOError, tfr.save, fname)

    tfr.comment = None
    tfr.save(fname, overwrite=True)
    assert_equal(read_tfrs(fname, condition=0).comment, tfr.comment)
    tfr.comment = 'test-A'
    tfr2.comment = 'test-B'

    fname = op.join(tempdir, 'test2-tfr.h5')
    write_tfrs(fname, [tfr, tfr2])
    tfr3 = read_tfrs(fname, condition='test-A')
    assert_equal(tfr.comment, tfr3.comment)

    assert (isinstance(tfr.info, mne.Info))

    tfrs = read_tfrs(fname, condition=None)
    assert_equal(len(tfrs), 2)
    tfr4 = tfrs[1]
    assert_equal(tfr2.comment, tfr4.comment)

    pytest.raises(ValueError, read_tfrs, fname, condition='nonono')
    # Test save of EpochsTFR.
    n_events = 5
    data = np.zeros((n_events, 3, 2, 3))

    # create fake metadata
    rng = np.random.RandomState(42)
    rt = np.round(rng.uniform(size=(n_events, )), 3)
    trialtypes = np.array(['face', 'place'])
    trial = trialtypes[(rng.uniform(size=(n_events, )) > .5).astype(int)]
    meta = DataFrame(dict(RT=rt, Trial=trial))
    # fake events and event_id
    events = np.zeros([n_events, 3])
    events[:, 0] = np.arange(n_events)
    events[:, 2] = np.ones(n_events)
    event_id = dict(a=1)

    tfr = EpochsTFR(info,
                    data=data,
                    times=times,
                    freqs=freqs,
                    comment='test',
                    method='crazy-tfr',
                    events=events,
                    event_id=event_id,
                    metadata=meta)
    tfr.save(fname, True)
    read_tfr = read_tfrs(fname)[0]
    assert_array_equal(tfr.data, read_tfr.data)
    assert_metadata_equal(tfr.metadata, read_tfr.metadata)
    assert_array_equal(tfr.events, read_tfr.events)
    assert_equal(tfr.event_id, read_tfr.event_id)
Example #39
def test_tfr_multitaper():
    """Test tfr_multitaper."""
    sfreq = 200.0
    ch_names = ['SIM0001', 'SIM0002']
    ch_types = ['grad', 'grad']
    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)

    n_times = int(sfreq)  # Second long epochs
    n_epochs = 3
    seed = 42
    rng = np.random.RandomState(seed)
    noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
    t = np.arange(n_times, dtype=np.float) / sfreq
    signal = np.sin(np.pi * 2. * 50. * t)  # 50 Hz sinusoid signal
    signal[np.logical_or(t < 0.45, t > 0.55)] = 0.  # Hard windowing
    on_time = np.logical_and(t >= 0.45, t <= 0.55)
    signal[on_time] *= np.hanning(on_time.sum())  # Ramping
    dat = noise + signal

    reject = dict(grad=4000.)
    events = np.empty((n_epochs, 3), int)
    first_event_sample = 100
    event_id = dict(sin50hz=1)
    for k in range(n_epochs):
        events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']

    epochs = EpochsArray(data=dat,
                         info=info,
                         events=events,
                         event_id=event_id,
                         reject=reject)

    freqs = np.arange(35, 70, 5, dtype=np.float)

    power, itc = tfr_multitaper(epochs,
                                freqs=freqs,
                                n_cycles=freqs / 2.,
                                time_bandwidth=4.0)
    power2, itc2 = tfr_multitaper(epochs,
                                  freqs=freqs,
                                  n_cycles=freqs / 2.,
                                  time_bandwidth=4.0,
                                  decim=slice(0, 2))
    picks = np.arange(len(ch_names))
    power_picks, itc_picks = tfr_multitaper(epochs,
                                            freqs=freqs,
                                            n_cycles=freqs / 2.,
                                            time_bandwidth=4.0,
                                            picks=picks)
    power_epochs = tfr_multitaper(epochs,
                                  freqs=freqs,
                                  n_cycles=freqs / 2.,
                                  time_bandwidth=4.0,
                                  return_itc=False,
                                  average=False)
    power_averaged = power_epochs.average()
    power_evoked = tfr_multitaper(epochs.average(),
                                  freqs=freqs,
                                  n_cycles=freqs / 2.,
                                  time_bandwidth=4.0,
                                  return_itc=False,
                                  average=False).average()

    print(power_evoked)  # test repr for EpochsTFR

    # Test channel picking
    power_epochs_picked = power_epochs.copy().drop_channels(['SIM0002'])
    assert_equal(power_epochs_picked.data.shape, (3, 1, 7, 200))
    assert_equal(power_epochs_picked.ch_names, ['SIM0001'])

    pytest.raises(ValueError,
                  tfr_multitaper,
                  epochs,
                  freqs=freqs,
                  n_cycles=freqs / 2.,
                  return_itc=True,
                  average=False)

    # test picks argument
    assert_array_almost_equal(power.data, power_picks.data)
    assert_array_almost_equal(power.data, power_averaged.data)
    assert_array_almost_equal(power.times, power_epochs.times)
    assert_array_almost_equal(power.times, power_averaged.times)
    assert_equal(power.nave, power_averaged.nave)
    assert_equal(power_epochs.data.shape, (3, 2, 7, 200))
    assert_array_almost_equal(itc.data, itc_picks.data)
    # one is squared magnitude of the average (evoked) and
    # the other is average of the squared magnitudes (epochs PSD)
    # so values shouldn't match, but shapes should
    assert_array_equal(power.data.shape, power_evoked.data.shape)
    pytest.raises(AssertionError, assert_array_almost_equal, power.data,
                  power_evoked.data)

    tmax = t[np.argmax(itc.data[0, freqs == 50, :])]
    fmax = freqs[np.argmax(power.data[1, :, t == 0.5])]
    assert (tmax > 0.3 and tmax < 0.7)
    assert not np.any(itc.data < 0.)
    assert (fmax > 40 and fmax < 60)
    assert (power2.data.shape == (len(picks), len(freqs), 2))
    assert (power2.data.shape == itc2.data.shape)

    # Test decim parameter checks and compatibility between wavelets length
    # and instance length in the time dimension.
    pytest.raises(TypeError,
                  tfr_multitaper,
                  epochs,
                  freqs=freqs,
                  n_cycles=freqs / 2.,
                  time_bandwidth=4.0,
                  decim=(1, ))
    pytest.raises(ValueError,
                  tfr_multitaper,
                  epochs,
                  freqs=freqs,
                  n_cycles=1000,
                  time_bandwidth=4.0)
Example #40
from mne.time_frequency import (tfr_multitaper, tfr_stockwell, tfr_morlet,
                                tfr_array_morlet)
from mne.viz import centers_to_edges

print(__doc__)

###############################################################################
# Simulate data
# -------------
#
# We'll simulate data with a known spectro-temporal structure.

sfreq = 1000.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)

n_times = 1024  # Just over 1 second epochs
n_epochs = 40
seed = 42
rng = np.random.RandomState(seed)
noise = rng.randn(n_epochs, len(ch_names), n_times)

# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=np.float64) / sfreq
signal = np.sin(np.pi * 2. * 50. * t)  # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0.  # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum())  # Ramping
data = noise + signal
Example #41
def test_scalings_int():
    """Test that auto scalings access samples using integers."""
    raw = RawArray(np.zeros((1, 500)), create_info(1, 1000., 'eeg'))
    raw.plot(scalings='auto')
Example #42
def test_plot_raw_psd():
    """Test plotting of raw psds."""
    raw = _get_raw()
    # normal mode
    raw.plot_psd(average=False)
    # specific mode
    picks = pick_types(raw.info, meg='mag', eeg=False)[:4]
    raw.plot_psd(tmax=None, picks=picks, area_mode='range', average=False,
                 spatial_colors=True)
    raw.plot_psd(tmax=20., color='yellow', dB=False, line_alpha=0.4,
                 n_overlap=0.1, average=False)
    plt.close('all')
    ax = plt.axes()
    # if ax is supplied:
    pytest.raises(ValueError, raw.plot_psd, ax=ax, average=True)
    raw.plot_psd(tmax=None, picks=picks, ax=ax, average=True)
    plt.close('all')
    ax = plt.axes()
    with pytest.raises(ValueError, match='2 axes must be supplied, got 1'):
        raw.plot_psd(ax=ax, average=True)
    plt.close('all')
    ax = plt.subplots(2)[1]
    raw.plot_psd(tmax=None, ax=ax, average=True)
    plt.close('all')
    # topo psd
    ax = plt.subplot()
    raw.plot_psd_topo(axes=ax)
    plt.close('all')
    # with channel information not available
    for idx in range(len(raw.info['chs'])):
        raw.info['chs'][idx]['loc'] = np.zeros(12)
    with pytest.warns(RuntimeWarning, match='locations not available'):
        raw.plot_psd(spatial_colors=True, average=False)
    # with a flat channel
    raw[5, :] = 0
    for dB, estimate in itertools.product((True, False),
                                          ('power', 'amplitude')):
        with pytest.warns(UserWarning, match='[Infinite|Zero]'):
            fig = raw.plot_psd(average=True, dB=dB, estimate=estimate)
        ylabel = fig.axes[1].get_ylabel()
        ends_dB = ylabel.endswith('mathrm{(dB)}$')
        if dB:
            assert ends_dB, ylabel
        else:
            assert not ends_dB, ylabel
        if estimate == 'amplitude':
            assert r'fT/cm/\sqrt{Hz}' in ylabel, ylabel
        else:
            assert estimate == 'power'
            assert '(fT/cm)²/Hz' in ylabel, ylabel
        ylabel = fig.axes[0].get_ylabel()
        if estimate == 'amplitude':
            assert r'fT/\sqrt{Hz}' in ylabel
        else:
            assert 'fT²/Hz' in ylabel
    # test reject_by_annotation
    raw = _get_raw()
    raw.set_annotations(Annotations([1, 5], [3, 3], ['test', 'test']))
    raw.plot_psd(reject_by_annotation=True)
    raw.plot_psd(reject_by_annotation=False)
    plt.close('all')

    # test fmax value checking
    with pytest.raises(ValueError, match='not exceed one half the sampling'):
        raw.plot_psd(fmax=50000)

    # test xscale value checking
    with pytest.raises(ValueError, match="Invalid value for the 'xscale'"):
        raw.plot_psd(xscale='blah')

    # gh-5046
    raw = read_raw_fif(raw_fname, preload=True).crop(0, 1)
    picks = pick_types(raw.info)
    raw.plot_psd(picks=picks, average=False)
    raw.plot_psd(picks=picks, average=True)
    plt.close('all')
    raw.set_channel_types({'MEG 0113': 'hbo', 'MEG 0112': 'hbr',
                           'MEG 0122': 'fnirs_raw', 'MEG 0123': 'fnirs_od'},
                          verbose='error')
    fig = raw.plot_psd()
    assert len(fig.axes) == 10
    plt.close('all')

    # gh-7631
    data = 1e-3 * np.random.rand(2, 100)
    info = create_info(['CH1', 'CH2'], 100)
    raw = RawArray(data, info)
    picks = pick_types(raw.info, misc=True)
    raw.plot_psd(picks=picks, spatial_colors=False)
    plt.close('all')
Example #43
###############################################################################
# First we import what we need for this example.
import numpy as np
import mne

from pyprep.noisy import Noisydata

###############################################################################
# Now let's make an arbitrary MNE raw object for demonstration purposes.

sfreq = 1000.
n_chans = 6
ch_names = ['Fpz', 'Fz', 'FCz', 'Cz', 'Pz', 'Oz']

info = mne.create_info(ch_names=ch_names,
                       sfreq=sfreq,
                       ch_types=['eeg'] * n_chans)

time = np.arange(0, 60, 1. / sfreq)  # 60 seconds of recording
X = np.random.random((n_chans, time.shape[0]))
raw = mne.io.RawArray(X, info)
print(raw)

###############################################################################
# Pass the MNE raw object to the :class:`Noisydata` class. The resulting
# object is what all of the following methods operate on.

nd = Noisydata(raw)

###############################################################################
# Find all bad channels and print a summary
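
# The snippet ends here; below is a minimal sketch of what this step might
# look like, assuming the older pyprep ``Noisydata`` API with
# ``find_all_bads()`` and ``get_bads()`` (these method names are assumptions,
# not confirmed by the example above):

nd.find_all_bads()                # run the noisy-channel detection routines (assumed API)
bads = nd.get_bads(verbose=True)  # collect (and print) the names of bad channels
print(bads)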
Example #44
data_cls = np.asarray(cls_all)
data_pln = np.asarray(pln_all)

# Setup data for epochs and cross validation
X = np.vstack([data_cls, data_pln])
y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])
cv = StratifiedKFold(n_splits=7, shuffle=True)

# Create epochs to use for classification
n_trial, n_chan, n_time = X.shape
events = np.vstack((range(n_trial), np.zeros(n_trial, int), y.astype(int))).T
chan_names = ['MEG %i' % chan for chan in range(n_chan)]
chan_types = ['mag'] * n_chan
sfreq = 250
info = create_info(chan_names, sfreq, chan_types)
epochs = EpochsArray(data=X, info=info, events=events, verbose=False)
epochs.times = selected_times[:n_time]

# make classifier
clf = LogisticRegression(C=0.0001)

# fit model and score
gat = GeneralizationAcrossTime(
    scorer="roc_auc", cv=cv, predict_method="predict")
gat.fit(epochs, y=y)
gat.score(epochs, y=y)

# Save model
joblib.dump(gat, data_path + "decode_time_gen/gat_deg_bin.jl")
Example #45
def test_annotations():
    """Test annotation class."""
    raw = read_raw_fif(fif_fname)
    onset = np.array(range(10))
    duration = np.ones(10)
    description = np.repeat('test', 10)
    dt = datetime.utcnow()
    meas_date = raw.info['meas_date']
    # Test time shifts.
    for orig_time in [None, dt, meas_date[0], meas_date]:
        annot = Annotations(onset, duration, description, orig_time)

    assert_raises(ValueError, Annotations, onset, duration, description[:9])
    assert_raises(ValueError, Annotations, [onset, 1], duration, description)
    assert_raises(ValueError, Annotations, onset, [duration, 1], description)

    # Test combining annotations with concatenate_raws
    raw2 = raw.copy()
    orig_time = (meas_date[0] + meas_date[1] * 0.000001 +
                 raw2.first_samp / raw2.info['sfreq'])
    annot = Annotations(onset, duration, description, orig_time)
    raw2.annotations = annot
    assert_array_equal(raw2.annotations.onset, onset)
    concatenate_raws([raw, raw2])
    raw.annotations.delete(-1)  # remove boundary annotation
    assert_array_almost_equal(onset + 20., raw.annotations.onset, decimal=2)
    assert_array_equal(annot.duration, raw.annotations.duration)
    assert_array_equal(raw.annotations.description, np.repeat('test', 10))

    # Test combining with RawArray and orig_times
    data = np.random.randn(2, 1000) * 10e-12
    sfreq = 100.
    info = create_info(ch_names=['MEG1', 'MEG2'], ch_types=['grad'] * 2,
                       sfreq=sfreq)
    info['meas_date'] = 0
    raws = []
    for i, fs in enumerate([12300, 100, 12]):
        raw = RawArray(data.copy(), info, first_samp=fs)
        ants = Annotations([1., 2.], [.5, .5], 'x', fs / sfreq)
        raw.annotations = ants
        raws.append(raw)
    raw = RawArray(data.copy(), info)
    raw.annotations = Annotations([1.], [.5], 'x', None)
    raws.append(raw)
    raw = concatenate_raws(raws)
    boundary_idx = np.where(raw.annotations.description == 'BAD boundary')[0]
    assert_equal(len(boundary_idx), 3)
    raw.annotations.delete(boundary_idx)
    assert_array_equal(raw.annotations.onset, [1., 2., 11., 12., 21., 22.,
                                               31.])
    raw.annotations.delete(2)
    assert_array_equal(raw.annotations.onset, [1., 2., 12., 21., 22., 31.])
    raw.annotations.append(5, 1.5, 'y')
    assert_array_equal(raw.annotations.onset, [1., 2., 12., 21., 22., 31., 5])
    assert_array_equal(raw.annotations.duration, [.5, .5, .5, .5, .5, .5, 1.5])
    assert_array_equal(raw.annotations.description, ['x', 'x', 'x', 'x', 'x',
                                                     'x', 'y'])

    # Test concatenating annotations with and without orig_time.
    raw = read_raw_fif(fif_fname)
    last_time = raw.last_samp / raw.info['sfreq']
    raw2 = raw.copy()
    raw.annotations = Annotations([45.], [3], 'test', raw.info['meas_date'])
    raw2.annotations = Annotations([2.], [3], 'BAD', None)
    raw = concatenate_raws([raw, raw2])
    raw.annotations.delete(-1)  # remove boundary annotation
    assert_array_almost_equal(raw.annotations.onset, [45., 2. + last_time],
                              decimal=2)
Example #46
def test_resample():
    """Test resample (with I/O and multiple files)
    """
    tempdir = _TempDir()
    raw = Raw(fif_fname).crop(0, 3, False)
    raw.load_data()
    raw_resamp = raw.copy()
    sfreq = raw.info['sfreq']
    # test parallel on upsample
    raw_resamp.resample(sfreq * 2, n_jobs=2, npad='auto')
    assert_equal(raw_resamp.n_times, len(raw_resamp.times))
    raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
    raw_resamp = Raw(op.join(tempdir, 'raw_resamp-raw.fif'), preload=True)
    assert_equal(sfreq, raw_resamp.info['sfreq'] / 2)
    assert_equal(raw.n_times, raw_resamp.n_times / 2)
    assert_equal(raw_resamp._data.shape[1], raw_resamp.n_times)
    assert_equal(raw._data.shape[0], raw_resamp._data.shape[0])
    # test non-parallel on downsample
    raw_resamp.resample(sfreq, n_jobs=1, npad='auto')
    assert_equal(raw_resamp.info['sfreq'], sfreq)
    assert_equal(raw._data.shape, raw_resamp._data.shape)
    assert_equal(raw.first_samp, raw_resamp.first_samp)
    assert_equal(raw.last_samp, raw.last_samp)
    # upsampling then downsampling doubles resampling error, but this still
    # works (hooray). Note that the stim channels had to be sub-sampled
    # without filtering to be accurately preserved
    # note we have to treat MEG and EEG+STIM channels differently (tols)
    assert_allclose(raw._data[:306, 200:-200],
                    raw_resamp._data[:306, 200:-200],
                    rtol=1e-2, atol=1e-12)
    assert_allclose(raw._data[306:, 200:-200],
                    raw_resamp._data[306:, 200:-200],
                    rtol=1e-2, atol=1e-7)

    # now check multiple file support w/resampling, as order of operations
    # (concat, resample) should not affect our data
    raw1 = raw.copy()
    raw2 = raw.copy()
    raw3 = raw.copy()
    raw4 = raw.copy()
    raw1 = concatenate_raws([raw1, raw2])
    raw1.resample(10., npad='auto')
    raw3.resample(10., npad='auto')
    raw4.resample(10., npad='auto')
    raw3 = concatenate_raws([raw3, raw4])
    assert_array_equal(raw1._data, raw3._data)
    assert_array_equal(raw1._first_samps, raw3._first_samps)
    assert_array_equal(raw1._last_samps, raw3._last_samps)
    assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
    assert_equal(raw1.first_samp, raw3.first_samp)
    assert_equal(raw1.last_samp, raw3.last_samp)
    assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])

    # test resampling of stim channel

    # basic decimation
    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    assert_allclose(raw.resample(8., npad='auto')._data,
                    [[1, 1, 0, 0, 1, 1, 0, 0]])

    # decimation of multiple stim channels
    raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim']))
    assert_allclose(raw.resample(8., npad='auto')._data,
                    [[1, 1, 0, 0, 1, 1, 0, 0],
                     [1, 1, 0, 0, 1, 1, 0, 0]])

    # decimation that could potentially drop events if the decimation is
    # done naively
    stim = [0, 0, 0, 1, 1, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    assert_allclose(raw.resample(4., npad='auto')._data,
                    [[0, 1, 1, 0]])

    # two events are merged in this case (warning)
    stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw.resample(8., npad='auto')
        assert_true(len(w) == 1)

    # events are dropped in this case (warning)
    stim = [0, 1, 1, 0, 0, 1, 1, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw.resample(4., npad='auto')
        assert_true(len(w) == 1)

    # test resampling events: this should no longer give a warning
    stim = [0, 1, 1, 0, 0, 1, 1, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    events = find_events(raw)
    raw, events = raw.resample(4., events=events, npad='auto')
    assert_equal(events, np.array([[0, 0, 1], [2, 0, 1]]))

    # test copy flag
    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    raw_resampled = raw.resample(4., npad='auto', copy=True)
    assert_true(raw_resampled is not raw)
    raw_resampled = raw.resample(4., npad='auto', copy=False)
    assert_true(raw_resampled is raw)

    # resample should still work even when no stim channel is present
    raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg']))
    raw.info['lowpass'] = 50.
    raw.resample(10, npad='auto')
    assert_equal(raw.info['lowpass'], 5.)
    assert_equal(len(raw), 10)
Example #47
File: testing.py  Project: camasa96/eeg
    # read event file
    labels = pd.read_csv(events_fname)
    clean = data.drop(['id'], axis=1)     # remove id column
    labels = labels.drop(['id'], axis=1)  # remove id column
    return clean, labels

data, labels = prepare_data_train('/Users/camasa/Documents/University/Grad/DM_Pro/train/subj1_series2_data.csv')


data = np.asarray(data.astype(float))
labels = np.asarray(labels.astype(float))


# Some information about the channels
ch_names = ['Fp1', 'Fp2','F7','F3','Fz','F4','F8','FC5','FC1','FC2','FC6','T7',
            'C3','Cz','C4','T8','TP9','CP5','CP1','CP2','CP6','TP10','P7','P3','Pz',
            'P4','P8','PO9','O1','Oz','O2','PO10'] 

# Sampling rate 
sfreq = 500  # Hz
# Create the info structure needed by MNE
info = mne.create_info(ch_names, sfreq, ch_types='eeg')

# Finally, create the Raw object
raw = mne.io.RawArray(np.transpose(data), info)
#data, times = raw[:30, int(sfreq * 1):int(sfreq * 3)]
# Plot it!
#plt.plot(times, data.T)
#data.plot()

#__reset__()
Example #48
import numpy as np
import mne
import os
dir_path = os.getcwd()
# Read the CSV file as a NumPy array
data = np.loadtxt(dir_path + '/tmp/edf.csv', delimiter=',')

# Some information about the channels
ch_names = ['CH 1', 'CH 2', 'CH 3']  # TODO: finish this list

# Sampling rate of the Nautilus machine
sfreq = 500  # Hz

# Create the info structure needed by MNE
info = mne.create_info(ch_names, sfreq)

# Finally, create the Raw object
raw = mne.io.RawArray(data, info)

# Plot it!
raw.plot()
Example #49
mat = loadmat(mne.datasets.misc.data_path() + '/ecog/sample_ecog.mat')
ch_names = mat['ch_names'].tolist()
elec = mat['elec']  # electrode positions given in meters
# Now we make a montage stating that the ECoG contacts are in the head
# coordinate system (although they are in MRI). This is compensated for
# by the fact that below we do not specify a trans file, so the Head<->MRI
# transform is the identity.
montage = mne.channels.make_dig_montage(ch_pos=dict(zip(ch_names, elec)),
                                        coord_frame='head')
print('Created %s channel positions' % len(ch_names))

###############################################################################
# Now that we have our electrode positions in MRI coordinates, we can create
# our measurement info structure.

info = mne.create_info(ch_names, 1000., 'ecog', montage=montage)

###############################################################################
# We can then plot the locations of our electrodes on our subject's brain.
#
# .. note:: These are not real electrodes for this subject, so they
#           do not align to the cortical surface perfectly.

subjects_dir = mne.datasets.sample.data_path() + '/subjects'
fig = plot_alignment(info,
                     subject='sample',
                     subjects_dir=subjects_dir,
                     surfaces=['pial'])
mne.viz.set_3d_view(fig, 200, 70)

###############################################################################
Example #50
def make_random_mne_object(sfreq=1000.0,
                           t_secs=600,
                           n_freq_comps=5,
                           freq_range=[10, 60]):
    """Make a random MNE object to use for testing.

    Parameters
    ----------
    sfreq : float
        Sampling frequency in Hz.

    t_secs : int
        Recording length in seconds.

    n_freq_comps : int
        Number of signal components summed to make a signal.

    freq_range : list, len==2
        Signals will contain freqs from this range.

    Returns
    -------
    raw : mne raw object
        The mne object for performing the tests.

    n_freq_comps : int
        The number of frequency components per channel (echoed back).

    freq_range : list, len==2
        The frequency range used for the signal (echoed back).

    """
    t = np.arange(0, t_secs, 1.0 / sfreq)
    signal_len = t.shape[0]
    ch_names = [
        "Fpz",
        "AFz",
        "Fz",
        "FCz",
        "Cz",
        "CPz",
        "Pz",
        "POz",
        "Oz",
        "C1",
        "C2",
        "C3",
        "C4",
        "C5",
        "C6",
    ]
    ch_types = ["eeg" for chn in ch_names]
    n_chans = len(ch_names)

    # Make a random signal
    signal = np.zeros((n_chans, signal_len))
    low = freq_range[0]
    high = freq_range[1]
    for chan in range(n_chans):
        # Each channel signal is a sum of random freq sine waves
        for freq_i in range(n_freq_comps):
            freq = RNG.randint(low, high, signal_len)
            signal[chan, :] += np.sin(2 * np.pi * t * freq)

    signal *= 1e-6  # scale to Volts

    # Make mne object
    info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
    raw = mne.io.RawArray(signal, info)
    return raw, n_freq_comps, freq_range
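
# A brief usage sketch (assumes the module-level ``RNG`` used above is
# defined, e.g. ``RNG = np.random.RandomState(42)``; the argument value is
# arbitrary):

raw, n_freq_comps, freq_range = make_random_mne_object(t_secs=10)
print(raw)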
Example #51

    for t, w_time in enumerate(centered_w_times):

        # Center the min and max of the window
        w_tmin = w_time - w_size / 2.
        w_tmax = w_time + w_size / 2.

        # Crop data into time-window of interest
        X = epochs.copy().crop(w_tmin, w_tmax).get_data()

        # Save mean scores over folds for each frequency and time window
        tf_scores[freq, t] = np.mean(cross_val_score(estimator=clf,
                                                     X=X,
                                                     y=y,
                                                     scoring='roc_auc',
                                                     cv=cv,
                                                     n_jobs=1),
                                     axis=0)

###############################################################################
# Plot time-frequency results

# Set up time frequency object
av_tfr = AverageTFR(create_info(['freq'], sfreq), tf_scores[np.newaxis, :],
                    centered_w_times, freqs[1:], 1)

chance = np.mean(y)  # set chance level to white in the plot
av_tfr.plot([0],
            vmin=chance,
            title="Time-Frequency Decoding Scores",
            cmap=plt.cm.Reds)
Example #52
fs = 1000

filename = data_dir + 'fingerflex/data/' + str(sid) + '/1_fingerflex.mat'
mat = scipy.io.loadmat(filename)
data = np.transpose(mat['data'])  # (46, 610040)
chn_num = data.shape[0]
flex = np.transpose(mat['flex'])  #(5, 610040)
cue = np.transpose(mat['cue'])  # (1, 610040)
data = np.concatenate((data, cue, flex), axis=0)  # (47, 610040) / (52, 610040)

chn_names = np.append(["ecog"] * chn_num,
                      ["stim", "thumb", "index", "middle", "ring", "little"])
chn_types = np.append(["ecog"] * chn_num,
                      ["stim", "emg", "emg", "emg", "emg", "emg"])
info = mne.create_info(ch_names=list(chn_names),
                       ch_types=list(chn_types),
                       sfreq=fs)
raw = mne.io.RawArray(data, info)

events = mne.find_events(raw, stim_channel='stim')
events = events - [0, 0, 1]
'''
verify the events are picked up correctly.
a=np.asarray([i for i in events if i[2]==1])
fig,ax=plt.subplots()
ax.plot(cue[0,:111080])
for i in a[:6]:
    ax.axvline(x=i[0],linewidth=1,color='r',linestyle='--')
'''
event1 = events[(events[:, 2] == 0)]
event2 = events[(events[:, 2] == 1)]
Example #53
        g2 = np.swapaxes(
            np.array([[
                y_df.query(
                    'metric_type=="{}" & subj_id=="{}" & channel=="{}"'.format(
                        metric_type, s, ch))['env'].values for ch in CHANNELS
            ] for s in y_df.query('fb_type=="FBMock"')['subj_id'].unique()]),
            2, 1)

        from mne.stats import spatio_temporal_cluster_test

        from mne import create_info
        from mne.channels import read_montage, find_ch_connectivity

        cnk = find_ch_connectivity(
            create_info(CHANNELS, 250, 'eeg', read_montage('standard_1005')),
            'eeg')[0]

        t_obs, clusters, cluster_pv, h0 = spatio_temporal_cluster_test(
            [g1, g2], 138, stat_fun=rankstat, tail=1, connectivity=cnk)
        cluster_pv

        good_cluster_inds = np.where(cluster_pv < 0.05)[0]

        for i_clu, clu_idx in enumerate(good_cluster_inds[:10]):
            # unpack cluster information, get unique indices
            time_inds, space_inds = np.squeeze(clusters[clu_idx])
            ch_inds = np.unique(space_inds)
            time_inds = np.unique(time_inds)

            # get topography for F stat
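            # The snippet breaks off here; a minimal sketch of the usual next
            # step (following the standard MNE cluster-statistics examples;
            # the plotting that would follow is omitted): average the F
            # statistic over the cluster's time points to get its topography.
            f_map = t_obs[time_inds, ...].mean(axis=0)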
Example #54
                                eeg=True,
                                mindist=5.0,
                                n_jobs=None)
print(fwd)

##############################################################################
# From here on, standard inverse imaging methods can be used!
#
# Infant MRI surrogates
# ---------------------
# We don't have a sample infant dataset for MNE, so let's fake a 10-20 one:

ch_names = \
    'Fz Cz Pz Oz Fp1 Fp2 F3 F4 F7 F8 C3 C4 T7 T8 P3 P4 P7 P8 O1 O2'.split()
data = np.random.RandomState(0).randn(len(ch_names), 1000)
info = mne.create_info(ch_names, 1000., 'eeg')
raw = mne.io.RawArray(data, info)

##############################################################################
# Get an infant MRI template
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
# To use an infant head model for M/EEG data, you can use
# :func:`mne.datasets.fetch_infant_template` to download an infant template:

subject = mne.datasets.fetch_infant_template('6mo', subjects_dir, verbose=True)

##############################################################################
# It comes with several helpful built-in files, including a 10-20 montage
# in the MRI coordinate frame, which can be used to compute the
# MRI<->head transform ``trans``:
fname_1020 = op.join(subjects_dir, subject, 'montages', '10-20-montage.fif')
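
# The example ends here; a minimal sketch of one way the montage file might be
# used (any renaming of the montage channels to match ``raw.ch_names`` is
# omitted and may be needed in practice):

mon = mne.channels.read_dig_fif(fname_1020)
trans = mne.channels.compute_native_head_t(mon)  # MRI <-> head transform
print(trans)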
Example #55
cos = np.cos(times * 10)
sinX2 = sin * 2
cosX2 = cos * 2

# Numpy array of size 4 X 10000.
data = np.array([sin, cos, sinX2, cosX2])

# Definition of channel types and names.
ch_types = ['mag', 'mag', 'grad', 'grad']
ch_names = ['sin', 'cos', 'sinX2', 'cosX2']

###############################################################################
# Create an :class:`info <mne.Info>` object.

# It is also possible to use info from another raw object.
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)

###############################################################################
# Create a dummy :class:`mne.io.RawArray` object

raw = mne.io.RawArray(data, info)

# Scaling of the figure.
# For actual EEG/MEG data different scaling factors should be used.
scalings = {'mag': 2, 'grad': 2}

raw.plot(n_channels=4,
         scalings=scalings,
         title='Data from arrays',
         show=True,
         block=True)
Example #56
def test_rejection(buffer_size):
    """Test rejection."""
    event_id, tmin, tmax = 1, 0.0, 0.5
    sfreq = 1000
    ch_names = ['Fz', 'Cz', 'Pz', 'STI 014']
    raw_tmax = 5
    info = create_info(ch_names=ch_names,
                       sfreq=sfreq,
                       ch_types=['eeg', 'eeg', 'eeg', 'stim'])
    raw_array = np.random.randn(len(ch_names), raw_tmax * sfreq)
    raw_array[-1, :] = 0
    epoch_start_samples = np.arange(raw_tmax) * sfreq
    raw_array[-1, epoch_start_samples] = event_id

    reject_threshold = np.max(raw_array) - np.min(raw_array) + 1
    reject = {'eeg': reject_threshold}
    epochs_to_reject = [1, 3]
    epochs_to_keep = np.setdiff1d(np.arange(len(epoch_start_samples)),
                                  epochs_to_reject)
    expected_drop_log = [tuple() for _ in range(len(epoch_start_samples))]
    for cur_epoch in epochs_to_reject:
        raw_array[1, epoch_start_samples[cur_epoch]] = reject_threshold + 1
        expected_drop_log[cur_epoch] = (ch_names[1], )
    expected_drop_log = tuple(expected_drop_log)

    raw = RawArray(raw_array, info)
    events = find_events(raw, shortest_event=1, initial_event=True)
    picks = pick_types(raw.info, eeg=True)
    epochs = Epochs(raw,
                    events,
                    event_id=event_id,
                    tmin=tmin,
                    tmax=tmax,
                    baseline=None,
                    picks=picks,
                    preload=True,
                    reject=reject)
    epochs_data = epochs.get_data()

    assert len(epochs) == len(epoch_start_samples) - len(epochs_to_reject)
    assert_array_equal(epochs_data[:, 1, 0],
                       raw_array[1, epoch_start_samples[epochs_to_keep]])
    assert epochs.drop_log == expected_drop_log
    assert_array_equal(epochs.selection, epochs_to_keep)

    rt_client = MockRtClient(raw)

    rt_epochs = RtEpochs(rt_client,
                         event_id,
                         tmin,
                         tmax,
                         picks=picks,
                         baseline=None,
                         isi_max=0.5,
                         find_events=dict(initial_event=True),
                         reject=reject)

    rt_epochs.start()
    rt_client.send_data(rt_epochs,
                        picks,
                        tmin=0,
                        tmax=raw_tmax,
                        buffer_size=buffer_size)

    assert len(rt_epochs) == len(epochs_to_keep)
    assert rt_epochs.drop_log == expected_drop_log
    assert_array_equal(rt_epochs.selection, epochs_to_keep)
    rt_data = rt_epochs.get_data()
    assert rt_data.shape == epochs_data.shape
    assert_array_equal(rt_data, epochs_data)
Example #57
def test_find_events():
    """Test find events in raw file."""
    events = read_events(fname)
    raw = read_raw_fif(raw_fname, preload=True)
    # let's test the defaulting behavior while we're at it
    extra_ends = ['', '_1']
    orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
    os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
    if 'MNE_STIM_CHANNEL_1' in os.environ:
        del os.environ['MNE_STIM_CHANNEL_1']
    events2 = find_events(raw)
    assert_array_almost_equal(events, events2)
    # now test with mask
    events11 = find_events(raw, mask=3, mask_type='not_and')
    with pytest.warns(RuntimeWarning, match='events masked'):
        events22 = read_events(fname, mask=3, mask_type='not_and')
    assert_array_equal(events11, events22)

    # Reset some data for ease of comparison
    raw._first_samps[0] = 0
    raw.info['sfreq'] = 1000
    raw._update_times()

    stim_channel = 'STI 014'
    stim_channel_idx = pick_channels(raw.info['ch_names'],
                                     include=[stim_channel])

    # test digital masking
    raw._data[stim_channel_idx, :5] = np.arange(5)
    raw._data[stim_channel_idx, 5:] = 0
    # 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'

    pytest.raises(TypeError, find_events, raw, mask="0", mask_type='and')
    pytest.raises(ValueError, find_events, raw, mask=0, mask_type='blah')
    # testing mask_type. default = 'not_and'
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=1, mask_type='not_and'),
        [[2, 0, 2], [4, 2, 4]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=2, mask_type='not_and'),
        [[1, 0, 1], [3, 0, 1], [4, 1, 4]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=3, mask_type='not_and'),
        [[4, 0, 4]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=4, mask_type='not_and'),
        [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    # testing with mask_type = 'and'
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=1, mask_type='and'),
        [[1, 0, 1], [3, 0, 1]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=2, mask_type='and'),
        [[2, 0, 2]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=3, mask_type='and'),
        [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=4, mask_type='and'),
        [[4, 0, 4]])

    # test empty events channel
    raw._data[stim_channel_idx, :] = 0
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, :4] = 1
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, -1:] = 9
    assert_array_equal(find_events(raw), [[14399, 0, 9]])

    # Test that we can handle consecutive events with no gap
    raw._data[stim_channel_idx, 10:20] = 5
    raw._data[stim_channel_idx, 20:30] = 6
    raw._data[stim_channel_idx, 30:32] = 5
    raw._data[stim_channel_idx, 40] = 6

    assert_array_equal(find_events(raw, consecutive=False),
                       [[10, 0, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(
        find_events(raw, consecutive=True),
        [[10, 0, 5], [20, 5, 6], [30, 6, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw),
                       [[10, 0, 5], [20, 5, 6], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False),
                       [[31, 0, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(
        find_events(raw, output='offset', consecutive=True),
        [[19, 6, 5], [29, 5, 6], [31, 0, 5], [40, 0, 6], [14399, 0, 9]])
    pytest.raises(ValueError,
                  find_events,
                  raw,
                  output='step',
                  consecutive=True)
    assert_array_equal(
        find_events(raw, output='step', consecutive=True, shortest_event=1),
        [[10, 0, 5], [20, 5, 6], [30, 6, 5], [32, 5, 0], [40, 0, 6],
         [41, 6, 0], [14399, 0, 9], [14400, 9, 0]])
    assert_array_equal(find_events(raw, output='offset'),
                       [[19, 6, 5], [31, 0, 6], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, consecutive=False, min_duration=0.002),
                       [[10, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.002),
                       [[10, 0, 5], [20, 5, 6], [30, 6, 5]])
    assert_array_equal(
        find_events(raw,
                    output='offset',
                    consecutive=False,
                    min_duration=0.002), [[31, 0, 5]])
    assert_array_equal(
        find_events(raw, output='offset', consecutive=True,
                    min_duration=0.002), [[19, 6, 5], [29, 5, 6], [31, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.003),
                       [[10, 0, 5], [20, 5, 6]])

    # test find_stim_steps merge parameter
    raw._data[stim_channel_idx, :] = 0
    raw._data[stim_channel_idx, 0] = 1
    raw._data[stim_channel_idx, 10] = 4
    raw._data[stim_channel_idx, 11:20] = 5
    assert_array_equal(
        find_stim_steps(raw, pad_start=0, merge=0, stim_channel=stim_channel),
        [[0, 0, 1], [1, 1, 0], [10, 0, 4], [11, 4, 5], [20, 5, 0]])
    assert_array_equal(
        find_stim_steps(raw, merge=-1, stim_channel=stim_channel),
        [[1, 1, 0], [10, 0, 5], [20, 5, 0]])
    assert_array_equal(
        find_stim_steps(raw, merge=1, stim_channel=stim_channel),
        [[1, 1, 0], [11, 0, 5], [20, 5, 0]])

    # put back the env vars we trampled on
    for s, o in zip(extra_ends, orig_envs):
        if o is not None:
            os.environ['MNE_STIM_CHANNEL%s' % s] = o

    # Test with list of stim channels
    raw._data[stim_channel_idx, 1:101] = np.zeros(100)
    raw._data[stim_channel_idx, 10:11] = 1
    raw._data[stim_channel_idx, 30:31] = 3
    stim_channel2 = 'STI 015'
    stim_channel2_idx = pick_channels(raw.info['ch_names'],
                                      include=[stim_channel2])
    raw._data[stim_channel2_idx, :] = 0
    raw._data[stim_channel2_idx, :100] = raw._data[stim_channel_idx, 5:105]
    events1 = find_events(raw, stim_channel='STI 014')
    events2 = events1.copy()
    events2[:, 0] -= 5
    events = find_events(raw, stim_channel=['STI 014', stim_channel2])
    assert_array_equal(events[::2], events2)
    assert_array_equal(events[1::2], events1)

    # test initial_event argument
    info = create_info(['MYSTI'], 1000, 'stim')
    data = np.zeros((1, 1000))
    raw = RawArray(data, info)
    data[0, :10] = 100
    data[0, 30:40] = 200
    assert_array_equal(find_events(raw, 'MYSTI'), [[30, 0, 200]])
    assert_array_equal(find_events(raw, 'MYSTI', initial_event=True),
                       [[0, 0, 100], [30, 0, 200]])
Example #58
list_el = ['PZ', 'PO5', 'PO3', 'POz', 'PO4', 'PO6', 'O1', 'Oz',
           'O2']  # Electrodes to use
vec_ind_el = df_location[df_location['Label'].isin(list_el)].index  # Vector with indexes of electrodes to use
vec_ind_el = df_location['Label'].index  # overrides the selection above: use all electrodes
ind_ref_el = df_location['Electrode'][df_location['Label'] == 'Cz'].index[0]  # Index of reference electrode 'Cz'
ind_oz = df_location['Electrode'][df_location['Label'] == 'Oz'].index[0]
ind_frq = 7

## Load and create Montage
df_montage = pd.read_csv(os.path.join(dir_data, 'montage.DAT'), sep='\t', header=None)
df_montage.columns = ['Channel', 'x', 'y', 'z']
df_montage['Channel'] = df_montage['Channel'].str.strip()
mne_montage = mne.channels.make_dig_montage(df_montage[3:])
mne_montage.ch_names = df_location['Label'].tolist()
n_channels = len(df_montage['Channel'][3:])
fake_info = mne.create_info(ch_names=mne_montage.ch_names, sfreq=250.,
                            ch_types='eeg')

N_pre = int(0.5 * 250)  # pre stim
N_delay = int(0.140 * 250)  # SSVEP delay
N_stim = int(5 * 250)  # stimulation
N_start = N_pre + N_delay - 1
N_stop = N_start + N_stim

### Plot average over all subjects
Nb = 6
Ns = 35
Nf = 40
Ne = 9

mat_proc = np.zeros([Ns * Nb, len(vec_ind_el), N_stim])
s = 0
Example #59
def _to_mne():
    x_mne = []
    for k in range(n_suj):
        info = create_info(roi[k].tolist(), sf)
        x_mne += [EpochsArray(x[k], info, tmin=times[0], verbose=False)]
    return x_mne
Example #60
def test_montage():
    """Test making montages."""
    tempdir = _TempDir()
    inputs = dict(
        sfp='FidNz 0       9.071585155     -2.359754454\n'
        'FidT9 -6.711765       0.040402876     -3.251600355\n'
        'very_very_very_long_name -5.831241498 -4.494821698  4.955347697\n'
        'Cz 0       0       8.899186843',
        csd=
        '// MatLab   Sphere coordinates [degrees]         Cartesian coordinates\n'  # noqa: E501
        '// Label       Theta       Phi    Radius         X         Y         Z       off sphere surface\n'  # noqa: E501
        'E1      37.700     -14.000       1.000    0.7677    0.5934   -0.2419  -0.00000000000000011\n'  # noqa: E501
        'E3      51.700      11.000       1.000    0.6084    0.7704    0.1908   0.00000000000000000\n'  # noqa: E501
        'E31      90.000     -11.000       1.000    0.0000    0.9816   -0.1908   0.00000000000000000\n'  # noqa: E501
        'E61     158.000     -17.200       1.000   -0.8857    0.3579   -0.2957  -0.00000000000000022',  # noqa: E501
        mm_elc=
        '# ASA electrode file\nReferenceLabel  avg\nUnitPosition    mm\n'  # noqa:E501
        'NumberPositions=    68\n'
        'Positions\n'
        '-86.0761 -19.9897 -47.9860\n'
        '85.7939 -20.0093 -48.0310\n'
        '0.0083 86.8110 -39.9830\n'
        '-86.0761 -24.9897 -67.9860\n'
        'Labels\nLPA\nRPA\nNz\nDummy\n',
        m_elc='# ASA electrode file\nReferenceLabel  avg\nUnitPosition    m\n'
        'NumberPositions=    68\nPositions\n-.0860761 -.0199897 -.0479860\n'  # noqa:E501
        '.0857939 -.0200093 -.0480310\n.0000083 .00868110 -.0399830\n'
        '.08 -.02 -.04\n'
        'Labels\nLPA\nRPA\nNz\nDummy\n',
        txt='Site  Theta  Phi\n'
        'Fp1  -92    -72\n'
        'Fp2   92     72\n'
        'very_very_very_long_name       -92     72\n'
        'O2        92    -90\n',
        elp='346\n'
        'EEG\t      F3\t -62.027\t -50.053\t      85\n'
        'EEG\t      Fz\t  45.608\t      90\t      85\n'
        'EEG\t      F4\t   62.01\t  50.103\t      85\n'
        'EEG\t      FCz\t   68.01\t  58.103\t      85\n',
        hpts='eeg Fp1 -95.0 -3. -3.\n'
        'eeg AF7 -1 -1 -3\n'
        'eeg A3 -2 -2 2\n'
        'eeg A 0 0 0',
        bvef='<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n'
        '<!-- Generated by EasyCap Configurator 19.05.2014 -->\n'
        '<Electrodes defaults="false">\n'
        '  <Electrode>\n'
        '    <Name>Fp1</Name>\n'
        '    <Theta>-90</Theta>\n'
        '    <Phi>-72</Phi>\n'
        '    <Radius>1</Radius>\n'
        '    <Number>1</Number>\n'
        '  </Electrode>\n'
        '  <Electrode>\n'
        '    <Name>Fz</Name>\n'
        '    <Theta>45</Theta>\n'
        '    <Phi>90</Phi>\n'
        '    <Radius>1</Radius>\n'
        '    <Number>2</Number>\n'
        '  </Electrode>\n'
        '  <Electrode>\n'
        '    <Name>F3</Name>\n'
        '    <Theta>-60</Theta>\n'
        '    <Phi>-51</Phi>\n'
        '    <Radius>1</Radius>\n'
        '    <Number>3</Number>\n'
        '  </Electrode>\n'
        '  <Electrode>\n'
        '    <Name>F7</Name>\n'
        '    <Theta>-90</Theta>\n'
        '    <Phi>-36</Phi>\n'
        '    <Radius>1</Radius>\n'
        '    <Number>4</Number>\n'
        '  </Electrode>\n'
        '</Electrodes>',
    )
    # Get actual positions and save them for checking
    # csd comes from the string above, all others come from commit 2fa35d4
    poss = dict(
        sfp=[[0.0, 9.07159, -2.35975], [-6.71176, 0.0404, -3.2516],
             [-5.83124, -4.49482, 4.95535], [0.0, 0.0, 8.89919]],
        mm_elc=[[-0.08608, -0.01999, -0.04799], [0.08579, -0.02001, -0.04803],
                [1e-05, 0.08681, -0.03998], [-0.08608, -0.02499, -0.06799]],
        m_elc=[[-0.08608, -0.01999, -0.04799], [0.08579, -0.02001, -0.04803],
               [1e-05, 0.00868, -0.03998], [0.08, -0.02, -0.04]],
        txt=[[-26.25044, 80.79056, -2.96646], [26.25044, 80.79056, -2.96646],
             [-26.25044, -80.79056, -2.96646], [0.0, -84.94822, -2.96646]],
        elp=[[-48.20043, 57.55106, 39.86971], [0.0, 60.73848, 59.4629],
             [48.1426, 57.58403, 39.89198], [41.64599, 66.91489, 31.8278]],
        hpts=[[-95, -3, -3], [-1, -1., -3.], [-2, -2, 2.], [0, 0, 0]],
        bvef=[[-26.266444, 80.839803, 5.204748e-15],
              [3.680313e-15, 60.104076, 60.104076],
              [-46.325632, 57.207392, 42.500000],
              [-68.766444, 49.961746, 5.204748e-15]],
    )
    for key, text in inputs.items():
        kind = key.split('_')[-1]
        fname = op.join(tempdir, 'test.' + kind)
        with open(fname, 'w') as fid:
            fid.write(text)
        montage = read_montage(fname)
        if kind in ('sfp', 'txt'):
            assert ('very_very_very_long_name' in montage.ch_names)
        assert_equal(len(montage.ch_names), 4)
        assert_equal(len(montage.ch_names), len(montage.pos))
        assert_equal(montage.pos.shape, (4, 3))
        assert_equal(montage.kind, 'test')
        if kind == 'csd':
            dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
                     ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
                     ('off_sph', 'f8')]
            try:
                table = np.loadtxt(fname, skip_header=2, dtype=dtype)
            except TypeError:
                table = np.loadtxt(fname, skiprows=2, dtype=dtype)
            poss['csd'] = np.c_[table['x'], table['y'], table['z']]
        if kind == 'elc':
            # Make sure points are reasonable distance from geometric centroid
            centroid = np.sum(montage.pos, axis=0) / montage.pos.shape[0]
            distance_from_centroid = np.apply_along_axis(
                np.linalg.norm, 1, montage.pos - centroid)
            assert_array_less(distance_from_centroid, 0.2)
            assert_array_less(0.01, distance_from_centroid)
        assert_array_almost_equal(poss[key], montage.pos, 4, err_msg=key)

    # Test reading in different letter case.
    ch_names = [
        "F3", "FZ", "F4", "FC3", "FCz", "FC4", "C3", "CZ", "C4", "CP3", "CPZ",
        "CP4", "P3", "PZ", "P4", "O1", "OZ", "O2"
    ]
    montage = read_montage('standard_1020', ch_names=ch_names)
    assert_array_equal(ch_names, montage.ch_names)

    # test transform
    input_strs = [
        """
    eeg Fp1 -95.0 -31.0 -3.0
    eeg AF7 -81 -59 -3
    eeg AF3 -87 -41 28
    cardinal 2 -91 0 -42
    cardinal 1 0 -91 -42
    cardinal 3 0 91 -42
    """, """
    Fp1 -95.0 -31.0 -3.0
    AF7 -81 -59 -3
    AF3 -87 -41 28
    FidNz -91 0 -42
    FidT9 0 -91 -42
    FidT10 0 91 -42
    """
    ]
    # sfp files seem to have Nz, T9, and T10 as fiducials:
    # https://github.com/mne-tools/mne-python/pull/4482#issuecomment-321980611

    kinds = ['test_fid.hpts', 'test_fid.sfp']

    for kind, input_str in zip(kinds, input_strs):
        fname = op.join(tempdir, kind)
        with open(fname, 'w') as fid:
            fid.write(input_str)
        montage = read_montage(op.join(tempdir, kind), transform=True)

        # check coordinate transformation
        pos = np.array([-95.0, -31.0, -3.0])
        nasion = np.array([-91, 0, -42])
        lpa = np.array([0, -91, -42])
        rpa = np.array([0, 91, -42])
        fids = np.vstack((nasion, lpa, rpa))
        trans = get_ras_to_neuromag_trans(fids[0], fids[1], fids[2])
        pos = apply_trans(trans, pos)
        assert_array_equal(montage.pos[0], pos)
        assert_array_equal(montage.nasion[[0, 2]], [0, 0])
        assert_array_equal(montage.lpa[[1, 2]], [0, 0])
        assert_array_equal(montage.rpa[[1, 2]], [0, 0])
        pos = np.array([-95.0, -31.0, -3.0])
        montage_fname = op.join(tempdir, kind)
        montage = read_montage(montage_fname, unit='mm')
        assert_array_equal(montage.pos[0], pos * 1e-3)

        # test with last
        info = create_info(montage.ch_names, 1e3,
                           ['eeg'] * len(montage.ch_names))
        _set_montage(info, montage)
        pos2 = np.array([c['loc'][:3] for c in info['chs']])
        assert_array_equal(pos2, montage.pos)
        assert_equal(montage.ch_names, info['ch_names'])

        info = create_info(montage.ch_names, 1e3,
                           ['eeg'] * len(montage.ch_names))

        evoked = EvokedArray(data=np.zeros((len(montage.ch_names), 1)),
                             info=info,
                             tmin=0)

        # test return type as well as set montage
        assert (isinstance(evoked.set_montage(montage), type(evoked)))

        pos3 = np.array([c['loc'][:3] for c in evoked.info['chs']])
        assert_array_equal(pos3, montage.pos)
        assert_equal(montage.ch_names, evoked.info['ch_names'])

        # Warning should be raised when some EEG are not specified in montage
        info = create_info(montage.ch_names + ['foo', 'bar'], 1e3,
                           ['eeg'] * (len(montage.ch_names) + 2))
        with pytest.warns(RuntimeWarning, match='position specified'):
            _set_montage(info, montage)

    # Channel names can be treated case insensitive
    info = create_info(['FP1', 'af7', 'AF3'], 1e3, ['eeg'] * 3)
    _set_montage(info, montage)

    # Unless there is a collision in names
    info = create_info(['FP1', 'Fp1', 'AF3'], 1e3, ['eeg'] * 3)
    assert (info['dig'] is None)
    with pytest.warns(RuntimeWarning, match='position specified'):
        _set_montage(info, montage)
    assert len(info['dig']) == 5  # 2 EEG w/pos, 3 fiducials
    montage.ch_names = ['FP1', 'Fp1', 'AF3']
    info = create_info(['fp1', 'AF3'], 1e3, ['eeg', 'eeg'])
    assert (info['dig'] is None)
    with pytest.warns(RuntimeWarning, match='position specified'):
        _set_montage(info, montage, set_dig=False)
    assert (info['dig'] is None)

    # test get_pos2d method
    montage = read_montage("standard_1020")
    c3 = montage.get_pos2d()[montage.ch_names.index("C3")]
    c4 = montage.get_pos2d()[montage.ch_names.index("C4")]
    fz = montage.get_pos2d()[montage.ch_names.index("Fz")]
    oz = montage.get_pos2d()[montage.ch_names.index("Oz")]
    f1 = montage.get_pos2d()[montage.ch_names.index("F1")]
    assert (c3[0] < 0)  # left hemisphere
    assert (c4[0] > 0)  # right hemisphere
    assert (fz[1] > 0)  # frontal
    assert (oz[1] < 0)  # occipital
    assert_allclose(fz[0], 0, atol=1e-2)  # midline
    assert_allclose(oz[0], 0, atol=1e-2)  # midline
    assert (f1[0] < 0 and f1[1] > 0)  # left frontal

    # test get_builtin_montages function
    montages = get_builtin_montages()
    assert (len(montages) > 0)  # MNE should always ship with montages
    assert ("standard_1020" in montages)  # 10/20 montage
    assert ("standard_1005" in montages)  # 10/05 montage