Example #1
def _channels_tsv(raw, fname, verbose):
    """Create a channels.tsv file and save it.

    Parameters
    ----------
    raw : instance of Raw
        The data as MNE-Python Raw object.
    fname : str
        Filename to save the channels.tsv to.
    verbose : bool
        Set verbose output to true or false.

    """
    map_chs = defaultdict(lambda: 'OTHER')
    map_chs.update(grad='MEGGRAD', mag='MEGMAG', stim='TRIG', eeg='EEG',
                   ecog='ECOG', seeg='SEEG', eog='EOG', ecg='ECG', misc='MISC',
                   resp='RESPONSE', ref_meg='REFMEG')
    map_desc = defaultdict(lambda: 'Other type of channel')
    map_desc.update(grad='Gradiometer', mag='Magnetometer',
                    stim='Trigger',
                    eeg='ElectroEncephaloGram',
                    ecog='Electrocorticography',
                    seeg='StereoEEG',
                    ecg='ElectroCardioGram',
                    eog='ElectrOculoGram', misc='Miscellaneous',
                    ref_meg='Reference channel')

    status, ch_type, description = list(), list(), list()
    for idx, ch in enumerate(raw.info['ch_names']):
        status.append('bad' if ch in raw.info['bads'] else 'good')
        ch_type.append(map_chs[channel_type(raw.info, idx)])
        description.append(map_desc[channel_type(raw.info, idx)])
    low_cutoff, high_cutoff = (raw.info['highpass'], raw.info['lowpass'])
    units = [_unit2human.get(ch_i['unit'], 'n/a') for ch_i in raw.info['chs']]
    n_channels = raw.info['nchan']
    sfreq = raw.info['sfreq']

    df = pd.DataFrame(OrderedDict([
                      ('name', raw.info['ch_names']),
                      ('type', ch_type),
                      ('units', units),
                      ('description', description),
                      ('sampling_frequency', ['%.2f' % sfreq] * n_channels),
                      ('low_cutoff', ['%.2f' % low_cutoff] * n_channels),
                      ('high_cutoff', ['%.2f' % high_cutoff] * n_channels),
                      ('status', status)]))
    df.to_csv(fname, sep='\t', index=False)

    if verbose:
        print(os.linesep + "Writing '%s'..." % fname + os.linesep)
        print(df.head())

    return fname
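
For reference, a minimal, self-contained sketch of the per-channel loop that this helper (and the variants below) is built around; it assumes MNE-Python is installed and that channel_type is importable from mne.io.pick, as the code above implies. The channel names are made up for illustration.

import numpy as np
import mne
from mne.io.pick import channel_type

# three fake channels of different types (names are arbitrary)
info = mne.create_info(['MEG 0111', 'EEG 001', 'STI 014'], sfreq=1000.,
                       ch_types=['mag', 'eeg', 'stim'])
raw = mne.io.RawArray(np.zeros((3, 1000)), info)

# per-channel type strings, exactly what the helpers above feed into map_chs
types = [channel_type(raw.info, idx) for idx in range(raw.info['nchan'])]
print(types)  # expected: ['mag', 'eeg', 'stim']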
Example #2
def _channel_tsv(raw, fname, verbose):
    """Create channel tsv."""

    map_chs = defaultdict(lambda: 'OTHER')
    map_chs.update(grad='MEGGRAD',
                   mag='MEGMAG',
                   stim='TRIG',
                   eeg='EEG',
                   eog='EOG',
                   ecg='ECG',
                   misc='MISC',
                   ref_meg='REFMEG')
    map_desc = defaultdict(lambda: 'Other type of channel')
    map_desc.update(grad='Gradiometer',
                    mag='Magnetometer',
                    stim='Trigger',
                    eeg='ElectroEncephaloGram',
                    ecg='ElectroCardioGram',
                    eog='ElectrOculoGram',
                    misc='Miscellaneous',
                    ref_meg='Reference channel')

    status, ch_type, description = list(), list(), list()
    for idx, ch in enumerate(raw.info['ch_names']):
        status.append('bad' if ch in raw.info['bads'] else 'good')
        ch_type.append(map_chs[channel_type(raw.info, idx)])
        description.append(map_desc[channel_type(raw.info, idx)])
    low_cutoff, high_cutoff = (raw.info['highpass'], raw.info['lowpass'])
    n_channels = raw.info['nchan']
    sfreq = raw.info['sfreq']
    df = pd.DataFrame({
        'name': raw.info['ch_names'],
        'type': ch_type,
        'description': description,
        'sampling_frequency': ['%.2f' % sfreq] * n_channels,
        'low_cutoff': ['%.2f' % low_cutoff] * n_channels,
        'high_cutoff': ['%.2f' % high_cutoff] * n_channels,
        'status': status
    })
    df = df[[
        'name', 'type', 'description', 'sampling_frequency', 'low_cutoff',
        'high_cutoff', 'status'
    ]]
    df.to_csv(fname, sep='\t', index=False)

    if verbose:
        print(os.linesep + "Writing '%s'..." % fname + os.linesep)
        print(df.head())

    return fname
Example #3
def test_pick_seeg_ecog():
    """Test picking with sEEG and ECoG."""
    names = 'A1 A2 Fz O OTp1 OTp2 E1 OTp3 E2 E3'.split()
    types = 'mag mag eeg eeg seeg seeg ecog seeg ecog ecog'.split()
    info = create_info(names, 1024., types)
    picks_by_type = [('mag', [0, 1]), ('eeg', [2, 3]), ('seeg', [4, 5, 7]),
                     ('ecog', [6, 8, 9])]
    assert_indexing(info, picks_by_type)
    assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 7])
    for i, t in enumerate(types):
        assert_equal(channel_type(info, i), types[i])
    raw = RawArray(np.zeros((len(names), 10)), info)
    events = np.array([[1, 0, 0], [2, 0, 0]])
    epochs = Epochs(raw,
                    events=events,
                    event_id={'event': 0},
                    tmin=-1e-5,
                    tmax=1e-5,
                    baseline=(0, 0))  # only one sample
    evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True))
    e_seeg = evoked.copy().pick_types(meg=False, seeg=True)
    for lt, rt in zip(e_seeg.ch_names, [names[4], names[5], names[7]]):
        assert lt == rt
    # Deal with constant debacle
    raw = read_raw_fif(
        op.join(io_dir, 'tests', 'data', 'test_chpi_raw_sss.fif'))
    assert_equal(len(pick_types(raw.info, meg=False, seeg=True, ecog=True)), 0)
Example #4
def test_pick_dbs():
    """Test picking with DBS."""
    # gh-8739
    names = 'A1 A2 Fz O OTp1 OTp2 OTp3'.split()
    types = 'mag mag eeg eeg dbs dbs dbs'.split()
    info = create_info(names, 1024., types)
    picks_by_type = [('mag', [0, 1]), ('eeg', [2, 3]), ('dbs', [4, 5, 6])]
    assert_indexing(info, picks_by_type)
    assert_array_equal(pick_types(info, meg=False, dbs=True), [4, 5, 6])
    for i, t in enumerate(types):
        assert channel_type(info, i) == types[i]
    raw = RawArray(np.zeros((len(names), 7)), info)
    events = np.array([[1, 0, 0], [2, 0, 0]])
    epochs = Epochs(raw,
                    events=events,
                    event_id={'event': 0},
                    tmin=-1e-5,
                    tmax=1e-5,
                    baseline=(0, 0))  # only one sample
    evoked = epochs.average(pick_types(epochs.info, meg=True, dbs=True))
    e_dbs = evoked.copy().pick_types(meg=False, dbs=True)
    for lt, rt in zip(e_dbs.ch_names, [names[4], names[5], names[6]]):
        assert lt == rt
    raw = read_raw_fif(
        op.join(io_dir, 'tests', 'data', 'test_chpi_raw_sss.fif'))
    assert len(pick_types(raw.info, meg=False, dbs=True)) == 0
Example #5
    def _check_mne_info(self):
        class_name = class_name_of(self)
        error_hint = " Check the initialize() method"

        if self.mne_info is None:
            raise ValueError("{} node has empty mne_info "
                             "attribute.".format(class_name) + error_hint)

        channel_count = len(self.mne_info["chs"])
        if len(self.mne_info["chs"]) == 0:
            raise ValueError("{} node has 0 channels in its mne_info "
                             "attribute.".format(class_name) + error_hint)

        channel_types = {
            channel_type(self.mne_info, i)
            for i in np.arange(channel_count)
        }
        required_channel_types = {"grad", "mag", "eeg"}
        if len(channel_types.intersection(required_channel_types)) == 0:
            raise ValueError("{} has no channels of types {}".format(
                class_name, required_channel_types) + error_hint)

        try:
            self.mne_info._check_consistency()
        except RuntimeError as e:
            exception_message = ("The mne_info attribute of {} node is not "
                                 "self-consistent".format(class_name_of(self)))
            raise Exception(exception_message) from e
Example #6
def test_pick_seeg_ecog():
    """Test picking with sEEG and ECoG
    """
    names = 'A1 A2 Fz O OTp1 OTp2 E1 OTp3 E2 E3'.split()
    types = 'mag mag eeg eeg seeg seeg ecog seeg ecog ecog'.split()
    info = create_info(names, 1024., types)
    idx = channel_indices_by_type(info)
    assert_array_equal(idx['mag'], [0, 1])
    assert_array_equal(idx['eeg'], [2, 3])
    assert_array_equal(idx['seeg'], [4, 5, 7])
    assert_array_equal(idx['ecog'], [6, 8, 9])
    assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 7])
    for i, t in enumerate(types):
        assert_equal(channel_type(info, i), types[i])
    raw = RawArray(np.zeros((len(names), 10)), info)
    events = np.array([[1, 0, 0], [2, 0, 0]])
    epochs = Epochs(raw, events, {'event': 0}, -1e-5, 1e-5, add_eeg_ref=False)
    evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True))
    e_seeg = evoked.copy().pick_types(meg=False, seeg=True)
    for l, r in zip(e_seeg.ch_names, [names[4], names[5], names[7]]):
        assert_equal(l, r)
    # Deal with constant debacle
    raw = read_raw_fif(op.join(io_dir, 'tests', 'data',
                               'test_chpi_raw_sss.fif'), add_eeg_ref=False)
    assert_equal(len(pick_types(raw.info, meg=False, seeg=True, ecog=True)), 0)
Example #7
def compute_correlation_distance_matix(fwd):
    print(' Computing correlation distance matrix...')
    distance_matrix = np.zeros(
        (fwd['sol']['data'].shape[1], fwd['sol']['data'].shape[1]))
    n_ch_tot = fwd['info']['nchan']
    ch_types = set(map(lambda x: channel_type(fwd['info'], x),
                       range(n_ch_tot)))

    for _t in ch_types:
        print('  Using {} sensors for computation...'.format(_t))
        if _t in ['mag', 'grad', 'planar1', 'planar2']:
            _fwd_t = pick_types_forward(fwd, meg=_t, ref_meg=False)
        elif _t == 'eeg':
            _fwd_t = pick_types_forward(fwd, eeg=_t, ref_meg=False)
        else:
            raise NotImplementedError

        n_ch_t = _fwd_t['info']['nchan']
        _lf_t = _fwd_t['sol']['data']
        _dm_t = ssd.cdist(_lf_t.T, _lf_t.T, metric='correlation')

        distance_matrix += (n_ch_t / n_ch_tot) * _dm_t

    print(' [done]')
    return distance_matrix
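
The heart of this routine is SciPy's pairwise correlation distance over leadfield columns; a minimal sketch of that single step on a toy matrix (assuming SciPy is installed; the shapes are made up):

import numpy as np
import scipy.spatial.distance as ssd

rng = np.random.default_rng(0)
lf = rng.standard_normal((10, 50))   # toy leadfield: 10 sensors x 50 sources
dm = ssd.cdist(lf.T, lf.T, metric='correlation')  # source-by-source distances
print(dm.shape)  # (50, 50)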
Example #8
    def channel_properties(self):
        info = data.current.raw.info
        dialog = ChannelPropertiesDialog(self, info)
        if dialog.exec_():
            dialog.model.sort(0)
            bads = []
            renamed = {}
            types = {}
            for i in range(dialog.model.rowCount()):
                new_label = dialog.model.item(i, 1).data(Qt.DisplayRole)
                old_label = info["ch_names"][i]
                if new_label != old_label:
                    renamed[old_label] = new_label
                new_type = dialog.model.item(i, 2).data(Qt.DisplayRole).lower()
                old_type = channel_type(info, i).lower()
                if new_type != old_type:
                    types[new_label] = new_type
                if dialog.model.item(i, 3).checkState() == Qt.Checked:
                    bads.append(info["ch_names"][i])
            info["bads"] = bads
            data.data[data.index].raw.info["bads"] = bads
            if renamed:
                mne.rename_channels(info, renamed)
                mne.rename_channels(data.data[data.index].raw.info, renamed)
            if types:
                data.current.raw.set_channel_types(types)
                data.data[data.index].raw.set_channel_types(types)
            self._update_infowidget()
            self._toggle_actions(True)
Example #9
def test_pick_seeg_ecog():
    """Test picking with sEEG and ECoG
    """
    names = 'A1 A2 Fz O OTp1 OTp2 E1 OTp3 E2 E3'.split()
    types = 'mag mag eeg eeg seeg seeg ecog seeg ecog ecog'.split()
    info = create_info(names, 1024., types)
    idx = channel_indices_by_type(info)
    assert_array_equal(idx['mag'], [0, 1])
    assert_array_equal(idx['eeg'], [2, 3])
    assert_array_equal(idx['seeg'], [4, 5, 7])
    assert_array_equal(idx['ecog'], [6, 8, 9])
    assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 7])
    for i, t in enumerate(types):
        assert_equal(channel_type(info, i), types[i])
    raw = RawArray(np.zeros((len(names), 10)), info)
    events = np.array([[1, 0, 0], [2, 0, 0]])
    epochs = Epochs(raw, events, {'event': 0}, -1e-5, 1e-5)
    evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True))
    e_seeg = evoked.copy().pick_types(meg=False, seeg=True)
    for l, r in zip(e_seeg.ch_names, [names[4], names[5], names[7]]):
        assert_equal(l, r)
    # Deal with constant debacle
    raw = read_raw_fif(
        op.join(io_dir, 'tests', 'data', 'test_chpi_raw_sss.fif'))
    assert_equal(len(pick_types(raw.info, meg=False, seeg=True, ecog=True)), 0)
Example #10
def test_bdf_stim_channel():
    """Test BDF stim channel."""
    # test if last channel is detected as STIM by default
    raw_py = _test_raw_reader(read_raw_edf, input_fname=bdf_path,
                              stim_channel='auto')
    assert channel_type(raw_py.info, raw_py.info["nchan"] - 1) == 'stim'

    # test BDF file with wrong scaling info in header - this should be ignored
    # for BDF stim channels
    events = [[242, 0, 4],
              [310, 0, 2],
              [952, 0, 1],
              [1606, 0, 1],
              [2249, 0, 1],
              [2900, 0, 1],
              [3537, 0, 1],
              [4162, 0, 1],
              [4790, 0, 1]]
    with pytest.deprecated_call(match='stim_channel'):
        raw = read_raw_edf(bdf_stim_channel_path, preload=True)
    bdf_events = find_events(raw)
    assert_array_equal(events, bdf_events)
    raw = read_raw_edf(bdf_stim_channel_path, preload=False,
                       stim_channel='auto')
    bdf_events = find_events(raw)
    assert_array_equal(events, bdf_events)
Example #11
    def _check_mne_info(self):
        class_name = class_name_of(self)
        error_hint = ' Check the initialize() method'

        if self.mne_info is None:
            raise ValueError('{} node has empty mne_info '
                             'attribute.'.format(class_name) + error_hint)

        channel_count = len(self.mne_info['chs'])
        if channel_count == 0:
            raise ValueError('{} node has 0 channels in its mne_info '
                             'attribute.'.format(class_name) + error_hint)

        channel_types = {
            channel_type(self.mne_info, i) for i in np.arange(channel_count)}
        required_channel_types = {'grad', 'mag', 'eeg'}
        if len(channel_types.intersection(required_channel_types)) == 0:
            raise ValueError('{} has no channels of types {}'.format(
                class_name, required_channel_types) + error_hint)

        try:
            self.mne_info._check_consistency()
        except RuntimeError as e:
            exception_message = ('The mne_info attribute of {} node is not '
                                 'self-consistent'.format(class_name_of(self)))
            raise Exception(exception_message) from e
Example #12
    def get_info(self):
        """Get basic information on current file.

        Returns
        -------
        info : dict
            Dictionary with information on current file.
        """
        raw = data.current.raw
        fname = data.current.fname
        ftype = data.current.ftype
        reference = data.current.reference
        events = data.current.events
        montage = data.current.montage

        if raw.info["bads"]:
            nbads = len(raw.info["bads"])
            nchan = "{} ({} bad)".format(raw.info["nchan"], nbads)
        else:
            nchan = raw.info["nchan"]
        chans = Counter([channel_type(raw.info, i)
                         for i in range(raw.info["nchan"])])

        if events is not None:
            nevents = events.shape[0]
            unique = [str(e) for e in sorted(set(events[:, 2]))]
            if len(unique) > 20:  # do not show all events
                first = ", ".join(unique[:10])
                last = ", ".join(unique[-10:])
                events = "{} ({})".format(nevents, first + ", ..., " + last)
            else:
                events = "{} ({})".format(nevents, ", ".join(unique))
        else:
            events = "-"

        if isinstance(reference, list):
            reference = ",".join(reference)

        if raw.annotations is not None:
            annots = len(raw.annotations.description)
        else:
            annots = "-"

        return {"File name": fname if fname else "-",
                "File type": ftype if ftype else "-",
                "Number of channels": nchan,
                "Channels": ", ".join(
                    [" ".join([str(v), k.upper()]) for k, v in chans.items()]),
                "Samples": raw.n_times,
                "Sampling frequency": str(raw.info["sfreq"]) + " Hz",
                "Length": str(raw.n_times / raw.info["sfreq"]) + " s",
                "Events": events,
                "Annotations": annots,
                "Reference": reference if reference else "-",
                "Montage": montage if montage is not None else "-",
                "Size in memory": "{:.2f} MB".format(
                    raw._data.nbytes / 1024 ** 2),
                "Size on disk": "-" if not fname else "{:.2f} MB".format(
                    getsize(fname) / 1024 ** 2)}
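
The "Channels" entry above reduces to a Counter over per-index channel types; a standalone sketch of just that step (channel names invented for illustration):

from collections import Counter

import mne
from mne.io.pick import channel_type

info = mne.create_info(['Fz', 'Cz', 'EOG 061', 'STI 014'], 250.,
                       ['eeg', 'eeg', 'eog', 'stim'])
chans = Counter(channel_type(info, i) for i in range(info['nchan']))
print(", ".join(" ".join([str(v), k.upper()]) for k, v in chans.items()))
# e.g. "2 EEG, 1 EOG, 1 STIM"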
Example #13
def test_pick_chpi():
    """Test picking cHPI."""
    # Make sure we don't mis-classify cHPI channels
    info = read_info(op.join(io_dir, 'tests', 'data', 'test_chpi_raw_sss.fif'))
    channel_types = {channel_type(info, idx) for idx in range(info['nchan'])}
    assert 'chpi' in channel_types
    assert 'seeg' not in channel_types
    assert 'ecog' not in channel_types
Example #14
def test_pick_chpi():
    """Test picking cHPI."""
    # Make sure we don't mis-classify cHPI channels
    info = read_info(op.join(io_dir, 'tests', 'data', 'test_chpi_raw_sss.fif'))
    _assert_channel_types(info)
    channel_types = {channel_type(info, idx) for idx in range(info['nchan'])}
    assert 'chpi' in channel_types
    assert 'seeg' not in channel_types
    assert 'ecog' not in channel_types
Example #15
def test_pick_chpi():
    """Test picking cHPI
    """
    # Make sure we don't mis-classify cHPI channels
    info = read_info(op.join(io_dir, 'tests', 'data', 'test_chpi_raw_sss.fif'))
    channel_types = set([channel_type(info, idx)
                         for idx in range(info['nchan'])])
    assert_true('chpi' in channel_types)
    assert_true('seeg' not in channel_types)
Example #16
def cli(matfiles, savename, rec_type, infosrc):
    """
    Convert brainstorm epochs to mne.Epochs object
    """
    if infosrc:
        if rec_type == 'ds':
            from mne.io import read_raw_ctf as read_raw
        elif rec_type == 'fif':
            from mne.io import Raw as read_raw
        with nostdout():
            raw_with_info = read_raw(infosrc)

    isFirst = True
    for fname in matfiles:
        with nostdout():
            mat_epoch = sio.loadmat(fname)
            # click.echo(mat_epoch)
        if isFirst:
            data = mat_epoch['F']
            times = mat_epoch['Time']
            # print times[0,-1]
            isFirst = False
        else:
            data = np.dstack((data, mat_epoch['F']))
        # click.echo(data.shape)
    data = data.transpose((2,0,1))


    n_channels = data.shape[1]
    sfreq = times.shape[1] / (times[0,-1] + times[0,1])
    
    
    if infosrc:
        if rec_type == 'ds':
            from mne.io import read_raw_ctf as read_raw
        elif rec_type == 'fif':
            from mne.io import Raw as read_raw

        with nostdout():
            raw_with_info = read_raw(infosrc)
        good_info = raw_with_info.info
        # click.echo(len(good_info['ch_names']))

        ch_types = [channel_type(good_info, idx) for idx in range(n_channels)]

        # click.echo(len(ch_types))

        info = create_info(ch_names=good_info['ch_names'], sfreq=sfreq, ch_types=ch_types)
    else:
        ch_types='mag'
        info = create_info(n_channels, sfreq, ch_types)

    with nostdout():
        epochs = EpochsArray(data, info)
    epochs.save(savename)
Example #17
def _channel_tsv(raw, fname):
    """Create channel tsv."""

    map_chs = dict(grad='MEGGRAD',
                   mag='MEGMAG',
                   stim='TRIG',
                   eeg='EEG',
                   eog='EOG',
                   ecg='ECG',
                   misc='MISC')
    map_desc = dict(grad='Gradiometer',
                    mag='Magnetometer',
                    stim='Trigger',
                    eeg='ElectroEncephaloGram',
                    ecg='ElectroCardioGram',
                    eog='ElectrOculoGram',
                    misc='Miscellaneous')

    status, ch_type, description = list(), list(), list()
    for idx, ch in enumerate(raw.info['ch_names']):
        status.append('bad' if ch in raw.info['bads'] else 'good')
        ch_type.append(map_chs[channel_type(raw.info, idx)])
        description.append(map_desc[channel_type(raw.info, idx)])
    low_cutoff, high_cutoff = (raw.info['highpass'], raw.info['lowpass'])
    n_channels = raw.info['nchan']
    sfreq = raw.info['sfreq']
    df = pd.DataFrame({
        'name': raw.info['ch_names'],
        'type': ch_type,
        'description': description,
        'sampling_frequency': ['%.2f' % sfreq] * n_channels,
        'low_cutoff': ['%.2f' % low_cutoff] * n_channels,
        'high_cutoff': ['%.2f' % high_cutoff] * n_channels,
        'status': status
    })
    df = df[[
        'name', 'type', 'description', 'sampling_frequency', 'low_cutoff',
        'high_cutoff', 'status'
    ]]
    df.to_csv(fname, sep='\t', index=False)
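
Note that this variant maps channel types with a plain dict, so any channel type outside the map (e.g. 'chpi' or 'ref_meg' here) raises a KeyError, whereas the defaultdict-based versions above silently fall back to 'OTHER'. A tiny illustration of the difference:

from collections import defaultdict

plain = dict(eeg='EEG', stim='TRIG')
fallback = defaultdict(lambda: 'OTHER', eeg='EEG', stim='TRIG')

print(fallback['chpi'])   # 'OTHER'
print(plain['chpi'])      # raises KeyError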
Example #18
def test_get_channel_types_equiv(meg, eeg, ordered):
    """Test equivalence of get_channel_types."""
    raw = read_raw_fif(fif_fname)
    pick_types(raw.info, meg=meg, eeg=eeg)
    picks = pick_types(raw.info, meg=meg, eeg=eeg)
    if not ordered:
        picks = np.random.RandomState(0).permutation(picks)
    if not meg and not eeg:
        with pytest.raises(ValueError, match='No appropriate channels'):
            raw.get_channel_types(picks=picks)
        return
    types = np.array(raw.get_channel_types(picks=picks))
    types_iter = np.array([channel_type(raw.info, idx) for idx in picks])
    assert_array_equal(types, types_iter)
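
The equivalence being tested can be reproduced on a synthetic recording in a few lines; this sketch assumes an MNE-Python version recent enough to provide Raw.get_channel_types (the channel names are arbitrary):

import numpy as np
import mne
from mne.io.pick import channel_type

info = mne.create_info(['MEG 0111', 'MEG 0112', 'EEG 001'], 600.,
                       ['mag', 'grad', 'eeg'])
raw = mne.io.RawArray(np.zeros((3, 10)), info)

picks = mne.pick_types(raw.info, meg=True, eeg=True)
vectorized = list(raw.get_channel_types(picks=picks))
one_by_one = [channel_type(raw.info, idx) for idx in picks]
assert vectorized == one_by_one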
Example #19
    def __init__(self, parent, info, title="Channel Properties"):
        super().__init__(parent)
        self.setWindowTitle(title)

        self.model = QStandardItemModel(info["nchan"], 4)
        self.model.setHorizontalHeaderLabels(["#", "Label", "Type", "Bad"])
        for index, ch in enumerate(info["chs"]):
            item = QStandardItem()
            item.setData(index, Qt.DisplayRole)
            item.setFlags(item.flags() & ~Qt.ItemIsEditable)
            self.model.setItem(index, 0, item)
            self.model.setItem(index, 1, QStandardItem(ch["ch_name"]))
            kind = channel_type(info, index).upper()
            self.model.setItem(index, 2, QStandardItem(str(kind)))
            bad = QStandardItem()
            bad.setData(ch["ch_name"] in info["bads"], Qt.UserRole)
            bad.setCheckable(True)
            bad.setEditable(False)
            checked = ch["ch_name"] in info["bads"]
            bad.setCheckState(Qt.Checked if checked else Qt.Unchecked)
            self.model.setItem(index, 3, bad)

        self.model.itemChanged.connect(bad_changed)
        self.proxymodel = MySortFilterProxyModel()
        self.proxymodel.setDynamicSortFilter(False)
        self.proxymodel.setSourceModel(self.model)

        self.view = QTableView()
        self.view.setModel(self.proxymodel)
        self.view.setItemDelegateForColumn(2, ComboBoxDelegate(self.view))
        self.view.setEditTriggers(QAbstractItemView.AllEditTriggers)
        self.view.verticalHeader().setVisible(False)
        self.view.horizontalHeader().setStretchLastSection(True)
        self.view.setShowGrid(False)
        self.view.setSelectionMode(QAbstractItemView.NoSelection)
        self.view.setSortingEnabled(True)
        self.view.sortByColumn(0, Qt.AscendingOrder)

        vbox = QVBoxLayout(self)
        vbox.addWidget(self.view)
        self.buttonbox = QDialogButtonBox(QDialogButtonBox.Ok
                                          | QDialogButtonBox.Cancel)
        vbox.addWidget(self.buttonbox)
        self.buttonbox.accepted.connect(self.accept)
        self.buttonbox.rejected.connect(self.reject)

        self.resize(500, 650)
        self.view.setColumnWidth(0, 75)
        self.view.setColumnWidth(1, 150)
        self.view.setColumnWidth(2, 90)
Example #20
def test_bdf_stim_channel():
    """Test BDF stim channel."""
    # test if last channel is detected as STIM by default
    raw_py = _test_raw_reader(read_raw_edf, input_fname=bdf_path)
    assert channel_type(raw_py.info, raw_py.info["nchan"] - 1) == 'stim'

    # test BDF file with wrong scaling info in header - this should be ignored
    # for BDF stim channels
    events = [[242, 0, 4], [310, 0, 2], [952, 0, 1], [1606, 0, 1],
              [2249, 0, 1], [2900, 0, 1], [3537, 0, 1], [4162, 0, 1],
              [4790, 0, 1]]
    raw = read_raw_edf(bdf_stim_channel_path, preload=True)
    bdf_events = find_events(raw)
    assert_array_equal(events, bdf_events)
    raw = read_raw_edf(bdf_stim_channel_path, preload=False)
    bdf_events = find_events(raw)
    assert_array_equal(events, bdf_events)
Example #21
def test_edf_stim_channel():
    """Test stim channel for edf file."""
    # test if stim channel is automatically detected
    raw = read_raw_edf(edf_path, preload=True)
    assert channel_type(raw.info, raw.info["nchan"] - 1) == 'stim'

    raw = read_raw_edf(edf_stim_channel_path, preload=True, stim_channel=-1)
    true_data = np.loadtxt(edf_txt_stim_channel_path).T

    # EDF writer pads data if the file is too small
    _, ns = true_data.shape
    edf_data = raw._data[:, :ns]

    # assert stim channels are equal
    assert_array_equal(true_data[-1], edf_data[-1])

    # assert data are equal
    assert_array_almost_equal(true_data[0:-1] * 1e-6, edf_data[0:-1])
Example #22
def test_pick_seeg():
    names = 'A1 A2 Fz O OTp1 OTp2 OTp3'.split()
    types = 'mag mag eeg eeg seeg seeg seeg'.split()
    info = create_info(names, 1024., types)
    idx = channel_indices_by_type(info)
    assert_array_equal(idx['mag'], [0, 1])
    assert_array_equal(idx['eeg'], [2, 3])
    assert_array_equal(idx['seeg'], [4, 5, 6])
    assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 6])
    for i, t in enumerate(types):
        assert_equal(channel_type(info, i), types[i])
    raw = RawArray(zeros((len(names), 10)), info)
    events = array([[1, 0, 0], [2, 0, 0]]).astype('d')
    epochs = Epochs(raw, events, {'event': 0}, -1e-5, 1e-5)
    evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True))
    e_seeg = pick_types_evoked(evoked, meg=False, seeg=True)
    for l, r in zip(e_seeg.ch_names, names[4:]):
        assert_equal(l, r)
Example #23
def test_pick_seeg():
    names = 'A1 A2 Fz O OTp1 OTp2 OTp3'.split()
    types = 'mag mag eeg eeg seeg seeg seeg'.split()
    info = create_info(names, 1024., types)
    idx = channel_indices_by_type(info)
    assert_array_equal(idx['mag'], [0, 1])
    assert_array_equal(idx['eeg'], [2, 3])
    assert_array_equal(idx['seeg'], [4, 5, 6])
    assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 6])
    for i, t in enumerate(types):
        assert_equal(channel_type(info, i), types[i])
    raw = RawArray(zeros((len(names), 10)), info)
    events = array([[1, 0, 0], [2, 0, 0]]).astype('d')
    epochs = Epochs(raw, events, {'event': 0}, -1e-5, 1e-5)
    evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True))
    e_seeg = pick_types_evoked(evoked, meg=False, seeg=True)
    for l, r in zip(e_seeg.ch_names, names[4:]):
        assert_equal(l, r)
Example #24
def test_edf_stim_channel():
    """Test stim channel for edf file."""
    # test if stim channel is automatically detected
    raw = read_raw_edf(edf_path, preload=True)
    assert_true(channel_type(raw.info, raw.info["nchan"] - 1) == 'stim')

    raw = read_raw_edf(edf_stim_channel_path, preload=True,
                       stim_channel=-1)
    true_data = np.loadtxt(edf_txt_stim_channel_path).T

    # EDF writer pads data if the file is too small
    _, ns = true_data.shape
    edf_data = raw._data[:, :ns]

    # assert stim channels are equal
    assert_array_equal(true_data[-1], edf_data[-1])

    # assert data are equal
    assert_array_almost_equal(true_data[0:-1] * 1e-6, edf_data[0:-1])
Example #25
def test_pick_seeg():
    """Test picking with SEEG
    """
    names = "A1 A2 Fz O OTp1 OTp2 OTp3".split()
    types = "mag mag eeg eeg seeg seeg seeg".split()
    info = create_info(names, 1024.0, types)
    idx = channel_indices_by_type(info)
    assert_array_equal(idx["mag"], [0, 1])
    assert_array_equal(idx["eeg"], [2, 3])
    assert_array_equal(idx["seeg"], [4, 5, 6])
    assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 6])
    for i, t in enumerate(types):
        assert_equal(channel_type(info, i), types[i])
    raw = RawArray(np.zeros((len(names), 10)), info)
    events = np.array([[1, 0, 0], [2, 0, 0]]).astype("d")
    epochs = Epochs(raw, events, {"event": 0}, -1e-5, 1e-5)
    evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True))
    e_seeg = evoked.pick_types(meg=False, seeg=True, copy=True)
    for l, r in zip(e_seeg.ch_names, names[4:]):
        assert_equal(l, r)
Example #26
    def get_info(self):
        """Get basic information on current file.
        """
        raw = self.datasets.current.raw
        fname = self.datasets.current.fname

        nchan = raw.info["nchan"]
        chans = Counter([channel_type(raw.info, i) for i in range(nchan)])

        return {"File name": fname if fname else "-",
                "Number of channels": raw.info["nchan"],
                "Channels": ", ".join(
                    [" ".join([str(v), k.upper()]) for k, v in chans.items()]),
                "Samples": raw.n_times,
                "Sampling frequency": str(raw.info["sfreq"]) + " Hz",
                "Length": str(raw.n_times / raw.info["sfreq"]) + " s",
                "Size in memory": "{:.2f} MB".format(
                    raw._data.nbytes / 1024 ** 2),
                "Size on disk": "-" if not fname else "{:.2f} MB".format(
                    getsize(fname) / 1024 ** 2)}
Example #27
    def channel_properties(self):
        """Show channel properties dialog."""
        info = self.model.current["data"].info
        dialog = ChannelPropertiesDialog(self, info)
        if dialog.exec_():
            dialog.model.sort(0)
            bads = []
            renamed = {}
            types = {}
            for i in range(dialog.model.rowCount()):
                new_label = dialog.model.item(i, 1).data(Qt.DisplayRole)
                old_label = info["ch_names"][i]
                if new_label != old_label:
                    renamed[old_label] = new_label
                new_type = dialog.model.item(i, 2).data(Qt.DisplayRole).lower()
                old_type = channel_type(info, i).lower()
                if new_type != old_type:
                    types[new_label] = new_type
                if dialog.model.item(i, 3).checkState() == Qt.Checked:
                    bads.append(info["ch_names"][i])
            self.model.set_channel_properties(bads, renamed, types)
Example #28
def test_pick_seeg():
    """Test picking with SEEG
    """
    names = 'A1 A2 Fz O OTp1 OTp2 OTp3'.split()
    types = 'mag mag eeg eeg seeg seeg seeg'.split()
    info = create_info(names, 1024., types)
    idx = channel_indices_by_type(info)
    assert_array_equal(idx['mag'], [0, 1])
    assert_array_equal(idx['eeg'], [2, 3])
    assert_array_equal(idx['seeg'], [4, 5, 6])
    assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 6])
    for i, t in enumerate(types):
        assert_equal(channel_type(info, i), types[i])
    raw = RawArray(np.zeros((len(names), 10)), info)
    events = np.array([[1, 0, 0], [2, 0, 0]])
    epochs = Epochs(raw, events, {'event': 0}, -1e-5, 1e-5)
    evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True))
    e_seeg = evoked.pick_types(meg=False, seeg=True, copy=True)
    for l, r in zip(e_seeg.ch_names, names[4:]):
        assert_equal(l, r)
    # Deal with constant debacle
    raw = Raw(fname_mc)
    assert_equal(len(pick_types(raw.info, meg=False, seeg=True)), 0)
Example #29
def _channels_tsv(raw, fname, overwrite=False, verbose=True):
    """Create a channels.tsv file and save it.

    Parameters
    ----------
    raw : instance of Raw
        The data as MNE-Python Raw object.
    fname : str
        Filename to save the channels.tsv to.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
    verbose : bool
        Set verbose output to true or false.

    """
    map_chs = defaultdict(lambda: 'OTHER')
    map_chs.update(meggradaxial='MEGGRADAXIAL',
                   megrefgradaxial='MEGREFGRADAXIAL',
                   meggradplanar='MEGGRADPLANAR',
                   megmag='MEGMAG',
                   megrefmag='MEGREFMAG',
                   eeg='EEG',
                   misc='MISC',
                   stim='TRIG',
                   emg='EMG',
                   ecog='ECOG',
                   seeg='SEEG',
                   eog='EOG',
                   ecg='ECG')
    map_desc = defaultdict(lambda: 'Other type of channel')
    map_desc.update(meggradaxial='Axial Gradiometer',
                    megrefgradaxial='Axial Gradiometer Reference',
                    meggradplanar='Planar Gradiometer',
                    megmag='Magnetometer',
                    megrefmag='Magnetometer Reference',
                    stim='Trigger',
                    eeg='ElectroEncephaloGram',
                    ecog='Electrocorticography',
                    seeg='StereoEEG',
                    ecg='ElectroCardioGram',
                    eog='ElectroOculoGram',
                    emg='ElectroMyoGram',
                    misc='Miscellaneous')
    get_specific = ('mag', 'ref_meg', 'grad')

    # get the manufacturer from the file in the Raw object
    manufacturer = None
    if hasattr(raw, 'filenames'):
        # XXX: Hack for EEGLAB bug in MNE-Python 0.16; fixed in MNE-Python
        # 0.17, ... remove the hack after upgrading dependencies in MNE-BIDS
        if raw.filenames[0] is None:  # hack
            ext = '.set'  # hack
        else:
            _, ext = _parse_ext(raw.filenames[0], verbose=verbose)
            manufacturer = MANUFACTURERS[ext]

    ignored_indexes = [
        raw.ch_names.index(ch_name) for ch_name in raw.ch_names
        if ch_name in IGNORED_CHANNELS.get(manufacturer, list())
    ]

    status, ch_type, description = list(), list(), list()
    for idx, ch in enumerate(raw.info['ch_names']):
        status.append('bad' if ch in raw.info['bads'] else 'good')
        _channel_type = channel_type(raw.info, idx)
        if _channel_type in get_specific:
            _channel_type = coil_type(raw.info, idx)
        ch_type.append(map_chs[_channel_type])
        description.append(map_desc[_channel_type])
    low_cutoff, high_cutoff = (raw.info['highpass'], raw.info['lowpass'])
    units = [_unit2human.get(ch_i['unit'], 'n/a') for ch_i in raw.info['chs']]
    units = [u if u not in ['NA'] else 'n/a' for u in units]
    n_channels = raw.info['nchan']
    sfreq = raw.info['sfreq']

    df = pd.DataFrame(
        OrderedDict([('name', raw.info['ch_names']), ('type', ch_type),
                     ('units', units), ('description', description),
                     ('sampling_frequency', np.full((n_channels), sfreq)),
                     ('low_cutoff', np.full((n_channels), low_cutoff)),
                     ('high_cutoff', np.full((n_channels), high_cutoff)),
                     ('status', status)]))
    df.drop(ignored_indexes, inplace=True)

    _write_tsv(fname, df, overwrite, verbose)

    return fname
Example #30
def _plot_evoked(evoked, plot_type, colorbar=True, hline=None, ylim=None,
                 picks=None, exclude='bads', unit=True, show=True,
                 clim=None, proj=False, xlim='tight', units=None,
                 scalings=None, titles=None, axes=None, cmap='RdBu_r'):
    """Aux function for plot_evoked and plot_evoked_image (cf. docstrings)

    Extra param is:

    plot_type : str, value ('butterfly' | 'image')
        The type of graph to plot: 'butterfly' plots each channel as a line
        (x axis: time, y axis: amplitude). 'image' plots a 2D image where
        color depicts the amplitude of each channel at a given time point
        (x axis: time, y axis: channel). In 'image' mode, the plot is not
        interactive.
    """
    import matplotlib.pyplot as plt
    if axes is not None and proj == 'interactive':
        raise RuntimeError('Currently only single axis figures are supported'
                           ' for interactive SSP selection.')

    scalings = _handle_default('scalings', scalings)
    titles = _handle_default('titles', titles)
    units = _handle_default('units', units)

    channel_types = set(key for d in [scalings, titles, units] for key in d)
    channel_types = sorted(channel_types)  # to guarantee consistent order

    if picks is None:
        picks = list(range(evoked.info['nchan']))

    bad_ch_idx = [evoked.ch_names.index(ch) for ch in evoked.info['bads']
                  if ch in evoked.ch_names]
    if len(exclude) > 0:
        if isinstance(exclude, string_types) and exclude == 'bads':
            exclude = bad_ch_idx
        elif (isinstance(exclude, list)
              and all([isinstance(ch, string_types) for ch in exclude])):
            exclude = [evoked.ch_names.index(ch) for ch in exclude]
        else:
            raise ValueError('exclude has to be a list of channel names or '
                             '"bads"')

        picks = list(set(picks).difference(exclude))

    types = [channel_type(evoked.info, idx) for idx in picks]
    n_channel_types = 0
    ch_types_used = []
    for t in channel_types:
        if t in types:
            n_channel_types += 1
            ch_types_used.append(t)

    axes_init = axes  # remember if axes were given as input

    fig = None
    if axes is None:
        fig, axes = plt.subplots(n_channel_types, 1)

    if isinstance(axes, plt.Axes):
        axes = [axes]
    elif isinstance(axes, np.ndarray):
        axes = list(axes)

    if axes_init is not None:
        fig = axes[0].get_figure()

    if not len(axes) == n_channel_types:
        raise ValueError('Number of axes (%g) must match number of channel '
                         'types (%g)' % (len(axes), n_channel_types))

    # instead of projecting during each iteration let's use the mixin here.
    if proj is True and evoked.proj is not True:
        evoked = evoked.copy()
        evoked.apply_proj()

    times = 1e3 * evoked.times  # time in milliseconds
    for ax, t in zip(axes, ch_types_used):
        ch_unit = units[t]
        this_scaling = scalings[t]
        if unit is False:
            this_scaling = 1.0
            ch_unit = 'NA'  # no unit
        idx = [picks[i] for i in range(len(picks)) if types[i] == t]
        if len(idx) > 0:
            # Parameters for butterfly interactive plots
            if plot_type == 'butterfly':
                if any([i in bad_ch_idx for i in idx]):
                    colors = ['k'] * len(idx)
                    for i in bad_ch_idx:
                        if i in idx:
                            colors[idx.index(i)] = 'r'

                    ax._get_lines.color_cycle = iter(colors)
                else:
                    ax._get_lines.color_cycle = cycle(['k'])
            # Set amplitude scaling
            D = this_scaling * evoked.data[idx, :]
            # plt.axes(ax)
            if plot_type == 'butterfly':
                ax.plot(times, D.T)
            elif plot_type == 'image':
                im = ax.imshow(D, interpolation='nearest', origin='lower',
                               extent=[times[0], times[-1], 0, D.shape[0]],
                               aspect='auto', cmap=cmap)
                if colorbar:
                    cbar = plt.colorbar(im, ax=ax)
                    cbar.ax.set_title(ch_unit)
            elif plot_type == 'mean' :
#                 ax.plot(times, D.mean(axis=0))
                ax.plot(times, np.abs(D).mean(axis=0))
            if xlim is not None:
                if xlim == 'tight':
                    xlim = (times[0], times[-1])
                ax.set_xlim(xlim)
            if ylim is not None and t in ylim:
                if plot_type == 'butterfly' or plot_type == 'mean':
                    ax.set_ylim(ylim[t])
                elif plot_type == 'image':
                    im.set_clim(ylim[t])
            ax.set_title(titles[t] + ' (%d channel%s)' % (
                         len(D), 's' if len(D) > 1 else ''))
            ax.set_xlabel('time (ms)')
            if plot_type == 'butterfly' or plot_type == 'mean':
                ax.set_ylabel('data (%s)' % ch_unit)
            elif plot_type == 'image':
                ax.set_ylabel('channels (%s)' % 'index')
            else:
                raise ValueError("plot_type has to be 'butterfly' or 'image'."
                                 "Got %s." % plot_type)

            if (plot_type == 'butterfly' or plot_type == 'mean') and (hline is not None):
                for h in hline:
                    ax.axhline(h, color='r', linestyle='--', linewidth=2)

    if axes_init is None:
        plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)

    # if proj == 'interactive':
    #     _check_delayed_ssp(evoked)
    #     params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
    #                   axes=axes, types=types, units=units, scalings=scalings,
    #                   unit=unit, ch_types_used=ch_types_used, picks=picks,
    #                   plot_update_proj_callback=_plot_update_evoked,
    #                   plot_type=plot_type)
    #     _draw_proj_checkbox(None, params)

    if show and plt.get_backend() != 'agg':
        plt.show()
        fig.canvas.draw()  # for axes plots update axes.
    tight_layout(fig=fig)

    return fig
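
The per-subplot grouping in this helper (one axes per channel type present in picks) boils down to filtering picks by channel_type; a minimal standalone sketch of that grouping (assuming MNE-Python is installed, names arbitrary):

import mne
from mne.io.pick import channel_type

info = mne.create_info(['MEG 0111', 'MEG 0112', 'EEG 001', 'EEG 002'], 1000.,
                       ['mag', 'grad', 'eeg', 'eeg'])
picks = list(range(info['nchan']))
types = [channel_type(info, idx) for idx in picks]

for t in sorted(set(types)):
    idx = [picks[i] for i in range(len(picks)) if types[i] == t]
    print(t, idx)  # eeg [2, 3] / grad [1] / mag [0]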
Example #31
def test_rank():
    """Test cov rank estimation."""
    # Test that our rank estimation works properly on a simple case
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=False)
    cov = read_cov(cov_fname)
    ch_names = [ch for ch in evoked.info['ch_names'] if '053' not in ch and
                ch.startswith('EEG')]
    cov = prepare_noise_cov(cov, evoked.info, ch_names, None)
    assert_equal(cov['eig'][0], 0.)  # avg projector should set this to zero
    assert_true((cov['eig'][1:] > 0).all())  # all else should be > 0

    # Now do some more comprehensive tests
    raw_sample = read_raw_fif(raw_fname)

    raw_sss = read_raw_fif(hp_fif_fname)
    raw_sss.add_proj(compute_proj_raw(raw_sss))

    cov_sample = compute_raw_covariance(raw_sample)
    cov_sample_proj = compute_raw_covariance(
        raw_sample.copy().apply_proj())

    cov_sss = compute_raw_covariance(raw_sss)
    cov_sss_proj = compute_raw_covariance(
        raw_sss.copy().apply_proj())

    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)

    info_sample = pick_info(raw_sample.info, picks_all_sample)
    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
                                             eeg=True))]
    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
    picks_stack_sample += [('all',
                            pick_types(info_sample, meg=True, eeg=True))]

    info_sss = pick_info(raw_sss.info, picks_all_sss)
    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
    picks_stack_somato += [('all',
                            pick_types(info_sss, meg=True, eeg=True))]

    iter_tests = list(itt.product(
        [(cov_sample, picks_stack_sample, info_sample),
         (cov_sample_proj, picks_stack_sample, info_sample),
         (cov_sss, picks_stack_somato, info_sss),
         (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
        [dict(mag=1e15, grad=1e13, eeg=1e6)]
    ))

    for (cov, picks_list, this_info), scalings in iter_tests:
        for ch_type, picks in picks_list:

            this_very_info = pick_info(this_info, picks)

            # compute subset of projs
            this_projs = [c['active'] and
                          len(set(c['data']['col_names'])
                              .intersection(set(this_very_info['ch_names']))) >
                          0 for c in cov['projs']]
            n_projs = sum(this_projs)

            # count channel types
            ch_types = [channel_type(this_very_info, idx)
                        for idx in range(len(picks))]
            n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
                                    ['eeg', 'mag', 'grad']]
            n_meg = n_mag + n_grad
            if ch_type in ('all', 'eeg'):
                n_projs_eeg = 1
            else:
                n_projs_eeg = 0

            # check sss
            if len(this_very_info['proc_history']) > 0:
                mf = this_very_info['proc_history'][0]['max_info']
                n_free = _get_sss_rank(mf)
                if 'mag' not in ch_types and 'grad' not in ch_types:
                    n_free = 0
                # - n_projs XXX clarify
                expected_rank = n_free + n_eeg
                if n_projs > 0 and ch_type in ('all', 'eeg'):
                    expected_rank -= n_projs_eeg
            else:
                expected_rank = n_meg + n_eeg - n_projs

            C = cov['data'][np.ix_(picks, picks)]
            est_rank = _estimate_rank_meeg_cov(C, this_very_info,
                                               scalings=scalings)

            assert_equal(expected_rank, est_rank)
Example #32
def test_rank():
    """Test cov rank estimation"""
    raw_sample = Raw(raw_fname)

    raw_sss = Raw(hp_fif_fname)
    raw_sss.add_proj(compute_proj_raw(raw_sss))

    cov_sample = compute_raw_data_covariance(raw_sample)
    cov_sample_proj = compute_raw_data_covariance(
        raw_sample.copy().apply_proj())

    cov_sss = compute_raw_data_covariance(raw_sss)
    cov_sss_proj = compute_raw_data_covariance(raw_sss.copy().apply_proj())

    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)

    info_sample = pick_info(raw_sample.info, picks_all_sample)
    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
                                             eeg=True))]
    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
    picks_stack_sample += [('all', pick_types(info_sample, meg=True,
                                              eeg=True))]

    info_sss = pick_info(raw_sss.info, picks_all_sss)
    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
    picks_stack_somato += [('all', pick_types(info_sss, meg=True, eeg=True))]

    iter_tests = list(
        itt.product(
            [(cov_sample, picks_stack_sample, info_sample),
             (cov_sample_proj, picks_stack_sample, info_sample),
             (cov_sss, picks_stack_somato, info_sss),
             (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
            [dict(mag=1e15, grad=1e13, eeg=1e6)]))

    for (cov, picks_list, this_info), scalings in iter_tests:
        for ch_type, picks in picks_list:

            this_very_info = pick_info(this_info, picks)

            # compute subset of projs
            this_projs = [
                c['active'] and len(
                    set(c['data']['col_names']).intersection(
                        set(this_very_info['ch_names']))) > 0
                for c in cov['projs']
            ]
            n_projs = sum(this_projs)

            # count channel types
            ch_types = [
                channel_type(this_very_info, idx) for idx in range(len(picks))
            ]
            n_eeg, n_mag, n_grad = [
                ch_types.count(k) for k in ['eeg', 'mag', 'grad']
            ]
            n_meg = n_mag + n_grad
            if ch_type in ('all', 'eeg'):
                n_projs_eeg = 1
            else:
                n_projs_eeg = 0

            # check sss
            if 'proc_history' in this_very_info:
                mf = this_very_info['proc_history'][0]['max_info']
                n_free = _get_sss_rank(mf)
                if 'mag' not in ch_types and 'grad' not in ch_types:
                    n_free = 0
                # - n_projs XXX clarify
                expected_rank = n_free + n_eeg
                if n_projs > 0 and ch_type in ('all', 'eeg'):
                    expected_rank -= n_projs_eeg
            else:
                expected_rank = n_meg + n_eeg - n_projs

            C = cov['data'][np.ix_(picks, picks)]
            est_rank = _estimate_rank_meeg_cov(C,
                                               this_very_info,
                                               scalings=scalings)

            assert_equal(expected_rank, est_rank)
Example #33
def _assert_channel_types(info):
    for k in range(info['nchan']):
        a, b = channel_type(info, k), _channel_type_old(info, k)
        assert a == b
Example #34
def test_cov_rank_estimation(rank_method, proj, meg):
    """Test cov rank estimation."""
    # Test that our rank estimation works properly on a simple case
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=False)
    cov = read_cov(cov_fname)
    ch_names = [ch for ch in evoked.info['ch_names'] if '053' not in ch and
                ch.startswith('EEG')]
    cov = prepare_noise_cov(cov, evoked.info, ch_names, None)
    assert cov['eig'][0] <= 1e-25  # avg projector should set this to zero
    assert (cov['eig'][1:] > 1e-16).all()  # all else should be > 0

    # Now do some more comprehensive tests
    raw_sample = read_raw_fif(raw_fname)
    assert not _has_eeg_average_ref_proj(raw_sample.info['projs'])

    raw_sss = read_raw_fif(hp_fif_fname)
    assert not _has_eeg_average_ref_proj(raw_sss.info['projs'])
    raw_sss.add_proj(compute_proj_raw(raw_sss, meg=meg))

    cov_sample = compute_raw_covariance(raw_sample)
    cov_sample_proj = compute_raw_covariance(raw_sample.copy().apply_proj())

    cov_sss = compute_raw_covariance(raw_sss)
    cov_sss_proj = compute_raw_covariance(raw_sss.copy().apply_proj())

    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)

    info_sample = pick_info(raw_sample.info, picks_all_sample)
    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
                                             eeg=True))]
    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
    picks_stack_sample += [('all',
                            pick_types(info_sample, meg=True, eeg=True))]

    info_sss = pick_info(raw_sss.info, picks_all_sss)
    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
    picks_stack_somato += [('all',
                            pick_types(info_sss, meg=True, eeg=True))]

    iter_tests = list(itt.product(
        [(cov_sample, picks_stack_sample, info_sample),
         (cov_sample_proj, picks_stack_sample, info_sample),
         (cov_sss, picks_stack_somato, info_sss),
         (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
        [dict(mag=1e15, grad=1e13, eeg=1e6)],
    ))

    for (cov, picks_list, iter_info), scalings in iter_tests:
        rank = compute_rank(cov, rank_method, scalings, iter_info,
                            proj=proj)
        rank['all'] = sum(rank.values())
        for ch_type, picks in picks_list:

            this_info = pick_info(iter_info, picks)

            # compute subset of projs, active and inactive
            n_projs_applied = sum(proj['active'] and
                                  len(set(proj['data']['col_names']) &
                                      set(this_info['ch_names'])) > 0
                                  for proj in cov['projs'])
            n_projs_info = sum(len(set(proj['data']['col_names']) &
                                   set(this_info['ch_names'])) > 0
                               for proj in this_info['projs'])

            # count channel types
            ch_types = [channel_type(this_info, idx)
                        for idx in range(len(picks))]
            n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
                                    ['eeg', 'mag', 'grad']]
            n_meg = n_mag + n_grad
            has_sss = (n_meg > 0 and len(this_info['proc_history']) > 0)
            if has_sss:
                n_meg = _get_rank_sss(this_info)

            expected_rank = n_meg + n_eeg
            if rank_method is None:
                if meg == 'combined' or not has_sss:
                    if proj:
                        expected_rank -= n_projs_info
                    else:
                        expected_rank -= n_projs_applied
            else:
                # XXX for now it just uses the total count
                assert rank_method == 'info'
                if proj:
                    expected_rank -= n_projs_info

            assert rank[ch_type] == expected_rank
Example #35
def _assert_channel_types(info):
    for k in range(info['nchan']):
        a, b = channel_type(info, k), _channel_type_old(info, k)
        assert a == b
Example #36
File: viz.py  Project: Qi0116/deepthought
def _plot_evoked(evoked, plot_type, colorbar=True, hline=None, ylim=None,
                 picks=None, exclude='bads', unit=True, show=True,
                 clim=None, proj=False, xlim='tight', units=None,
                 scalings=None, titles=None, axes=None, cmap='RdBu_r'):
    """Aux function for plot_evoked and plot_evoked_image (cf. docstrings)

    Extra param is:

    plot_type : str, value ('butterfly' | 'image')
        The type of graph to plot: 'butterfly' plots each channel as a line
        (x axis: time, y axis: amplitude). 'image' plots a 2D image where
        color depicts the amplitude of each channel at a given time point
        (x axis: time, y axis: channel). In 'image' mode, the plot is not
        interactive.
    """
    import matplotlib.pyplot as plt
    if axes is not None and proj == 'interactive':
        raise RuntimeError('Currently only single axis figures are supported'
                           ' for interactive SSP selection.')

    scalings = _handle_default('scalings', scalings)
    titles = _handle_default('titles', titles)
    units = _handle_default('units', units)

    channel_types = set(key for d in [scalings, titles, units] for key in d)
    channel_types = sorted(channel_types)  # to guarantee consistent order

    if picks is None:
        picks = list(range(evoked.info['nchan']))

    bad_ch_idx = [evoked.ch_names.index(ch) for ch in evoked.info['bads']
                  if ch in evoked.ch_names]
    if len(exclude) > 0:
        if isinstance(exclude, string_types) and exclude == 'bads':
            exclude = bad_ch_idx
        elif (isinstance(exclude, list)
              and all([isinstance(ch, string_types) for ch in exclude])):
            exclude = [evoked.ch_names.index(ch) for ch in exclude]
        else:
            raise ValueError('exclude has to be a list of channel names or '
                             '"bads"')

        picks = list(set(picks).difference(exclude))

    types = [channel_type(evoked.info, idx) for idx in picks]
    n_channel_types = 0
    ch_types_used = []
    for t in channel_types:
        if t in types:
            n_channel_types += 1
            ch_types_used.append(t)

    axes_init = axes  # remember if axes were given as input

    fig = None
    if axes is None:
        fig, axes = plt.subplots(n_channel_types, 1)

    if isinstance(axes, plt.Axes):
        axes = [axes]
    elif isinstance(axes, np.ndarray):
        axes = list(axes)

    if axes_init is not None:
        fig = axes[0].get_figure()

    if not len(axes) == n_channel_types:
        raise ValueError('Number of axes (%g) must match number of channel '
                         'types (%g)' % (len(axes), n_channel_types))

    # instead of projecting during each iteration let's use the mixin here.
    if proj is True and evoked.proj is not True:
        evoked = evoked.copy()
        evoked.apply_proj()

    times = 1e3 * evoked.times  # time in milliseconds
    for ax, t in zip(axes, ch_types_used):
        ch_unit = units[t]
        this_scaling = scalings[t]
        if unit is False:
            this_scaling = 1.0
            ch_unit = 'NA'  # no unit
        idx = [picks[i] for i in range(len(picks)) if types[i] == t]
        if len(idx) > 0:
            # Parameters for butterfly interactive plots
            if plot_type == 'butterfly':
                if any([i in bad_ch_idx for i in idx]):
                    colors = ['k'] * len(idx)
                    for i in bad_ch_idx:
                        if i in idx:
                            colors[idx.index(i)] = 'r'

                    ax._get_lines.color_cycle = iter(colors)
                else:
                    ax._get_lines.color_cycle = cycle(['k'])
            # Set amplitude scaling
            D = this_scaling * evoked.data[idx, :]
            # plt.axes(ax)
            if plot_type == 'butterfly':
                ax.plot(times, D.T)
            elif plot_type == 'image':
                im = ax.imshow(D, interpolation='nearest', origin='lower',
                               extent=[times[0], times[-1], 0, D.shape[0]],
                               aspect='auto', cmap=cmap)
                if colorbar:
                    cbar = plt.colorbar(im, ax=ax)
                    cbar.ax.set_title(ch_unit)
            elif plot_type == 'mean':
                # plot the mean absolute amplitude across the selected channels
                ax.plot(times, np.abs(D).mean(axis=0))
            if xlim is not None:
                if xlim == 'tight':
                    xlim = (times[0], times[-1])
                ax.set_xlim(xlim)
            if ylim is not None and t in ylim:
                if plot_type == 'butterfly' or plot_type == 'mean':
                    ax.set_ylim(ylim[t])
                elif plot_type == 'image':
                    im.set_clim(ylim[t])
            ax.set_title(titles[t] + ' (%d channel%s)' % (
                         len(D), 's' if len(D) > 1 else ''))
            ax.set_xlabel('time (ms)')
            if plot_type == 'butterfly' or plot_type == 'mean':
                ax.set_ylabel('data (%s)' % ch_unit)
            elif plot_type == 'image':
                ax.set_ylabel('channels (%s)' % 'index')
            else:
                raise ValueError("plot_type has to be 'butterfly', 'image' or "
                                 "'mean'. Got %s." % plot_type)

            if plot_type in ('butterfly', 'mean') and hline is not None:
                for h in hline:
                    ax.axhline(h, color='r', linestyle='--', linewidth=2)

    if axes_init is None:
        plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)

    # if proj == 'interactive':
    #     _check_delayed_ssp(evoked)
    #     params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
    #                   axes=axes, types=types, units=units, scalings=scalings,
    #                   unit=unit, ch_types_used=ch_types_used, picks=picks,
    #                   plot_update_proj_callback=_plot_update_evoked,
    #                   plot_type=plot_type)
    #     _draw_proj_checkbox(None, params)

    if show and plt.get_backend() != 'agg':
        plt.show()
        fig.canvas.draw()  # for axes plots update axes.
    tight_layout(fig=fig)

    return fig
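The heart of _plot_evoked is grouping the picked channels by type and giving each type its own axis. Below is a minimal, stand-alone sketch of just that grouping step on a synthetic Info; the variable names are illustrative and not taken from the function above:

import mne
from mne.io.pick import channel_type

info = mne.create_info(['MEG 0113', 'MEG 0112', 'EEG 001'], 1000.,
                       ch_types=['grad', 'grad', 'eeg'])
picks = list(range(info['nchan']))
types = [channel_type(info, idx) for idx in picks]
# one list of channel indices per type, as used to fill each axis above
groups = {t: [picks[i] for i in range(len(picks)) if types[i] == t]
          for t in sorted(set(types))}
print(groups)  # {'eeg': [2], 'grad': [0, 1]}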
Example #37
def test_rank():
    """Test cov rank estimation"""
    raw_sample = Raw(raw_fname)

    raw_sss = Raw(hp_fif_fname)
    raw_sss.add_proj(compute_proj_raw(raw_sss))

    cov_sample = compute_raw_data_covariance(raw_sample)
    cov_sample_proj = compute_raw_data_covariance(
        raw_sample.copy().apply_proj())

    cov_sss = compute_raw_data_covariance(raw_sss)
    cov_sss_proj = compute_raw_data_covariance(
        raw_sss.copy().apply_proj())

    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)

    info_sample = pick_info(raw_sample.info, picks_all_sample)
    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
                                             eeg=True))]
    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
    picks_stack_sample += [('all',
                            pick_types(info_sample, meg=True, eeg=True))]

    info_sss = pick_info(raw_sss.info, picks_all_sss)
    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
    picks_stack_somato += [('all',
                            pick_types(info_sss, meg=True, eeg=True))]

    iter_tests = list(itt.product(
        [(cov_sample, picks_stack_sample, info_sample),
         (cov_sample_proj, picks_stack_sample, info_sample),
         (cov_sss, picks_stack_somato, info_sss),
         (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
        [dict(mag=1e15, grad=1e13, eeg=1e6)]
    ))

    for (cov, picks_list, this_info), scalings in iter_tests:
        for ch_type, picks in picks_list:

            this_very_info = pick_info(this_info, picks)

            # compute subset of projs
            this_projs = [c['active'] and
                          len(set(c['data']['col_names'])
                              .intersection(set(this_very_info['ch_names']))) >
                          0 for c in cov['projs']]
            n_projs = sum(this_projs)

            # count channel types
            ch_types = [channel_type(this_very_info, idx)
                        for idx in range(len(picks))]
            n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
                                    ['eeg', 'mag', 'grad']]
            n_meg = n_mag + n_grad
            if ch_type in ('all', 'eeg'):
                n_projs_eeg = 1
            else:
                n_projs_eeg = 0

            # check sss
            if 'proc_history' in this_very_info:
                mf = this_very_info['proc_history'][0]['max_info']
                n_free = _get_sss_rank(mf)
                if 'mag' not in ch_types and 'grad' not in ch_types:
                    n_free = 0
                # - n_projs XXX clarify
                expected_rank = n_free + n_eeg
                if n_projs > 0 and ch_type in ('all', 'eeg'):
                    expected_rank -= n_projs_eeg
            else:
                expected_rank = n_meg + n_eeg - n_projs

            C = cov['data'][np.ix_(picks, picks)]
            est_rank = _estimate_rank_meeg_cov(C, this_very_info,
                                               scalings=scalings)

            assert_equal(expected_rank, est_rank)
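In the non-SSS branch the expected rank is simply the number of retained channels minus the number of applicable projectors. A worked example of that arithmetic, with hypothetical channel counts:

# hypothetical counts illustrating expected_rank = n_meg + n_eeg - n_projs
n_mag, n_grad, n_eeg = 102, 204, 60
n_meg = n_mag + n_grad        # 306 MEG channels
n_projs = 3                   # active projectors touching these channels
expected_rank = n_meg + n_eeg - n_projs
print(expected_rank)          # 363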
Example #38
def _plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True,
                  res=64, axes=None, names=None, show_names=False, mask=None,
                  mask_params=None, outlines='head',
                  contours=6, image_interp='bilinear', show=True,
                  head_pos=None, onselect=None, extrapolate='box', border=0):
    import matplotlib.pyplot as plt
    from matplotlib.widgets import RectangleSelector
    data = np.asarray(data)

    if isinstance(pos, Info):  # infer pos from Info object
        picks = _pick_data_channels(pos)  # pick only data channels
        pos = pick_info(pos, picks)

        # check if there is only 1 channel type, and n_chans matches the data
        ch_type = {channel_type(pos, idx)
                   for idx, _ in enumerate(pos["chs"])}
        info_help = ("Pick Info with e.g. mne.pick_info and "
                     "mne.io.pick.channel_indices_by_type.")
        if len(ch_type) > 1:
            raise ValueError("Multiple channel types in Info structure. "
                             + info_help)
        elif len(pos["chs"]) != data.shape[0]:
            raise ValueError("Number of channels in the Info object and "
                             "the data array does not match. " + info_help)
        else:
            ch_type = ch_type.pop()

        if any(type_ in ch_type for type_ in ('planar', 'grad')):
            # deal with grad pairs
            from mne.channels.layout import (_merge_grad_data, find_layout,
                                             _pair_grad_sensors)
            picks, pos = _pair_grad_sensors(pos, find_layout(pos))
            data = _merge_grad_data(data[picks]).reshape(-1)
        else:
            picks = list(range(data.shape[0]))
            pos = _find_topomap_coords(pos, picks=picks)

    if data.ndim > 1:
        raise ValueError("Data needs to be array of shape (n_sensors,); got "
                         "shape %s." % str(data.shape))

    # Give a helpful error message for common mistakes regarding the position
    # matrix.
    pos_help = ("Electrode positions should be specified as a 2D array with "
                "shape (n_channels, 2). Each row in this matrix contains the "
                "(x, y) position of an electrode.")
    if pos.ndim != 2:
        error = ("{ndim}D array supplied as electrode positions, where a 2D "
                 "array was expected").format(ndim=pos.ndim)
        raise ValueError(error + " " + pos_help)
    elif pos.shape[1] == 3:
        error = ("The supplied electrode positions matrix contains 3 columns. "
                 "Are you trying to specify XYZ coordinates? Perhaps the "
                 "mne.channels.create_eeg_layout function is useful for you.")
        raise ValueError(error + " " + pos_help)
    # No error is raised in case of pos.shape[1] == 4. In this case, it is
    # assumed the position matrix contains both (x, y) and (width, height)
    # values, such as Layout.pos.
    elif pos.shape[1] == 1 or pos.shape[1] > 4:
        raise ValueError(pos_help)

    if len(data) != len(pos):
        raise ValueError("Data and pos need to be of same length. Got data of "
                         "length %s, pos of length %s" % (len(data), len(pos)))

    norm = min(data) >= 0
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
    if cmap is None:
        cmap = 'Reds' if norm else 'RdBu_r'

    pos, outlines = _check_outlines(pos, outlines, head_pos)
    assert isinstance(outlines, dict)

    ax = axes if axes else plt.gca()
    _prepare_topomap(pos, ax)

    _use_default_outlines = any(k.startswith('head') for k in outlines)

    if _use_default_outlines:
        # prepare masking
        _autoshrink(outlines, pos, res)

    mask_params = _handle_default('mask_params', mask_params)

    # find mask limits
    xlim = np.inf, -np.inf,
    ylim = np.inf, -np.inf,
    mask_ = np.c_[outlines['mask_pos']]
    xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0]]),
                  np.max(np.r_[xlim[1], mask_[:, 0]]))
    ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1]]),
                  np.max(np.r_[ylim[1], mask_[:, 1]]))

    # interpolate the data, we multiply clip radius by 1.06 so that pixelated
    # edges of the interpolated image would appear under the mask
    head_radius = (None if extrapolate == 'local' else
                   outlines['clip_radius'][0] * 1.06)
    xi = np.linspace(xmin, xmax, res)
    yi = np.linspace(ymin, ymax, res)
    Xi, Yi = np.meshgrid(xi, yi)
    interp = _GridData(pos, extrapolate, head_radius, border).set_values(data)
    Zi = interp.set_locations(Xi, Yi)()

    # plot outline
    patch_ = None
    if 'patch' in outlines:
        patch_ = outlines['patch']
        patch_ = patch_() if callable(patch_) else patch_
        patch_.set_clip_on(False)
        ax.add_patch(patch_)
        ax.set_transform(ax.transAxes)
        ax.set_clip_path(patch_)
    if _use_default_outlines:
        from matplotlib import patches
        patch_ = patches.Ellipse((0, 0),
                                 2 * outlines['clip_radius'][0],
                                 2 * outlines['clip_radius'][1],
                                 clip_on=True,
                                 transform=ax.transData)

    # plot interpolated map
    im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
                   aspect='equal', extent=(xmin, xmax, ymin, ymax),
                   interpolation=image_interp)

    # This tackles an incomprehensible matplotlib bug if no contours are
    # drawn. To avoid rescalings, we will always draw contours.
    # But if no contours are desired we only draw one and make it invisible.
    linewidth = mask_params['markeredgewidth']
    no_contours = False
    if isinstance(contours, (np.ndarray, list)):
        pass  # contours precomputed
    elif contours == 0:
        contours, no_contours = 1, True
    if (Zi == Zi[0, 0]).all():
        cont = None  # can't make contours for constant-valued functions
    else:
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('ignore')
            cont = ax.contour(Xi, Yi, Zi, contours, colors='k',
                              linewidths=linewidth / 2.)
    if no_contours and cont is not None:
        for col in cont.collections:
            col.set_visible(False)

    if patch_ is not None:
        im.set_clip_path(patch_)
        if cont is not None:
            for col in cont.collections:
                col.set_clip_path(patch_)

    pos_x, pos_y = pos.T
    if sensors is not False and mask is None:
        _plot_sensors(pos_x, pos_y, sensors=sensors, ax=ax)
    elif sensors and mask is not None:
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)
        idx = np.where(~mask)[0]
        _plot_sensors(pos_x[idx], pos_y[idx], sensors=sensors, ax=ax)
    elif not sensors and mask is not None:
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)

    if isinstance(outlines, dict):
        _draw_outlines(ax, outlines)

    if show_names:
        if names is None:
            raise ValueError("To show names, a list of names must be provided"
                             " (see `names` keyword).")
        if show_names is True:
            def _show_names(x):
                return x
        else:
            _show_names = show_names
        show_idx = np.arange(len(names)) if mask is None else np.where(mask)[0]
        for ii, (p, ch_id) in enumerate(zip(pos, names)):
            if ii not in show_idx:
                continue
            ch_id = _show_names(ch_id)
            ax.text(p[0], p[1], ch_id, horizontalalignment='center',
                    verticalalignment='center', size='x-small')

    if onselect is not None:
        ax.RS = RectangleSelector(ax, onselect=onselect)
    plt_show(show)
    return im, cont, interp, patch_
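A short usage sketch for the topomap plotting shown above, assuming an MNE-Python version whose public mne.viz.plot_topomap still accepts a plain (n_channels, 2) position array, as the private helper does. The positions and data are random and purely illustrative:

import numpy as np
import mne

rng = np.random.RandomState(42)
n_channels = 32
data = rng.randn(n_channels)                         # one value per sensor
pos = rng.uniform(-0.1, 0.1, size=(n_channels, 2))   # (x, y) sensor positions
im, cont = mne.viz.plot_topomap(data, pos, show=False)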
Example #39
def plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True,
                 res=64, axes=None, names=None, show_names=False, mask=None,
                 mask_params=None, outlines='head', image_mask=None,
                 contours=6, image_interp='bilinear', show=True,
                 head_pos=None, onselect=None, axis=None):
    """See the docstring for mne.viz.plot_topomap, which I've simply
    modified to return more objects."""

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.widgets import RectangleSelector
    from mne.io.pick import (channel_type, pick_info, _pick_data_channels)
    from mne.utils import warn
    from mne.viz.utils import (_setup_vmin_vmax, plt_show)
    from mne.defaults import _handle_default
    from mne.channels.layout import _find_topomap_coords
    from mne.io.meas_info import Info
    from mne.viz.topomap import (_check_outlines, _prepare_topomap, _griddata,
                                 _make_image_mask, _plot_sensors,
                                 _draw_outlines)

    data = np.asarray(data)

    if isinstance(pos, Info):  # infer pos from Info object
        picks = _pick_data_channels(pos)  # pick only data channels
        pos = pick_info(pos, picks)

        # check if there is only 1 channel type, and n_chans matches the data
        ch_type = set(channel_type(pos, idx)
                      for idx, _ in enumerate(pos["chs"]))
        info_help = ("Pick Info with e.g. mne.pick_info and "
                     "mne.channels.channel_indices_by_type.")
        if len(ch_type) > 1:
            raise ValueError("Multiple channel types in Info structure. " +
                             info_help)
        elif len(pos["chs"]) != data.shape[0]:
            raise ValueError("Number of channels in the Info object and "
                             "the data array does not match. " + info_help)
        else:
            ch_type = ch_type.pop()

        if any(type_ in ch_type for type_ in ('planar', 'grad')):
            # deal with grad pairs
            from ..channels.layout import (_merge_grad_data, find_layout,
                                           _pair_grad_sensors)
            picks, pos = _pair_grad_sensors(pos, find_layout(pos))
            data = _merge_grad_data(data[picks]).reshape(-1)
        else:
            picks = list(range(data.shape[0]))
            pos = _find_topomap_coords(pos, picks=picks)

    if data.ndim > 1:
        raise ValueError("Data needs to be array of shape (n_sensors,); got "
                         "shape %s." % str(data.shape))

    # Give a helpful error message for common mistakes regarding the position
    # matrix.
    pos_help = ("Electrode positions should be specified as a 2D array with "
                "shape (n_channels, 2). Each row in this matrix contains the "
                "(x, y) position of an electrode.")
    if pos.ndim != 2:
        error = ("{ndim}D array supplied as electrode positions, where a 2D "
                 "array was expected").format(ndim=pos.ndim)
        raise ValueError(error + " " + pos_help)
    elif pos.shape[1] == 3:
        error = ("The supplied electrode positions matrix contains 3 columns. "
                 "Are you trying to specify XYZ coordinates? Perhaps the "
                 "mne.channels.create_eeg_layout function is useful for you.")
        raise ValueError(error + " " + pos_help)
    # No error is raised in case of pos.shape[1] == 4. In this case, it is
    # assumed the position matrix contains both (x, y) and (width, height)
    # values, such as Layout.pos.
    elif pos.shape[1] == 1 or pos.shape[1] > 4:
        raise ValueError(pos_help)

    if len(data) != len(pos):
        raise ValueError("Data and pos need to be of same length. Got data of "
                         "length %s, pos of length %s" % (len(data), len(pos)))

    norm = min(data) >= 0
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
    if cmap is None:
        cmap = 'Reds' if norm else 'RdBu_r'

    pos, outlines = _check_outlines(pos, outlines, head_pos)

    if axis is not None:
        axes = axis
        warn('axis parameter is deprecated and will be removed in 0.13. '
             'Use axes instead.', DeprecationWarning)
    ax = axes if axes else plt.gca()
    pos_x, pos_y = _prepare_topomap(pos, ax)
    if outlines is None:
        xmin, xmax = pos_x.min(), pos_x.max()
        ymin, ymax = pos_y.min(), pos_y.max()
    else:
        xlim = np.inf, -np.inf,
        ylim = np.inf, -np.inf,
        mask_ = np.c_[outlines['mask_pos']]
        xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0]]),
                      np.max(np.r_[xlim[1], mask_[:, 0]]))
        ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1]]),
                      np.max(np.r_[ylim[1], mask_[:, 1]]))

    # interpolate data
    xi = np.linspace(xmin, xmax, res)
    yi = np.linspace(ymin, ymax, res)
    Xi, Yi = np.meshgrid(xi, yi)
    Zi = _griddata(pos_x, pos_y, data, Xi, Yi)

    if outlines is None:
        _is_default_outlines = False
    elif isinstance(outlines, dict):
        _is_default_outlines = any(k.startswith('head') for k in outlines)

    if _is_default_outlines and image_mask is None:
        # prepare masking
        image_mask, pos = _make_image_mask(outlines, pos, res)

    mask_params = _handle_default('mask_params', mask_params)

    # plot outline
    linewidth = mask_params['markeredgewidth']
    patch = None
    if 'patch' in outlines:
        patch = outlines['patch']
        patch_ = patch() if callable(patch) else patch
        patch_.set_clip_on(False)
        ax.add_patch(patch_)
        ax.set_transform(ax.transAxes)
        ax.set_clip_path(patch_)

    # plot map and contour
    im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
                   aspect='equal', extent=(xmin, xmax, ymin, ymax),
                   interpolation=image_interp)

    # This tackles an incomprehensible matplotlib bug if no contours are
    # drawn. To avoid rescalings, we will always draw contours.
    # But if no contours are desired we only draw one and make it invisible.
    no_contours = False
    if contours in (False, None):
        contours, no_contours = 1, True
    cont = ax.contour(Xi, Yi, Zi, contours, colors='k',
                      linewidths=linewidth)
    if no_contours is True:
        for col in cont.collections:
            col.set_visible(False)

    if _is_default_outlines:
        from matplotlib import patches
        patch_ = patches.Ellipse((0, 0),
                                 2 * outlines['clip_radius'][0],
                                 2 * outlines['clip_radius'][1],
                                 clip_on=True,
                                 transform=ax.transData)
    if _is_default_outlines or patch is not None:
        im.set_clip_path(patch_)
        if cont is not None:
            for col in cont.collections:
                col.set_clip_path(patch_)

    if sensors is not False and mask is None:
        _plot_sensors(pos_x, pos_y, sensors=sensors, ax=ax)
    elif sensors and mask is not None:
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)
        idx = np.where(~mask)[0]
        _plot_sensors(pos_x[idx], pos_y[idx], sensors=sensors, ax=ax)
    elif not sensors and mask is not None:
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)

    if isinstance(outlines, dict):
        _draw_outlines(ax, outlines)

    if show_names:
        if names is None:
            raise ValueError("To show names, a list of names must be provided"
                             " (see `names` keyword).")
        if show_names is True:
            def _show_names(x):
                return x
        else:
            _show_names = show_names
        show_idx = np.arange(len(names)) if mask is None else np.where(mask)[0]
        for ii, (p, ch_id) in enumerate(zip(pos, names)):
            if ii not in show_idx:
                continue
            ch_id = _show_names(ch_id)
            ax.text(p[0], p[1], ch_id, horizontalalignment='center',
                    verticalalignment='center', size='x-small')

    plt.subplots_adjust(top=.95)

    if onselect is not None:
        ax.RS = RectangleSelector(ax, onselect=onselect)
    plt_show(show)
    return ax, im, cont, pos_x, pos_y
Example #40
def select_vertices_in_sensor_range(inst,
                                    dist,
                                    info=None,
                                    picks=None,
                                    trans=None,
                                    indices=False,
                                    verbose=None):
    """Find vertices within given distance to a sensor.

    Parameters
    ----------
    inst : instance of Forward | instance of SourceSpaces
        The object to select vertices from.
    dist : float
        The maximum allowed distance between a vertex and its nearest sensor.
        All vertices for which the distance to the nearest sensor exceeds this
        limit are discarded.
    info : instance of Info | None
        The info structure that contains information about the channels. Only
        needs to be specified if the object to select vertices from is an
        instance of SourceSpaces.
    picks : array-like of int | None
        Indices of sensors to include in the search for the nearest sensor. If
        ``None``, the default, only MEG channels are used.
    trans : str | instance of Transform | None
        Either the full path to the head<->MRI transform ``*-trans.fif`` file
        produced during coregistration, or the Transformation itself. If trans
        is None, an identity matrix is assumed. Only needed when ``inst`` is a
        source space in MRI coordinates.
    indices : bool
        If ``True``, return vertex indices instead of vertex numbers. Defaults
        to ``False``.
    verbose : bool | str | int | None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    vertices : pair of lists | list of int
        Either a list of vertex numbers for the left and right hemisphere (if
        ``indices==False``) or a single list with vertex indices.

    See Also
    --------
    restrict_forward_to_vertices : restrict Forward to the given vertices
    restrict_src_to_vertices : restrict SourceSpaces to the given vertices
    """

    if isinstance(inst, Forward):
        info = inst['info']
        src = inst['src']
    elif isinstance(inst, SourceSpaces):
        src = inst
        if info is None:
            raise ValueError('You need to specify an Info object with '
                             'information about the channels.')

    # Load the head<->MRI transform if necessary
    if src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
        if trans is None:
            raise ValueError('Source space is in MRI coordinates, but no '
                             'head<->MRI transform was given. Please specify '
                             'the full path to the appropriate *-trans.fif '
                             'file as the "trans" parameter.')
        if isinstance(trans, string_types):
            # read_trans may return several transforms; keep the first one
            # that can be coerced into a head<->MRI transform.
            candidates = read_trans(trans, return_all=True)
            last_exp = None
            for trans in candidates:  # we got at least 1
                try:
                    trans = _ensure_trans(trans, 'head', 'mri')
                except Exception as exp:
                    last_exp = exp
                else:
                    break
            else:
                raise last_exp

        src_trans = invert_transform(_ensure_trans(trans, 'head', 'mri'))
        logger.info('Applying head<->MRI transform to the source space.')
    else:
        src_trans = Transform('head', 'head')  # Identity transform

    dev_to_head = _ensure_trans(info['dev_head_t'], 'meg', 'head')

    if picks is None:
        picks = pick_types(info, meg=True)
        if len(picks) > 0:
            logger.info('Using MEG channels')
        else:
            logger.info('Using EEG channels')
            picks = pick_types(info, eeg=True)

    src_pos = np.vstack([
        apply_trans(src_trans, s['rr'][s['inuse'].astype(bool)])
        for s in src
    ])

    sensor_pos = []
    for ch in picks:
        # MEG channels are in device coordinates, translate them to head
        if channel_type(info, ch) in ['mag', 'grad']:
            sensor_pos.append(
                apply_trans(dev_to_head, info['chs'][ch]['loc'][:3]))
        else:
            sensor_pos.append(info['chs'][ch]['loc'][:3])
    sensor_pos = np.array(sensor_pos)

    # Find vertices that are within range of a sensor. We use a KD-tree for
    # speed.
    logger.info('Finding vertices within sensor range...')
    tree = cKDTree(sensor_pos)
    distances, _ = tree.query(src_pos, distance_upper_bound=dist)

    # Vertices out of range are flagged as np.inf
    src_sel = np.isfinite(distances)
    logger.info('[done]')

    if indices:
        return np.flatnonzero(src_sel)
    else:
        n_lh_verts = src[0]['nuse']
        lh_sel, rh_sel = src_sel[:n_lh_verts], src_sel[n_lh_verts:]
        vert_lh = src[0]['vertno'][lh_sel]
        vert_rh = src[1]['vertno'][rh_sel]
        return [vert_lh, vert_rh]
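A hedged usage sketch for select_vertices_in_sensor_range. The forward file path is a placeholder, and dist is given in meters because MNE stores sensor and source positions in meters (head coordinates):

import mne

fwd = mne.read_forward_solution('fwd.fif')  # placeholder path, not a real file
# keep only source vertices that lie within 7 cm of the nearest MEG sensor
vert_lh, vert_rh = select_vertices_in_sensor_range(fwd, dist=0.07)
print(len(vert_lh), 'left and', len(vert_rh), 'right hemisphere vertices kept')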
Example #41
def test_bdf_stim_channel():
    """Test if last channel is detected as STIM by default."""
    raw_py = _test_raw_reader(read_raw_edf, input_fname=bdf_path)
    assert_true(channel_type(raw_py.info, raw_py.info["nchan"] - 1) == 'stim')
Example #42
def test_rank():
    """Test cov rank estimation."""
    # Test that our rank estimation works properly on a simple case
    evoked = read_evokeds(ave_fname,
                          condition=0,
                          baseline=(None, 0),
                          proj=False)
    cov = read_cov(cov_fname)
    ch_names = [
        ch for ch in evoked.info['ch_names']
        if '053' not in ch and ch.startswith('EEG')
    ]
    cov = prepare_noise_cov(cov, evoked.info, ch_names, None)
    assert_equal(cov['eig'][0], 0.)  # avg projector should set this to zero
    assert_true((cov['eig'][1:] > 0).all())  # all else should be > 0

    # Now do some more comprehensive tests
    raw_sample = read_raw_fif(raw_fname)

    raw_sss = read_raw_fif(hp_fif_fname)
    raw_sss.add_proj(compute_proj_raw(raw_sss))

    cov_sample = compute_raw_covariance(raw_sample)
    cov_sample_proj = compute_raw_covariance(raw_sample.copy().apply_proj())

    cov_sss = compute_raw_covariance(raw_sss)
    cov_sss_proj = compute_raw_covariance(raw_sss.copy().apply_proj())

    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)

    info_sample = pick_info(raw_sample.info, picks_all_sample)
    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
                                             eeg=True))]
    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
    picks_stack_sample += [('all', pick_types(info_sample, meg=True,
                                              eeg=True))]

    info_sss = pick_info(raw_sss.info, picks_all_sss)
    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
    picks_stack_somato += [('all', pick_types(info_sss, meg=True, eeg=True))]

    iter_tests = list(
        itt.product(
            [(cov_sample, picks_stack_sample, info_sample),
             (cov_sample_proj, picks_stack_sample, info_sample),
             (cov_sss, picks_stack_somato, info_sss),
             (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
            [dict(mag=1e15, grad=1e13, eeg=1e6)]))

    for (cov, picks_list, this_info), scalings in iter_tests:
        for ch_type, picks in picks_list:

            this_very_info = pick_info(this_info, picks)

            # compute subset of projs
            this_projs = [
                c['active'] and len(
                    set(c['data']['col_names']).intersection(
                        set(this_very_info['ch_names']))) > 0
                for c in cov['projs']
            ]
            n_projs = sum(this_projs)

            # count channel types
            ch_types = [
                channel_type(this_very_info, idx) for idx in range(len(picks))
            ]
            n_eeg, n_mag, n_grad = [
                ch_types.count(k) for k in ['eeg', 'mag', 'grad']
            ]
            n_meg = n_mag + n_grad
            if ch_type in ('all', 'eeg'):
                n_projs_eeg = 1
            else:
                n_projs_eeg = 0

            # check sss
            if len(this_very_info['proc_history']) > 0:
                mf = this_very_info['proc_history'][0]['max_info']
                n_free = _get_sss_rank(mf)
                if 'mag' not in ch_types and 'grad' not in ch_types:
                    n_free = 0
                # - n_projs XXX clarify
                expected_rank = n_free + n_eeg
                if n_projs > 0 and ch_type in ('all', 'eeg'):
                    expected_rank -= n_projs_eeg
            else:
                expected_rank = n_meg + n_eeg - n_projs

            C = cov['data'][np.ix_(picks, picks)]
            est_rank = _estimate_rank_meeg_cov(C,
                                               this_very_info,
                                               scalings=scalings)

            assert_equal(expected_rank, est_rank)
Example #43
def _channels_tsv(raw, fname, overwrite=False, verbose=True):
    """Create a channels.tsv file and save it.

    Parameters
    ----------
    raw : instance of Raw
        The data as MNE-Python Raw object.
    fname : str
        Filename to save the channels.tsv to.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
    verbose : bool
        Set verbose output to true or false.

    """
    # Get channel type mappings between BIDS and MNE nomenclatures
    map_chs = _get_ch_type_mapping(fro='mne', to='bids')

    # Prepare the descriptions for each channel type
    map_desc = defaultdict(lambda: 'Other type of channel')
    map_desc.update(meggradaxial='Axial Gradiometer',
                    megrefgradaxial='Axial Gradiometer Reference',
                    meggradplanar='Planar Gradiometer',
                    megmag='Magnetometer',
                    megrefmag='Magnetometer Reference',
                    stim='Trigger',
                    eeg='ElectroEncephaloGram',
                    ecog='Electrocorticography',
                    seeg='StereoEEG',
                    ecg='ElectroCardioGram',
                    eog='ElectroOculoGram',
                    emg='ElectroMyoGram',
                    misc='Miscellaneous')
    get_specific = ('mag', 'ref_meg', 'grad')

    # get the manufacturer from the file in the Raw object
    manufacturer = None

    _, ext = _parse_ext(raw.filenames[0], verbose=verbose)
    manufacturer = MANUFACTURERS[ext]

    ignored_channels = IGNORED_CHANNELS.get(manufacturer, list())

    status, ch_type, description = list(), list(), list()
    for idx, ch in enumerate(raw.info['ch_names']):
        status.append('bad' if ch in raw.info['bads'] else 'good')
        _channel_type = channel_type(raw.info, idx)
        if _channel_type in get_specific:
            _channel_type = coil_type(raw.info, idx, _channel_type)
        ch_type.append(map_chs[_channel_type])
        description.append(map_desc[_channel_type])
    low_cutoff, high_cutoff = (raw.info['highpass'], raw.info['lowpass'])
    if raw._orig_units:
        units = [raw._orig_units.get(ch, 'n/a') for ch in raw.ch_names]
    else:
        units = [_unit2human.get(ch_i['unit'], 'n/a')
                 for ch_i in raw.info['chs']]
        units = [u if u not in ['NA'] else 'n/a' for u in units]
    n_channels = raw.info['nchan']
    sfreq = raw.info['sfreq']

    ch_data = OrderedDict([
        ('name', raw.info['ch_names']),
        ('type', ch_type),
        ('units', units),
        ('low_cutoff', np.full((n_channels), low_cutoff)),
        ('high_cutoff', np.full((n_channels), high_cutoff)),
        ('description', description),
        ('sampling_frequency', np.full((n_channels), sfreq)),
        ('status', status)])
    ch_data = _drop(ch_data, ignored_channels, 'name')

    _write_tsv(fname, ch_data, overwrite, verbose)

    return fname
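A minimal sketch of the per-channel loop above on a synthetic Raw, using only the public channel_type helper; the BIDS mapping and coil_type refinement are omitted, and the channel names are illustrative:

import numpy as np
import mne
from mne.io.pick import channel_type

info = mne.create_info(['EEG 001', 'EOG 061', 'STI 014'], 256.,
                       ch_types=['eeg', 'eog', 'stim'])
raw = mne.io.RawArray(np.zeros((3, 256)), info)
raw.info['bads'] = ['EOG 061']

status = ['bad' if ch in raw.info['bads'] else 'good' for ch in raw.ch_names]
types = [channel_type(raw.info, idx) for idx in range(raw.info['nchan'])]
print(list(zip(raw.ch_names, types, status)))
# [('EEG 001', 'eeg', 'good'), ('EOG 061', 'eog', 'bad'),
#  ('STI 014', 'stim', 'good')]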