Beispiel #1
0
 def test_dataset_search_fun(self):
     """Smoke-test ``utils.dataset_search`` with the main imagery filters:
     ``multi_session``, ``events``, ``has_all_events`` and ``total_classes``.

     Only prints results; relies on dataset_search not raising.
     """
     # Multi-session vs. single-session imagery datasets.
     print([
         type(i).__name__
         for i in utils.dataset_search('imagery', multi_session=True)
     ])
     print([
         type(i).__name__
         for i in utils.dataset_search('imagery', multi_session=False)
     ])
     # Datasets providing any of the five motor-imagery events.
     # Renamed from `l`: single-letter `l` is ambiguous (flake8 E741).
     found = utils.dataset_search(
         'imagery',
         events=['right_hand', 'left_hand', 'feet', 'tongue', 'rest'])
     for out in found:
         print('multiclass: {}'.format(out.selected_events))
     # Datasets that contain *all* of the requested events.
     found = utils.dataset_search('imagery',
                                  events=['right_hand', 'feet'],
                                  has_all_events=True)
     for out in found:
         print('rh/f: {}, {}'.format(
             type(out).__name__, out.selected_events))
     # Restrict the selection to exactly two classes per dataset.
     found = utils.dataset_search(
         'imagery',
         events=['right_hand', 'left_hand', 'feet', 'tongue', 'rest'],
         total_classes=2)
     for out in found:
         print('two class: {}'.format(out.selected_events))
Beispiel #2
0
 def test_dataset_channel_search(self):
     """Check that the ``channels`` filter of ``dataset_search`` is sound:
     matched datasets must contain all requested channels, and dataset
     types excluded by the filter must be missing at least one of them.
     """
     chans = ['C3', 'Cz']
     # Renamed from `All`: capitalized locals read as classes (PEP 8).
     all_datasets = utils.dataset_search('imagery', events=[
         'right_hand', 'left_hand', 'feet', 'tongue', 'rest'])
     has_chans = utils.dataset_search('imagery', events=[
         'right_hand', 'left_hand', 'feet', 'tongue', 'rest'],
         channels=chans)
     # Set comprehension instead of set([...]) (flake8 C403).
     has_types = {type(x) for x in has_chans}
     for d in has_chans:
         # Presumably the raw of subject 1's first session/run -- TODO
         # confirm against the dataset get_data return structure.
         s1 = d.get_data([1], False)[0][0][0]
         self.assertTrue(set(chans) <= set(s1.info['ch_names']))
     for d in all_datasets:
         if type(d) not in has_types:
             s1 = d.get_data([1], False)[0][0][0]
             self.assertFalse(set(chans) <= set(s1.info['ch_names']))
Beispiel #3
0
    def test_dataset_search_fun(self):
        """Exercise ``dataset_search`` over session, event and
        has_all_events filters, asserting the all-events guarantee."""
        # Print the matched dataset class names for both session modes.
        for multi in (True, False):
            found = utils.dataset_search("imagery", multi_session=multi)
            print([type(dataset).__name__ for dataset in found])

        event_pool = ["right_hand", "left_hand", "feet", "tongue", "rest"]
        for out in utils.dataset_search("imagery", events=event_pool):
            print("multiclass: {}".format(out.event_id.keys()))

        # With has_all_events=True every match must carry both events.
        required = {"right_hand", "feet"}
        matches = utils.dataset_search(
            "imagery", events=["right_hand", "feet"], has_all_events=True
        )
        for out in matches:
            self.assertTrue(required <= set(out.event_id.keys()))
Beispiel #4
0
 def datasets(self):
     """Return the P300 datasets that provide every requested event.

     The epoch interval is derived from ``self.tmin``/``self.tmax``; it is
     left unset when ``tmax`` is undefined.
     """
     interval = None if self.tmax is None else self.tmax - self.tmin
     return utils.dataset_search(
         paradigm="p300", events=self.events, interval=interval, has_all_events=True
     )
Beispiel #5
0
 def datasets(self):
     """Return imagery datasets matching any of this paradigm's events.

     The required epoch length is ``tmax - tmin`` unless ``tmax`` is
     undefined, in which case no interval constraint is applied.
     """
     interval = None if self.tmax is None else self.tmax - self.tmin
     search_kwargs = dict(paradigm='imagery',
                          events=self.events,
                          interval=interval,
                          has_all_events=False)
     return utils.dataset_search(**search_kwargs)
Beispiel #6
0
 def datasets(self):
     """Return SSVEP datasets offering ``n_classes`` of the wanted events.

     The interval constraint is ``tmax - tmin`` when ``tmax`` is set,
     otherwise no interval filtering is applied.
     """
     interval = None if self.tmax is None else self.tmax - self.tmin
     search_kwargs = dict(paradigm='ssvep',
                          events=self.events,
                          total_classes=self.n_classes,
                          interval=interval,
                          has_all_events=False)
     return utils.dataset_search(**search_kwargs)
Beispiel #7
0
    def test_dataset_search_fun(self):
        """Smoke-test ``dataset_search``: print matches for both session
        modes and for a multiclass event query, then assert that a
        ``has_all_events=True`` search only returns datasets carrying
        every requested event.
        """
        print([
            type(i).__name__
            for i in utils.dataset_search('imagery', multi_session=True)
        ])
        print([
            type(i).__name__
            for i in utils.dataset_search('imagery', multi_session=False)
        ])
        res = utils.dataset_search(
            'imagery',
            events=['right_hand', 'left_hand', 'feet', 'tongue', 'rest'])
        for out in res:
            print('multiclass: {}'.format(out.event_id.keys()))

        res = utils.dataset_search('imagery',
                                   events=['right_hand', 'feet'],
                                   has_all_events=True)
        for out in res:
            # Set literal instead of set([...]) (flake8 C405).
            self.assertTrue(
                {'right_hand', 'feet'} <= set(out.event_id.keys()))
Beispiel #8
0
 def test_dataset_channel_search(self):
     """Check the ``channels`` filter of ``dataset_search``: matched
     datasets contain all requested channels, and dataset types excluded
     by the filter lack at least one of them.
     """
     chans = ["C3", "Cz"]
     # Renamed from `All`: capitalized locals read as classes (PEP 8).
     all_datasets = utils.dataset_search(
         "imagery", events=["right_hand", "left_hand", "feet", "tongue", "rest"]
     )
     has_chans = utils.dataset_search(
         "imagery",
         events=["right_hand", "left_hand", "feet", "tongue", "rest"],
         channels=chans,
     )
     # Set comprehension instead of set([...]) (flake8 C403).
     has_types = {type(x) for x in has_chans}
     for d in has_chans:
         # get_data([1])[1] is indexed by session then run -- take the
         # first of each; next(iter(...)) avoids building a throwaway
         # list just to grab the first key's value.
         s1 = d.get_data([1])[1]
         sess1 = next(iter(s1.values()))
         raw = next(iter(sess1.values()))
         self.assertTrue(set(chans) <= set(raw.info["ch_names"]))
     for d in all_datasets:
         if type(d) not in has_types:
             s1 = d.get_data([1])[1]
             sess1 = next(iter(s1.values()))
             raw = next(iter(sess1.values()))
             self.assertFalse(set(chans) <= set(raw.info["ch_names"]))
Beispiel #9
0
# NOTE(review): `clf` and `pipelines` are presumably defined earlier in this
# tutorial script -- confirm upstream before running this fragment alone.
pipe = make_pipeline(LogVariance(), clf)

pipelines["AM+SVM"] = pipe

##############################################################################
# Datasets
# -----------------
#
# Datasets can be specified in many ways: Each paradigm has a property
# 'datasets' which returns the datasets that are appropriate for that paradigm

print(LeftRightImagery().datasets)

##########################################################################
# Or you can run a search through the available datasets:
print(utils.dataset_search(paradigm="imagery", min_subjects=6))

##########################################################################
# Or you can simply make your own list (which we do here due to computational
# constraints)

# Restrict to the first two subjects to keep the example fast.
dataset = BNCI2014001()
dataset.subject_list = dataset.subject_list[:2]
datasets = [dataset]

##########################################################################
# Paradigm
# --------------------
#
# Paradigms define the events, epoch time, bandpass, and other preprocessing
# parameters. They have defaults that you can read in the documentation, or you
# NOTE(review): `pipe` and `pipelines` are presumably defined earlier in this
# tutorial script -- confirm upstream before running this fragment alone.
pipelines['AM + SVM'] = pipe

##############################################################################
# Datasets
# -----------------
#
# Datasets can be specified in many ways: Each paradigm has a property
# 'datasets' which returns the datasets that are appropriate for that paradigm

print(LeftRightImagery().datasets)


##########################################################################
# Or you can run a search through the available datasets:
print(utils.dataset_search(paradigm='imagery', total_classes=2))

##########################################################################
# Or you can simply make your own list (which we do here due to computational
# constraints)

datasets = [BNCI2014001()]

##########################################################################
# Paradigm
# --------------------
#
# Paradigms define the events, epoch time, bandpass, and other preprocessing
# parameters. They have defaults that you can read in the documentation, or you
# can simply set them as we do here. A single paradigm defines a method for
# going from continuous data to trial data of a fixed size. To learn more look
Beispiel #11
0
 def datasets(self):
     """Return the imagery datasets providing every event of this paradigm."""
     return utils.dataset_search(
         paradigm='imagery', events=self.events, has_all_events=True)
Beispiel #12
0
 def datasets(self):
     """Return imagery datasets that include both left- and right-hand events."""
     wanted = ['right_hand', 'left_hand']
     return utils.dataset_search(
         paradigm='imagery', events=wanted, has_all_events=True)
Beispiel #13
0
 def datasets(self):
     """Return imagery datasets providing ``self.n_classes`` event classes."""
     return utils.dataset_search(
         paradigm='imagery',
         total_classes=self.n_classes,
         has_all_events=True)
Beispiel #14
0
 def datasets(self):
     """Return every available motor-imagery dataset, unfiltered."""
     return utils.dataset_search(paradigm='imagery')
Beispiel #15
0
from collections import OrderedDict
from moabb.datasets import utils
from moabb.analysis import analyze

import mne
mne.set_log_level(False)

import logging
import coloredlogs
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
coloredlogs.install(level=logging.DEBUG)

# Find single-session imagery datasets with at least two subjects that
# provide either of the two requested events.
datasets = utils.dataset_search('imagery',
                                events=['supination', 'hand_close'],
                                has_all_events=False,
                                min_subjects=2,
                                multi_session=False)

# Cap each dataset at its first ten subjects to bound runtime.
for d in datasets:
    d.subject_list = d.subject_list[:10]

# NOTE(review): ImageryNClass and WithinSessionEvaluation are not imported
# above -- presumably moabb.paradigms / moabb.evaluations; confirm.
paradigm = ImageryNClass(2)
context = WithinSessionEvaluation(paradigm=paradigm,
                                  datasets=datasets,
                                  random_state=42)

pipelines = OrderedDict()
pipelines['av+TS'] = make_pipeline(Covariances(estimator='oas'),
                                   TSclassifier())
pipelines['av+CSP+LDA'] = make_pipeline(Covariances(estimator='oas'), CSP(8),