##########################################################################
# If you would like to specify the logging level when it is running, you can
# use the standard python logging commands through the top-level moabb module
import moabb
from moabb.datasets import BNCI2014001, utils
from moabb.evaluations import CrossSessionEvaluation
from moabb.paradigms import LeftRightImagery
from moabb.pipelines.features import LogVariance

##########################################################################
# In order to create pipelines within a script, you will likely need at least
# the make_pipeline function. They can also be specified via a .yml file. Here
# we will make a couple pipelines just for convenience

moabb.set_log_level("info")

##############################################################################
# Create pipelines
# ----------------
#
# We create two pipelines: channel-wise log variance followed by LDA, and
# channel-wise log variance followed by a cross-validated SVM (note that a
# cross-validation via scikit-learn cannot be described in a .yml file). For
# later in the process, the pipelines need to be in a dictionary where the key
# is the name of the pipeline and the value is the Pipeline object

# NOTE(review): make_pipeline, LDA, np, SVC and GridSearchCV are not imported
# in this chunk — presumably imported earlier in the full file; verify.
pipelines = {}
pipelines["AM+LDA"] = make_pipeline(LogVariance(), LDA())

# Grid-search a linear SVM over a log-spaced range of the C penalty.
parameters = {"C": np.logspace(-2, 2, 10)}
clf = GridSearchCV(SVC(kernel="linear"), parameters)
import seaborn as sns
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

from mne.decoding import CSP
from pyriemann.estimation import Covariances
from pyriemann.tangentspace import TangentSpace

# `import moabb` is required for the set_log_level call below.
import moabb
# Fixed typo: the MOABB dataset class is BNCI2014001 (there is no BNCI2014001c).
from moabb.datasets import BNCI2014001
from moabb.paradigms import LeftRightImagery
from moabb.evaluations import CrossSessionEvaluation

moabb.set_log_level('info')

##############################################################################
# Create pipelines
# ----------------
#
# Pipelines must be a dict of sklearn pipeline transformer.
#
# The csp implementation from MNE is used. We selected 8 CSP components, as
# usually done in the literature.
#
# The riemannian geometry pipeline consists in covariance estimation, tangent
# space mapping and finally a logistic regression for the classification.

pipelines = {}
# NOTE(review): wildcard import kept for backward compatibility; prefer
# explicit names (e.g. ShallowFBCSPNet, EEGNetv1) once callers are known.
from braindecode.models import *  # ShallowFBCSPNet, EEGNetv1
from skorch.callbacks import LRScheduler
from skorch.helper import predefined_split
from braindecode import EEGClassifier, EEGClassifier_weighted

import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import pandas as pd

import mne
mne.set_log_level(False)
import moabb
moabb.set_log_level(False)

# import sys
# sys.path.insert(0, '/run/media/george/purple_data/codes/git_repos/skorch')
# import skorch

######################################################################


def train(subject_id):
    """Train on the BCI IV 2a dataset for one subject (cross-subject setup).

    NOTE(review): the body of this function continues beyond this chunk;
    only the visible prefix is restored here.
    """
    print('\n--------------------------------------------------\n')
    print(
        'Training on BCI_IV_2a dataset | Cross-subject | ID: {:02d}\n'.format(
            subject_id))
sessions = None else: raise ValueError( 'Currently, mixed subject:session and only subject syntax is not supported.' ) print(f'Subjects: {subjects}') print(f'Sessions: {sessions}') start_timestamp_as_str = dt.now().replace(microsecond=0).isoformat().replace( ":", "-") warnings.simplefilter(action='ignore', category=RuntimeWarning) warnings.simplefilter(action='ignore', category=UserWarning) warnings.simplefilter(action='ignore', category=FutureWarning) moabb.set_log_level('warn') np.random.seed(42) ############################################################################## # Create pipelines ############################################################################## labels_dict = {'Target': 1, 'NonTarget': 0} prepro_cfg = ana_cfg['default']['data_preprocessing'] bench_cfg = get_benchmark_config(dataset_name, prepro_cfg, subjects=subjects, sessions=sessions)