# Riemannian-geometry pipeline: OAS-shrunk covariance estimation, projection
# onto the tangent space, then an L2-regularized logistic regression.
ts_lr = make_pipeline(
    Covariances(estimator='oas'),
    TangentSpace(metric='riemann'),
    LR(C=1.0),
)

results = evaluation.process({'csp+lda': csp_lda, 'ts+lr': ts_lr})
print(results.head())

##############################################################################
# Electrode selection
# -------------------
#
# It is possible to select the electrodes that are shared by all datasets
# using the `find_intersecting_channels` function. Datasets that have zero
# overlap with the others are discarded. It returns the set of common
# channels, as well as the list of datasets with valid channels.

electrodes, datasets = find_intersecting_channels(datasets)

# Re-run the same within-session evaluation, now restricted to the datasets
# that share a common electrode set.
evaluation = WithinSessionEvaluation(
    paradigm=paradigm,
    datasets=datasets,
    overwrite=True,
)
results = evaluation.process({'csp+lda': csp_lda, 'ts+lr': ts_lr})
print(results.head())

##############################################################################
# Plot results
# ------------
#
# Compare the obtained results with the two pipelines, CSP+LDA and logistic
# regression computed in the tangent space of the covariance matrices.

fig = moabb_plt.paired_plot(results, 'csp+lda', 'ts+lr')
plt.show()
# ----------------
#
# Here we plot the results using some of the convenience methods within the
# toolkit. The score_plot visualizes all the data with one score per subject
# for every dataset and pipeline.

fig = moabb_plt.score_plot(results)
plt.show()

###############################################################################
# For a comparison of two algorithms there is the paired_plot, which plots
# the performance of one pipeline against the performance of the other over
# all chosen datasets. Note that there is only one score per subject,
# regardless of the number of sessions.

fig = moabb_plt.paired_plot(results, "CSP+LDA", "RG+LDA")
plt.show()

###############################################################################
# Statistical testing and further plots
# -------------------------------------
#
# If the statistical significance of results is of interest, the method
# compute_dataset_statistics allows one to show a meta-analysis style plot as
# well. For an overview of how all algorithms perform in comparison with each
# other, the method find_significant_differences and the summary_plot are
# possible.

stats = compute_dataset_statistics(results)
P, T = find_significant_differences(stats)
LogisticRegression(penalty="l1", solver="liblinear"), ) sk_eval = CrossSessionEvaluation( paradigm=paradigm, datasets=datasets, suffix="examples", overwrite=True, ) sk_res = sk_eval.process(sk_ppl) ############################################################################### # Combining results # ----------------- # # Even if the results have been obtained by different evaluation processes, it # possible to combine the resulting dataframes to analyze and plot the results. all_res = pd.concat([mne_res, adv_res, sk_res]) # We could compare the Euclidean and Riemannian performance using a `paired_plot` paired_plot(all_res, "XDAWN LR", "RG LR") # All the results could be compared and statistical analysis could highlight the # differences between pipelines. stats = compute_dataset_statistics(all_res) P, T = find_significant_differences(stats) summary_plot(P, T) plt.show()
# ----------------
#
# Here we plot the results using some of the convenience methods within the
# toolkit. The score_plot visualizes all the data with one score per subject
# for every dataset and pipeline.

fig = moabb_plt.score_plot(results)
plt.show()

###############################################################################
# For a comparison of two algorithms there is the paired_plot, which plots
# the performance of one pipeline against the performance of the other over
# all chosen datasets. Note that there is only one score per subject,
# regardless of the number of sessions.

fig = moabb_plt.paired_plot(results, 'CSP + LDA', 'RG + LDA')
plt.show()

###############################################################################
# Statistical testing and further plots
# -------------------------------------
#
# If the statistical significance of results is of interest, the method
# compute_dataset_statistics allows one to show a meta-analysis style plot as
# well. For an overview of how all algorithms perform in comparison with each
# other, the method find_significant_differences and the summary_plot are
# possible.

stats = compute_dataset_statistics(results)
P, T = find_significant_differences(stats)
# Tangent-space classifier: estimate OAS-regularized covariances, map them to
# the Riemannian tangent space, and classify with logistic regression.
ts_lr = make_pipeline(
    Covariances(estimator="oas"),
    TangentSpace(metric="riemann"),
    LR(C=1.0),
)

pipelines = {"csp+lda": csp_lda, "ts+lr": ts_lr}
results = evaluation.process(pipelines)
print(results.head())

##############################################################################
# Electrode selection
# -------------------
#
# It is possible to select the electrodes that are shared by all datasets
# using the `find_intersecting_channels` function. Datasets that have zero
# overlap with the others are discarded. It returns the set of common
# channels, as well as the list of datasets with valid channels.

electrodes, datasets = find_intersecting_channels(datasets)

# Evaluate again on the reduced dataset list that shares a common montage.
evaluation = WithinSessionEvaluation(
    paradigm=paradigm,
    datasets=datasets,
    overwrite=True,
)
results = evaluation.process(pipelines)
print(results.head())

##############################################################################
# Plot results
# ------------
#
# Compare the obtained results with the two pipelines, CSP+LDA and logistic
# regression computed in the tangent space of the covariance matrices.

fig = moabb_plt.paired_plot(results, "csp+lda", "ts+lr")
plt.show()