Code Example #1
def test_unsupervised_spatial_filter():
    """Test unsupervised spatial filter."""
    from sklearn.decomposition import PCA
    from sklearn.kernel_ridge import KernelRidge
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    preload=True, baseline=None, verbose=False)

    # Test estimator
    assert_raises(ValueError, UnsupervisedSpatialFilter, KernelRidge(2))

    # Test fit
    X = epochs.get_data()
    n_components = 4
    usf = UnsupervisedSpatialFilter(PCA(n_components))
    usf.fit(X)
    usf1 = UnsupervisedSpatialFilter(PCA(n_components))

    # test transform
    assert_equal(usf.transform(X).ndim, 3)
    # test fit_transform
    assert_array_almost_equal(usf.transform(X), usf1.fit_transform(X))
    assert_equal(usf.transform(X).shape[1], n_components)
    assert_array_almost_equal(usf.inverse_transform(usf.transform(X)), X)

    # Test with average param
    usf = UnsupervisedSpatialFilter(PCA(4), average=True)
    usf.fit_transform(X)
    assert_raises(ValueError, UnsupervisedSpatialFilter, PCA(4), 2)
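For reference, here is a minimal, self-contained sketch of the fit/transform/inverse_transform round trip this test exercises, using synthetic data in place of the MNE sample recording:

import numpy as np
from sklearn.decomposition import PCA
from mne.decoding import UnsupervisedSpatialFilter

rng = np.random.RandomState(42)
X = rng.randn(10, 8, 50)  # (n_epochs, n_channels, n_times)

usf = UnsupervisedSpatialFilter(PCA(4))
Xt = usf.fit_transform(X)           # channels -> components
print(Xt.shape)                     # (10, 4, 50)
X_back = usf.inverse_transform(Xt)  # project back into channel space
print(X_back.shape)                 # (10, 8, 50)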
Code Example #2
def kernel_pca_per_label(num_components, epochs, kernel='linear', plot=True):
    data = epochs.get_data()

    all_labels = epochs.events[:, 2]

    for label in range(8):
        label_inds = np.where(all_labels == label)
        data_per_label = data[label_inds]

        pca = UnsupervisedSpatialFilter(KernelPCA(num_components,
                                                  kernel=kernel),
                                        average=False)
        print('fitting pca for label {} and kernel {}'.format(label, kernel))
        pca_data = pca.fit_transform(data_per_label)
        print('fitting done')

        if label == 0:
            all_pca = pca_data
        else:
            all_pca = np.concatenate((all_pca, pca_data))

        if plot:
            info = mne.create_info(pca_data.shape[1], epochs.info['sfreq'])

            ev = mne.EvokedArray(np.mean(pca_data, axis=0), info=info)

            ev.plot(show=False,
                    window_title="PCA",
                    time_unit='s',
                    titles="Kernel PCA for label {} and kernel".format(
                        label, kernel))
            plt.axvline(x=0.15, color='b', linestyle='--')
            plt.show()

    return all_pca
Code Example #3
File: processing.py  Project: jtn-b/Mind-ID
def ApplyPCA(raw, n):
    SetPaths()
    dictionary = {"T2": 100}
    eves = mne.events_from_annotations(raw, dictionary)
    events = eves[0]
    events_ids = {"target/stimulus": 100}
    epochs = mne.Epochs(raw, events, event_id=events_ids, preload=True)
    fig = epochs.plot()
    fig.savefig(PLOT_PATH + '/' + 'raw_epochs.png')
    fig = epochs.plot_psd()
    fig.savefig(PLOT_PATH + '/' + 'epochs_psd.png')
    from mne.decoding import UnsupervisedSpatialFilter
    from sklearn.decomposition import PCA
    X = epochs.get_data()
    pca = UnsupervisedSpatialFilter(PCA(n), average=False)
    pca_data = pca.fit_transform(X)
    tmin, tmax = -0.1, 0.3
    ev = mne.EvokedArray(np.mean(pca_data, axis=0),
                         mne.create_info(n,
                                         epochs.info['sfreq'],
                                         ch_types='eeg'),
                         tmin=tmin)
    fig = ev.plot(show=False, window_title="PCA", time_unit='s')
    fig.savefig(PLOT_PATH + '/' + 'PCA_15_Channels.png')
    fig = ev.plot_image()
    fig.savefig(PLOT_PATH + '/' + 'EvokedData_As_Image.png')

    epoch_avg = np.mean(pca_data, axis=0)
    return pca_data, epoch_avg
Code Example #4
def kernel_pca(num_components, epochs, kernel='linear', label=-1, plot=True):
    data = epochs.get_data()

    pca = UnsupervisedSpatialFilter(KernelPCA(num_components, kernel=kernel),
                                    average=False)
    print('fitting pca')
    pca_data = pca.fit_transform(data)
    print('fitting done')

    if label != -1:
        all_labels = epochs.events[:, 2]
        inds_to_keep = np.where(all_labels == label)
        pca_subdata = pca_data[inds_to_keep]
        pca_data = pca_subdata

    if plot:
        info = mne.create_info(pca_data.shape[1], epochs.info['sfreq'])

        ev = mne.EvokedArray(np.mean(pca_data, axis=0), info=info)
        if label != -1:
            ev.plot(show=False,
                    window_title="PCA",
                    time_unit='s',
                    titles="Kernel PCA for label {}".format(label))
        else:
            ev.plot(show=False, window_title="PCA", time_unit='s')
        plt.axvline(x=0.15, color='b', linestyle='--')
        plt.show()

    return pca_data
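A hypothetical call on synthetic epochs (plot=False skips the MNE plotting path; assumes the module-level imports used above, i.e. numpy as np, mne, KernelPCA and UnsupervisedSpatialFilter):

import numpy as np
import mne

rng = np.random.RandomState(0)
data = rng.randn(20, 8, 50)  # (n_epochs, n_channels, n_times)
info = mne.create_info(8, sfreq=100., ch_types='eeg')
epochs = mne.EpochsArray(data, info, verbose=False)

pca_data = kernel_pca(5, epochs, kernel='rbf', plot=False)
print(pca_data.shape)  # (20, 5, 50)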
Code Example #5
    def cvlr(C,
             n_features,
             pupil_diff_thresh=None,
             threshold=None,
             X_raw=X_raw,
             y_raw=y_raw):
        n_features = int(n_features)
        # X, y = exclude_eyes(X_raw, y_raw, threshold=threshold, pupil_diff_thresh=pupil_diff_thresh, timepoints=10)
        X, y = (X_raw, y_raw)
        X = X[:, 29:302, best_idx - 5:best_idx + 5]
        if X.shape[0] > 200:

            pca = UnsupervisedSpatialFilter(PCA(n_features), average=False)
            pca_data = pca.fit_transform(X)
            # X_raw = pca_data

            clf = make_pipeline(
                StandardScaler(),
                LogisticRegression(multi_class='multinomial',
                                   C=C,
                                   penalty='l2',
                                   solver='saga',
                                   tol=0.01))
            # clf = make_pipeline(StandardScaler(), LogisticRegression(multi_class='ovr', C=C, penalty='l2', tol=0.01))

            cv = KFold(3)  # CV
            shifts = np.arange(-4,
                               5)  # Additional timepoints to use as features
            # shifts = [0]
            accuracy = []

            for n, (train_index,
                    test_index) in enumerate(cv.split(pca_data[..., 0])):
                print("Fold {0} / 3".format(n + 1))

                # Add features + samples to X/y training data and test data
                X_train, y_train = add_features(pca_data[train_index, :, :],
                                                shifts, y[train_index])
                X_test, y_test = add_features(pca_data[test_index, :, :],
                                              shifts, y[test_index])

                # Add samples to training data
                # X_train, y_train = augment_samples(X_train, shifts, y_train)

                # Fit the classifier to training data and predict on held out data
                clf.fit(
                    X_train[..., 5], y_train
                )  # X represents timepoints 5 either side of the best index
                y_pred = clf.predict(X_test[..., 5])

                accuracy.append(recall_score(y_test, y_pred, average='macro'))

            acc = np.mean(accuracy)

        else:

            acc = 0

        return acc
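add_features is defined elsewhere in this project; purely to illustrate the shifted-timepoint idea, a hypothetical minimal version could look like this (the real helper may differ):

import numpy as np

def add_features(X, shifts, y):
    # Hypothetical stand-in, for illustration only. X has shape
    # (n_epochs, n_features, n_times); stack time-shifted copies along
    # the feature axis so each sample also sees its temporal neighbourhood.
    shifted = [np.roll(X, s, axis=2) for s in shifts]
    return np.concatenate(shifted, axis=1), y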
Code Example #6
File: processing.py  Project: RussellJi/PyProject
    def pca_filter(self, data, parameter_list):
        pca = UnsupervisedSpatialFilter(PCA(parameter_list[-1]), average=False)
        data = data[0:parameter_list[-1], :]
        print(data.shape)
        eeg_data = np.array([data])
        pca_data = pca.fit_transform(eeg_data)
        filter_data = pca_data[0]
        return filter_data
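The np.array([data]) / pca_data[0] pair above is a dimensionality shim: UnsupervisedSpatialFilter expects (n_epochs, n_channels, n_times), so a single continuous chunk is wrapped as a one-epoch array. A minimal sketch:

import numpy as np
from sklearn.decomposition import PCA
from mne.decoding import UnsupervisedSpatialFilter

rng = np.random.RandomState(0)
data = rng.randn(8, 250)  # (n_channels, n_times)
pca = UnsupervisedSpatialFilter(PCA(8), average=False)
filtered = pca.fit_transform(data[np.newaxis])[0]  # wrap, filter, unwrap
print(filtered.shape)  # (8, 250)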
Code Example #7
def PCA_score(Beta, Labels, boxcar, hilb_type):

    labelsR = np.concatenate((Labels[0][0], Labels[0][1]))
    labelsL = np.concatenate((Labels[1][0], Labels[1][1]))

    def Boxcar(data, N):

        fdata = np.zeros((data.shape))
        for i in range(data.shape[0]):
            for q in range(data.shape[1]):
                for k in range(data.shape[2]):
                    if k < N:
                        fdata[i, q, k] = data[i, q, k]
                    else:
                        fdata[i, q, k] = np.sqrt(
                            np.mean(data[i, q, (k - (N - 1)):(k + 1)]**2))
                        # Alternative: plain mean instead of RMS:
                        # fdata[i, q, k] = np.mean(data[i, q, (k - (N - 1)):(k + 1)])
                        # Depending on whether you want RMS or just the mean,
                        # try both and see what works better for you.

        return fdata

    from mne.decoding import UnsupervisedSpatialFilter
    from sklearn.decomposition import PCA
    # Note: PCA can also serve here as an unsupervised spatial filter
    pcaR = UnsupervisedSpatialFilter(PCA(3), average=False)

    if hilb_type == 'amp':

        pca_data_AEFL = pcaR.fit_transform(Beta[0][0][0])
        pca_data_WORD = pcaR.fit_transform(Beta[0][1][0])
        pca_L = np.concatenate((pca_data_AEFL, pca_data_WORD))
        pca_data_AEFR = pcaR.fit_transform(Beta[1][0][0])
        pca_data_WORD_R = pcaR.fit_transform(Beta[1][1][0])
        pca_RR = np.concatenate((pca_data_AEFR, pca_data_WORD_R))

    if hilb_type == 'phase':

        pca_data_AEFL = pcaR.fit_transform(Beta[0][0][1])
        pca_data_WORD = pcaR.fit_transform(Beta[0][1][1])
        pca_L = np.concatenate((pca_data_AEFL, pca_data_WORD))
        pca_data_AEFR = pcaR.fit_transform(Beta[1][0][1])
        pca_data_WORD_R = pcaR.fit_transform(Beta[1][1][1])
        pca_RR = np.concatenate((pca_data_AEFR, pca_data_WORD_R))

    labelsR = labelsR[0:len(pca_RR)]
    labelsL = labelsL[0:len(pca_L)]
    labels_data = labelsL, labelsR

    # Apply boxcar filter, based on the boxcar length specified in upper-level functions
    pca_RR = Boxcar(pca_RR, boxcar)
    pca_L = Boxcar(pca_L, boxcar)

    return (pca_L, pca_RR, labels_data)
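A standalone single-channel version of the Boxcar smoothing above (a simplified sketch; the original loops over epochs and channels):

import numpy as np

def sliding_rms(x, N):
    out = x.astype(float).copy()
    for k in range(N, len(x)):  # the first N points are kept as-is, as above
        out[k] = np.sqrt(np.mean(x[(k - (N - 1)):(k + 1)] ** 2))
    return out

print(sliding_rms(np.array([1., -1., 2., -2., 3., -3.]), 3))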
Code Example #8
File: processing.py  Project: jtn-b/Mind-ID
def PredApplyPCA(raw, n):
    dictionary = {"T2": 100}
    eves = mne.events_from_annotations(raw, dictionary)
    events = eves[0]
    events_ids = {"target/stimulus": 100}
    epochs = mne.Epochs(raw, events, event_id=events_ids, preload=True)
    from mne.decoding import UnsupervisedSpatialFilter
    from sklearn.decomposition import PCA
    X = epochs.get_data()
    pca = UnsupervisedSpatialFilter(PCA(n), average=False)
    pca_data = pca.fit_transform(X)
    epoch_avg = np.mean(pca_data, axis=0)
    return pca_data, epoch_avg
Code Example #9
def applyPCA(components, examples, targets):
    tmin, tmax = -0.1, 0.3
    channel_names = np.loadtxt('./metadata/channel_names.csv', dtype=str)
    epochs_info = create_info(channel_names[0:components].tolist(),
                              240,
                              ch_types='eeg',
                              montage='biosemi64')
    pca = UnsupervisedSpatialFilter(PCA(components), average=False)
    pca_data = pca.fit_transform(examples)
    ev = mne.EvokedArray(np.mean(pca_data, axis=0), epochs_info, tmin=tmin)
    ev.plot(show=False, window_title="PCA", time_unit='s')
    plt.savefig('last_pca_plot.png', dpi=300)
    return examples, targets
Code Example #10
def save_wavelet_complex(n_components):
    all_x_train_samples = []

    for sample in range(1, 22):
        print("sample {}".format(sample))
        epochs = get_epochs(sample, scale=False)
        freqs = np.logspace(*np.log10([2, 15]), num=15)
        n_cycles = freqs / 4.

        print("applying morlet wavelet")
        wavelet_output = tfr_array_morlet(epochs.get_data(),
                                          sfreq=epochs.info['sfreq'],
                                          freqs=freqs,
                                          n_cycles=n_cycles,
                                          output='complex')

        all_x_train_freqs = []

        for freq in range(wavelet_output.shape[2]):
            print("frequency: {}".format(freqs[freq]))

            wavelet_epochs = wavelet_output[:, :, freq, :]
            wavelet_epochs = np.append(wavelet_epochs.real,
                                       wavelet_epochs.imag,
                                       axis=1)

            wavelet_info = mne.create_info(ch_names=wavelet_epochs.shape[1],
                                           sfreq=epochs.info['sfreq'],
                                           ch_types='mag')
            wavelet_epochs = mne.EpochsArray(wavelet_epochs,
                                             info=wavelet_info,
                                             events=epochs.events)

            pca = UnsupervisedSpatialFilter(PCA(n_components=n_components),
                                            average=False)
            print('fitting pca')
            reduced = pca.fit_transform(wavelet_epochs.get_data())
            print('fitting done')

            x_train = reduced.transpose(0, 2, 1).reshape(-1, reduced.shape[1])
            all_x_train_freqs.append(x_train)

        all_x_train_samples.append(all_x_train_freqs)

    print('saving x_train for all samples')
    pickle.dump(
        all_x_train_samples,
        open(
            "DataTransformed/wavelet_complex/15hz/pca_{}/x_train_all_samples.pkl"
            .format(n_components), "wb"))
    print("x_train saved")
Code Example #11
File: cca_try.py  Project: RussellJi/PyProject
def eeg_signals():
    # Get the raw data; take 1 s of data based on the sampling rate
    eeg_data = board.get_current_board_data(sampling_rate)[0:9]
    # Band-pass filtering (0.5-50 Hz): center frequency 25.25 Hz, bandwidth 49.5 Hz
    eeg_channels = BoardShim.get_eeg_channels(0)
    for count, channel in enumerate(eeg_channels):
        eeg_data[channel] = eeg_data[channel] - np.average(eeg_data[channel])
        DataFilter.perform_bandpass(eeg_data[channel],
                                    BoardShim.get_sampling_rate(2), 25.25,
                                    49.5, 3, FilterTypes.BESSEL.value, 0)
    eeg_data = eeg_data[1:9]
    eeg_data = np.array([eeg_data])
    pca = UnsupervisedSpatialFilter(PCA(8), average=False)
    eeg_data = pca.fit_transform(eeg_data)
    eeg_data = eeg_data[0]
    return eeg_data
Code Example #12
File: processing.py  Project: RussellJi/PyProject
    def ica_filter(self, data, parameter_list):

        pca = UnsupervisedSpatialFilter(FastICA(parameter_list[-1], tol=1),
                                        average=False)
        data = data[0:parameter_list[-1], :]
        # print(data.shape)
        eeg_data = np.array([data])
        ica_data = pca.fit_transform(eeg_data)
        filter_data = ica_data[0]
        """
        ica = FastICA(n_components=parameter_list[-1], tol=0.1)
        # print(ica.n_iter_)
        data = data[0:parameter_list[-1], :].T
        filter_data = ica.fit_transform(data)
        print(ica.tol)
        print(ica.n_iter_)
        filter_data = filter_data.T
        """
        return filter_data
Code Example #13
def ica(num_components, epochs, plot=True):
    data = epochs.get_data()

    ica = UnsupervisedSpatialFilter(FastICA(n_components=num_components,
                                            max_iter=2000),
                                    average=False)
    print('fitting ica')
    ica_data = ica.fit_transform(data)
    print('fitting done')

    info = mne.create_info(ica_data.shape[1], epochs.info['sfreq'])

    if plot:
        ev = mne.EvokedArray(np.mean(ica_data, axis=0), info=info)
        ev.plot(show=False, window_title="ICA", time_unit='s', titles="ICA")
        plt.axvline(x=0.15, color='b', linestyle='--')
        plt.show()

    return ica_data
Code Example #14
epochs = mne.Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    proj=False,
                    picks=picks,
                    baseline=None,
                    preload=True,
                    verbose=False)

X = epochs.get_data()

##############################################################################
# Transform data with PCA computed on the concatenated epochs (average=False)
pca = UnsupervisedSpatialFilter(PCA(30), average=False)
pca_data = pca.fit_transform(X)
ev = mne.EvokedArray(np.mean(pca_data, axis=0),
                     mne.create_info(30, epochs.info['sfreq'], ch_types='eeg'),
                     tmin=tmin)
ev.plot(show=False, window_title="PCA")

##############################################################################
# Transform data with ICA computed on the raw epochs (no averaging)
ica = UnsupervisedSpatialFilter(FastICA(30), average=False)
ica_data = ica.fit_transform(X)
ev1 = mne.EvokedArray(np.mean(ica_data, axis=0),
                      mne.create_info(30, epochs.info['sfreq'],
                                      ch_types='eeg'),
                      tmin=tmin)
ev1.plot(show=False, window_title='ICA')
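For contrast with average=False above, average=True fits the estimator on the epoch-averaged (evoked) data while still transforming each epoch; a minimal sketch:

import numpy as np
from sklearn.decomposition import PCA
from mne.decoding import UnsupervisedSpatialFilter

rng = np.random.RandomState(0)
X = rng.randn(20, 10, 100)  # (n_epochs, n_channels, n_times)
pca_avg = UnsupervisedSpatialFilter(PCA(5), average=True)
Xt = pca_avg.fit_transform(X)  # fit on the evoked, transform per epoch
print(Xt.shape)                # (20, 5, 100)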
Code Example #15
File: decoding.py  Project: drordotan/digposmeg
def decode(epochs,
           get_y_label_func,
           epoch_filter=None,
           decoding_method='standard',
           sliding_window_size=None,
           sliding_window_step=None,
           n_jobs=multiprocessing.cpu_count(),
           equalize_event_counts=True,
           only_fit=False,
           generalize_across_time=True):
    """
    Basic flow for decoding
    """

    config = dict(equalize_event_counts=equalize_event_counts,
                  only_fit=only_fit,
                  sliding_window_size=sliding_window_size,
                  sliding_window_step=sliding_window_step,
                  decoding_method=decoding_method,
                  generalize_across_time=generalize_across_time,
                  epoch_filter=str(epoch_filter))

    if epoch_filter is not None:
        epochs = epochs[epoch_filter]

    #-- Classify epochs into groups (training epochs)
    y_labels = get_y_label_func(epochs)

    if equalize_event_counts:
        epochs.events[:, 2] = y_labels
        epochs.event_id = {str(label): label for label in np.unique(y_labels)}
        min_n_items_per_y_label = min(
            [len(epochs[cond]) for cond in epochs.event_id.keys()])
        print("\nEqualizing the number of epochs to %d per condition..." %
              min_n_items_per_y_label)
        epochs.equalize_event_counts(epochs.event_id.keys())
        y_labels = epochs.events[:, 2]

    print("The epochs were classified into %d groups:" % len(set(y_labels)))
    for g in set(y_labels):
        print("Group {:}: {:} epochs".format(g, sum(np.array(y_labels) == g)))

    #-- Create the decoding pipeline
    print("Creating the classification pipeline...")

    epochs_data = epochs.get_data()

    preprocess_pipeline = None

    if decoding_method.startswith('standard'):

        if 'reg' in decoding_method:
            clf = make_pipeline(StandardScaler(), Ridge())
        else:
            clf = make_pipeline(
                StandardScaler(),
                svm.SVC(C=1, kernel='linear', class_weight='balanced'))

        if 'raw' not in decoding_method:
            assert sliding_window_size is not None
            assert sliding_window_step is not None
            preprocess_pipeline = \
                make_pipeline(umne.transformers.SlidingWindow(window_size=sliding_window_size, step=sliding_window_step, average=True))

    elif decoding_method == 'ERP_cov':
        clf = make_pipeline(
            UnsupervisedSpatialFilter(PCA(20), average=False),
            ERPCovariances(
                estimator='lwf'),  # todo how to apply sliding window?
            CSP(30, log=False),
            TangentSpace('logeuclid'),
            LogisticRegression('l2'))  # todo why logistic regression?

    elif decoding_method == 'Xdawn_cov':
        clf = make_pipeline(
            UnsupervisedSpatialFilter(PCA(50), average=False),
            XdawnCovariances(12, estimator='lwf', xdawn_estimator='lwf'),
            TangentSpace('logeuclid'), LogisticRegression('l2'))

    elif decoding_method == 'Hankel_cov':
        clf = make_pipeline(
            UnsupervisedSpatialFilter(PCA(70), average=False),
            HankelCovariances(delays=[1, 8, 12, 64], estimator='oas'),
            CSP(15, log=False), TangentSpace('logeuclid'),
            LogisticRegression('l2'))

    else:
        raise Exception('Unknown decoding method: {:}'.format(decoding_method))

    print('\nDecoding pipeline:')
    for i in range(len(clf.steps)):
        print('Step #{:}: {:}'.format(i + 1, clf.steps[i][1]))

    if preprocess_pipeline is not None:
        print('\nApplying the pre-processing pipeline:')
        for i in range(len(preprocess_pipeline.steps)):
            print('Step #{:}: {:}'.format(i + 1,
                                          preprocess_pipeline.steps[i][1]))
        epochs_data = preprocess_pipeline.fit_transform(epochs_data)

    if only_fit:

        #-- Only fit the decoders

        procedure = 'only_fit'
        scores = None
        cv = None

        if decoding_method.startswith('standard'):
            if 'reg' in decoding_method:
                if 'r2' in decoding_method:
                    scoring = metrics.make_scorer(metrics.r2_score)
                else:
                    scoring = metrics.make_scorer(metrics.mean_squared_error)
            else:
                scoring = 'accuracy'
            if generalize_across_time:
                estimator = GeneralizingEstimator(clf,
                                                  scoring=scoring,
                                                  n_jobs=n_jobs)
            else:
                estimator = SlidingEstimator(clf,
                                             scoring=scoring,
                                             n_jobs=n_jobs)
        else:
            estimator = clf

        estimator.fit(X=epochs_data, y=y_labels)

    else:

        #-- Classify & score -- cross-validation

        procedure = 'fit_and_score'
        print(
            "\nCreating a classifier and calculating accuracy scores (this may take some time)..."
        )

        cv = StratifiedKFold(n_splits=5)
        if decoding_method.startswith('standard'):
            if 'reg' in decoding_method:
                if 'r2' in decoding_method:
                    scoring = metrics.make_scorer(metrics.r2_score)
                else:
                    scoring = metrics.make_scorer(metrics.mean_squared_error)

            else:
                scoring = 'accuracy'
            if generalize_across_time:
                estimator = GeneralizingEstimator(clf,
                                                  scoring=scoring,
                                                  n_jobs=n_jobs)
            else:
                estimator = SlidingEstimator(clf,
                                             scoring=scoring,
                                             n_jobs=n_jobs)

            scores = cross_val_multiscore(estimator=estimator,
                                          X=epochs_data,
                                          y=np.array(y_labels),
                                          cv=cv)
        else:
            scores = _run_cross_validation(X=epochs_data,
                                           y=np.array(y_labels),
                                           clf=clf,
                                           cv=cv)
            estimator = 'None'  # Estimator is not defined in the case of Riemannian decoding

    times = np.linspace(epochs.tmin, epochs.tmax, epochs_data.shape[2])

    return dict(procedure=procedure,
                estimator=estimator,
                scores=scores,
                pipeline=clf,
                preprocess=preprocess_pipeline,
                cv=cv,
                times=times,
                config=config)
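As a quick reference for the two wrappers chosen above: SlidingEstimator fits and scores one classifier per timepoint, while GeneralizingEstimator additionally scores every train-time/test-time pair. A minimal sketch on synthetic data:

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from mne.decoding import (GeneralizingEstimator, SlidingEstimator,
                          cross_val_multiscore)

rng = np.random.RandomState(0)
X = rng.randn(60, 8, 20)        # (n_epochs, n_channels, n_times)
y = rng.randint(0, 2, 60)       # binary labels
clf = make_pipeline(StandardScaler(), LogisticRegression())

sl = SlidingEstimator(clf, scoring='accuracy')
print(cross_val_multiscore(sl, X, y, cv=3).shape)   # (3, 20): folds x times

gen = GeneralizingEstimator(clf, scoring='accuracy')
print(cross_val_multiscore(gen, X, y, cv=3).shape)  # (3, 20, 20)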
Code Example #16
File: riemaniann.py  Project: Junist96/OCL
def pyR_decoding_on_full_epochs(X,
                                y,
                                plot_conf_matrix=0,
                                class_names=None,
                                test_size=0.2,
                                n_splits=5,
                                classifier='ERP_cov'):
    """ This function decodes on the full epoch using the pyRiemannian decoder
    cf https://github.com/Team-BK/Biomag2016/blob/master/Final_Submission.ipynb

    Parameters
    ---------
    X : data extracted from the epochs provided to the decoder
    y : categorical variable (i.e. discrete, but it can have more than 2 categories)
    plot_conf_matrix : set to 1 if you want to see the confusion matrix
    class_names : needed for the legend if confusion matrices are plotted, e.g. ['cat1', 'cat2', 'cat3']
    test_size : proportion of the data on which you want to test the decoder
    n_splits : number of cross-validation folds used when calculating the score
    classifier : set to 'ERP_cov', 'Xdawn_cov' or 'Hankel_cov' depending on the classification you want to do.

    Returns: scores, y_test, y_pred, cnf_matrix or just scores if you don't want the confusion matrix
    -------

    """

    # ------- define the classifier -------
    if classifier == 'ERP_cov':
        spatial_filter = UnsupervisedSpatialFilter(PCA(20), average=False)
        ERP_cov = ERPCovariances(estimator='lwf')
        CSP_30 = CSP(30, log=False)
        tang = TangentSpace('logeuclid')
        clf = make_pipeline(spatial_filter, ERP_cov, CSP_30, tang,
                            LogisticRegression('l2'))

    if classifier == 'Xdawn_cov':
        clf = make_pipeline(
            UnsupervisedSpatialFilter(PCA(50), average=False),
            XdawnCovariances(12, estimator='lwf', xdawn_estimator='lwf'),
            TangentSpace('logeuclid'), LogisticRegression('l2'))

    if classifier == 'Hankel_cov':
        clf = make_pipeline(
            UnsupervisedSpatialFilter(PCA(70), average=False),
            HankelCovariances(delays=[1, 8, 12, 64], estimator='oas'),
            CSP(15, log=False), TangentSpace('logeuclid'),
            LogisticRegression('l2'))

    cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=4343)
    y = np.asarray(y)
    scores = []
    for train_index, test_index in cv.split(X, y):
        print(train_index)
        print(test_index)
        print('we are in the CV loop')
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        # Train on X_train, y_train
        clf.fit(X_train, y_train)
        # Predict the category on X_test
        y_pred = clf.predict(X_test)

        scores.append(accuracy_score(y_true=y_test, y_pred=y_pred))
    scores = np.asarray(scores)

    if plot_conf_matrix == 1:

        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size, random_state=7, stratify=y)
        print('train and test have been split')
        y_pred = clf.fit(X_train, y_train).predict(X_test)
        # Compute confusion matrix
        cnf_matrix = confusion_matrix(y_test, y_pred)
        np.set_printoptions(precision=2)
        print(cnf_matrix)

        # Plot non-normalized confusion matrix
        plt.figure()
        plot_confusion_matrix(cnf_matrix,
                              classes=class_names,
                              title='Confusion matrix, without normalization')

        # Plot normalized confusion matrix
        plt.figure()
        plot_confusion_matrix(cnf_matrix,
                              classes=class_names,
                              normalize=True,
                              title='Normalized confusion matrix')

        plt.show()
        return scores, y_test, y_pred, cnf_matrix

    return scores
Code Example #17
mne.combine_evoked([evoked_zero, -evoked_one],
                   weights='equal').plot_joint(**joint_kwargs)

## Apply FFT
#fft_data = []
#for epochs_idx in range(len(epochs_data)):
#    fft_data.append(abs(fft2(epochs_data[epochs_idx]))/sum(epochs_data[epochs_idx]))
#
#fft_data = np.array(fft_data)

## Apply PCA to the epochs_data
#pca = UnsupervisedSpatialFilter(PCA(14), average=False)
#pca_data = pca.fit_transform(epochs_data)

# Apply ICA to the epochs_data
ica = UnsupervisedSpatialFilter(FastICA(len(picks)), average=False)
ica_data = ica.fit_transform(epochs_data)

## Normalizing ICA data
#for epochs_idx in range(len(ica_data)):
#    for channels_idx in range(14):
#        ica_data[epochs_idx,channels_idx] /= ica_data[epochs_idx].sum()

ica_data_reshape = ica_data.reshape(
    (ica_data.shape[0], ica_data.shape[1] * ica_data.shape[2]))

#------------------------------------------------------------------------------

# Checking ICA through a plot

method = 'fastica'
Code Example #18
File: Guia_2.py  Project: jegonza66/BCI
info = mne.create_info(ch_names=channel_names[:],
                       sfreq=sampling_freq,
                       ch_types=tipos[:],
                       montage='standard_1020')
channel_pos = []
for i in range(len(info['chs'])):
    channel_pos.append(info['chs'][i]['loc'][:2])

datos = data['Datos']
datos = np.swapaxes(datos, 0, 2)
datos_eeg = datos[:, :-3, :]
tiempo = [i / sampling_freq for i in range(len(datos[0][0]))]

# Apply PCA
sk_pca = PCA()
pca = UnsupervisedSpatialFilter(sk_pca, average=False)
data_pca = pca.fit_transform(datos_eeg)

# Plot principal components
W = sk_pca.components_
M = np.linalg.inv(W)
M = M.transpose()

fig = plt.figure(figsize=(15, 25))
for i in range(len(M)):
    ax1 = fig.add_subplot(3, 9, (i + 1))
    ax1.set_title('PCA {}'.format(i))
    mne.viz.plot_topomap(M[i], np.array(channel_pos)[:-3], axes=ax1)
fig.tight_layout()
fig.suptitle('Principal components')
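Incidentally, for a full (square) PCA the rows of components_ are orthonormal, so np.linalg.inv(W).T equals W itself and the patterns plotted above are the component vectors; a quick numerical check:

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
W = PCA().fit(rng.randn(200, 6)).components_  # 6 x 6, orthonormal rows
M = np.linalg.inv(W).T
print(np.allclose(M, W))  # True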
Code Example #19
epochs = mne.Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    proj=False,
                    picks=picks,
                    baseline=None,
                    preload=True,
                    verbose=False)

X = epochs.get_data()

##############################################################################
# Transform data with PCA computed on the concatenated epochs (average=False)
pca = UnsupervisedSpatialFilter(PCA(30), average=False)
pca_data = pca.fit_transform(X)
ev = mne.EvokedArray(np.mean(pca_data, axis=0),
                     mne.create_info(30, epochs.info['sfreq'], ch_types='eeg'),
                     tmin=tmin)
ev.plot(show=False, window_title="PCA", time_unit='s')

##############################################################################
# Transform data with ICA computed on the raw epochs (no averaging)
ica = UnsupervisedSpatialFilter(FastICA(30, whiten='unit-variance'),
                                average=False)
ica_data = ica.fit_transform(X)
ev1 = mne.EvokedArray(np.mean(ica_data, axis=0),
                      mne.create_info(30, epochs.info['sfreq'],
                                      ch_types='eeg'),
                      tmin=tmin)
Code Example #20
    # MEG signals: n_epochs, n_channels, n_times (exclude non-MEG channels)
    X_raw = epochs_clean.get_data()
    y_raw = epochs_clean.events[:, 2]  # Get event types
    # y_raw = np.array([i for n, i in enumerate(y_raw) if n not in drop_idx])

    # select events and time period of interest
    event_selector = (y_raw < 23) | (y_raw == 99)
    X_raw = X_raw[event_selector, ...]
    y_raw = y_raw[event_selector]
    X_raw = X_raw[:, 29:302, :]

    # print("Number of unique events = {0}\n\nEvent types = {1}".format(len(np.unique(y_raw)),
    #                                                                   np.unique(y_raw)))

    # Do PCA with 50 components
    pca = UnsupervisedSpatialFilter(PCA(50), average=False)
    pca_data = pca.fit_transform(X_raw)
    X_raw = pca_data

    # CLASSIFIER
    # Logistic regression with L2 penalty, multi-class classification performed as one-vs-rest
    # Data is transformed to have zero mean and unit variance before being passed to the classifier

    clf = make_pipeline(
        StandardScaler(),
        LogisticRegression(multi_class='multinomial',
                           C=0.1,
                           penalty='l2',
                           solver='saga',
                           tol=0.01))
Code Example #21
File: rsa.py  Project: Fosca/umne
def gen_observed_dissimilarity(epochs0,
                               epochs1,
                               n_pca=30,
                               metric='spearmanr',
                               sliding_window_size=None,
                               sliding_window_step=None,
                               sliding_window_min_size=None,
                               debug=None):
    """
    Generate the observed dissimilarity matrix

    :param epochs0: Epochs, averaged over the relevant parameters
    :param epochs1: Epochs, averaged over the relevant parameters
    :param n_pca: the number of PCA components.
    :param metric: The metric to use when calculating distance between instances in a feature array, for
            non-Riemannian dissimilarity.
            If metric is a string, it must be one of the options allowed by scipy.spatial.distance.pdist
            for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
            If metric is 'precomputed', X is assumed to be a distance matrix.
            Alternatively, if metric is a callable function, it is called on each pair of instances (rows)
            and the resulting value recorded.
            The callable should take two arrays from X as input and return a value indicating the distance between them.
    :param sliding_window_size: If specified (!= None), the data will be averaged using a sliding window before
                    computing dissimilarity. This parameter is the number of time points included in each window
    :param sliding_window_step: The number of time points for sliding the window on each step
    :param sliding_window_min_size: The minimal number of time points acceptable in the last step of the sliding window.
                                If None: min_window_size will be the same as window_size

    :return: np.array
    """

    #-- Validate input
    assert (sliding_window_size is None) == (sliding_window_step is None), \
        "Either sliding_window_size and sliding_window_step are both None, or they are both not None"
    debug = debug or set()

    if metric == 'mahalanobis' and n_pca is not None:
        print(
            'WARNING: PCA should not be used for metric=mahalanobis, ignoring this parameter'
        )
        n_pca = None

    #-- Original data: #epochs x Channels x TimePoints
    data1 = epochs0.get_data()
    data2 = epochs1.get_data()

    #-- z-scoring doesn't change the data dimensions
    data1 = transformers.ZScoreEachChannel(
        debug=debug is True or 'zscore' in debug).fit_transform(data1)
    data2 = transformers.ZScoreEachChannel(
        debug=debug is True or 'zscore' in debug).fit_transform(data2)

    #-- Run PCA. Resulting data: Epochs x PCA-Components x TimePoints
    if n_pca is not None:
        pca = UnsupervisedSpatialFilter(PCA(n_pca), average=False)
        combined_data = np.vstack([data1, data2])
        pca.fit(combined_data)
        data1 = pca.transform(data1)
        data2 = pca.transform(data2)

    #-- Apply a sliding window
    #-- Result in non-Riemann mode: epochs x Channels/components x TimePoints
    #-- todo: Result in Riemann: TimeWindows x Stimuli x Channels/components x TimePoints-within-one-window; will require SlidingWindow(average=False)
    if sliding_window_size is None:
        times = epochs0.times

    else:
        xformer = transformers.SlidingWindow(
            window_size=sliding_window_size,
            step=sliding_window_step,
            min_window_size=sliding_window_min_size)
        data1 = xformer.fit_transform(data1)
        data2 = xformer.fit_transform(data2)

        mid_window_inds = xformer.start_window_inds(len(
            epochs0.times)) + round(sliding_window_size / 2)
        times = epochs0.times[mid_window_inds]

    #-- Get the dissimilarity matrix
    #-- Result: Time point x epochs1 x epochs2
    dissim_matrices = _compute_dissimilarity(
        data1, data2, metric, debug is True or 'dissim' in debug)
    # todo in Riemann: xformer = RiemannDissimilarity(metric=riemann_metric, debug=debug is True or 'dissim' in debug)

    assert len(dissim_matrices) == len(
        times), "There are {} dissimilarity matrices but {} times".format(
            len(dissim_matrices), len(times))

    return DissimilarityMatrix(dissim_matrices,
                               epochs0.metadata,
                               epochs1.metadata,
                               times=times,
                               epochs0_info=epochs0.info,
                               epochs1_info=epochs1.info)
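The fit-on-combined, transform-separately pattern used above, shown in isolation: fitting one PCA on both datasets keeps them in a shared component space, so distances computed between them stay meaningful. A minimal sketch:

import numpy as np
from sklearn.decomposition import PCA
from mne.decoding import UnsupervisedSpatialFilter

rng = np.random.RandomState(0)
data1 = rng.randn(15, 10, 60)  # (n_epochs, n_channels, n_times)
data2 = rng.randn(12, 10, 60)

pca = UnsupervisedSpatialFilter(PCA(4), average=False)
pca.fit(np.vstack([data1, data2]))  # one shared spatial basis
print(pca.transform(data1).shape)   # (15, 4, 60)
print(pca.transform(data2).shape)   # (12, 4, 60)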