Example #1
        elif n_points == 4 * 4:
            isi = 4.8

        else:
            continue

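        # Session labels for cross-validation: int(19.2 / isi) samples per session (assumed reading of the layout)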
        labels = np.hstack([[session] * int(19.2 / isi)
                            for session in range(4)])
        lplo2 = LeavePLabelOut(labels, p=2)
        for train_id, test_id in lplo2:
            betas_train, betas_test = betas_isi[train_id], betas_isi[test_id]
            conditions_train, conditions_test = (conditions_isi[train_id],
                                                 conditions_isi[test_id])

            # Feature selection
            betas_train, betas_test = de.feature_selection(betas_train,
                                                           betas_test,
                                                           conditions_train,
                                                           k=k)

            # Fit a logistic regression to score the model
            accuracy = de.glm_scoring(betas_train, betas_test,
                                      conditions_train, conditions_test)

            scores.append(accuracy)
            subjects.append(subject + 1)
            models.append(model)
            isis.append(isi)

    print('finished subject ' + str(subject))
    # Stack the BOLD signals and the design matrices
    fmri = np.vstack(fmri)
    design = np.vstack(design)
    stimuli = np.vstack(stimuli)
    session_id_fmri = np.hstack(session_id_fmri)

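    # Hold out every pair of sessions in turn (p=2 leave-labels-out)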
    lplo = LeavePLabelOut(session_id_fmri, p=2)
    for train_index, test_index in lplo:
        # Split into train and test sets
        fmri_train, fmri_test = fmri[train_index], fmri[test_index]
        design_train, design_test = design[train_index], design[test_index]
        stimuli_train, stimuli_test = stimuli[train_index], stimuli[test_index]

        # Feature selection
        fmri_train, fmri_test = de.feature_selection(
            fmri_train, fmri_test, np.argmax(stimuli_train, axis=1))

        # Fit a ridge regression to predict the design matrix
        prediction_test, prediction_train, score = de.fit_ridge(
            fmri_train,
            fmri_test,
            design_train,
            design_test,
            double_prediction=True,
            extra=fmri_train)

        # Fit a logistic regression for deconvolution
        accuracy = de.logistic_deconvolution(prediction_train,
                                             prediction_test,
                                             stimuli_train,
                                             stimuli_test,
                                             logistic_window,
                                             delay=delay)

            logistic_window = 4
            delay = 1

        else:
            continue

        labels = np.hstack([[session] * 20 for session in range(4)])
        lplo2 = LeavePLabelOut(labels, p=1)
        for train_id, test_id in lplo2:
            fmri_train, fmri_test = fmri_isi[train_id], fmri_isi[test_id]
            design_train, design_test = (design_isi[train_id],
                                         design_isi[test_id])
            stimuli_train, stimuli_test = (stimuli_isi[train_id],
                                           stimuli_isi[test_id])
            # Feature selection
            fmri_train, fmri_test = de.feature_selection(
                fmri_train, fmri_test, np.argmax(stimuli_train, axis=1))

            # Fit a ridge regression to predict the design matrix
            prediction_test, prediction_train, score = de.fit_ridge(
                fmri_train, fmri_test, design_train, design_test,
                double_prediction=True, extra=fmri_train)

            # Fit a logistic regression for deconvolution
            accuracy = de.logistic_deconvolution(
                prediction_train, prediction_test, stimuli_train,
                stimuli_test, logistic_window, delay=delay)

            scores.append(accuracy)
            subjects.append(subject + 1)
            models.append('logistic deconvolution')
            isis.append(isi)
        elif n_points == 6 * 4:
            isi = 3.2

        elif n_points == 4 * 4:
            isi = 4.8

        else:
            continue

        labels = np.hstack([[session] * int(19.2/isi) for session in range(4)])
        lplo2 = LeavePLabelOut(labels, p=2)
        for train_id, test_id in lplo2:
            betas_train, betas_test = betas_isi[train_id], betas_isi[test_id]
            conditions_train, conditions_test = (conditions_isi[train_id],
                                                 conditions_isi[test_id])

            # Feature selection
            betas_train, betas_test = de.feature_selection(
                betas_train, betas_test, conditions_train, k=k)

            # Fit a logistic regression to score the model
            accuracy = de.glm_scoring(betas_train, betas_test, conditions_train,
                                      conditions_test)

            scores.append(accuracy)
            subjects.append(subject + 1)
            models.append(model)
            isis.append(isi)

    print('finished subject ' + str(subject))
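
These snippets use LeavePLabelOut from the old sklearn.cross_validation module; current scikit-learn releases expose the same session-wise split as LeavePGroupsOut in sklearn.model_selection. A minimal sketch with the newer API (the toy shapes below are assumptions, not taken from the snippets):

import numpy as np
from sklearn.model_selection import LeavePGroupsOut

# Toy stand-in data: 4 sessions with 5 samples each (assumed shapes)
labels = np.hstack([[session] * 5 for session in range(4)])
X = np.random.rand(len(labels), 10)

# Hold out every pair of sessions, as the p=2 splits above do
lpgo = LeavePGroupsOut(n_groups=2)
for train_index, test_index in lpgo.split(X, groups=labels):
    print('held-out sessions:', np.unique(labels[test_index]))
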
Example #5
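    # Drop junk trials (condition code 'ju') before building the folds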
    junk_mask = np.where(conditions != 'ju')
    conditions = conditions[junk_mask]
    fmri_windows = fmri_windows[junk_mask]
    session_id_onset = session_id_onset[junk_mask]

    lplo = LeavePLabelOut(session_id_onset, p=1)
    for train_index, test_index in lplo:
        # Split into train and test sets
        fmri_windows_train, fmri_windows_test = (fmri_windows[train_index],
                                                 fmri_windows[test_index])
        conditions_train, conditions_test = (conditions[train_index],
                                             conditions[test_index])

        # Feature selection
        fmri_windows_train, fmri_windows_test = de.feature_selection(
            fmri_windows_train, fmri_windows_test, conditions_train, k=k)

        # Fit an SVM to score the model
        accuracy = de.svm_scoring(fmri_windows_train, fmri_windows_test,
                                  conditions_train, conditions_test)

        scores.append(accuracy)
        subjects.append(subject + 1)
        models.append(model)

    print('finished subject ' + str(subject))

results = {}
results['subject'] = subjects
results['model'] = models
results['accuracy'] = scores
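
The per-fold scores collected above are usually tabulated afterwards. A minimal sketch, assuming pandas is used to summarise such a results dict (the values below are placeholders, not outputs of these snippets):

import pandas as pd

# Placeholder values standing in for the subjects/models/scores lists above
subjects = [1, 1, 2, 2]
models = ['svm'] * 4
scores = [0.82, 0.79, 0.75, 0.81]

results = pd.DataFrame({'subject': subjects,
                        'model': models,
                        'accuracy': scores})
print(results.groupby(['subject', 'model'])['accuracy'].mean())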