# Example 1
def test_prepare_mvpa_data():
    """Check io.prepare_mvpa_data against pre-computed reference data.

    Loads the expected processed epochs from ``data/expected_processed_data.npy``
    (relative to this test file) and asserts that both the processed data and
    the labels match the stored references.

    Relies on module-level fixtures: ``dir`` (note: shadows the builtin --
    defined elsewhere in this module, left as-is), ``extension``,
    ``mask_file``, ``epoch_file`` and ``expected_labels``.
    """
    processed_data, labels = io.prepare_mvpa_data(dir, extension, mask_file, epoch_file)
    expected_processed_data = np.load(os.path.join(os.path.dirname(__file__),
                                                   'data/expected_processed_data.npy'))
    assert len(processed_data) == len(expected_processed_data), \
        'numbers of epochs do not match in test_prepare_mvpa_data'
    # Compare epoch by epoch; zip is safe because the lengths were just asserted.
    for actual, expected in zip(processed_data, expected_processed_data):
        assert np.allclose(actual, expected), \
            'raw data do not match'
    assert np.array_equal(labels, expected_labels), \
        'the labels do not match in test_prepare_mvpa_data'
# Example 2
def test_prepare_mvpa_data():
    """Check io.prepare_mvpa_data against pre-computed reference data.

    Loads the expected processed epochs from ``data/expected_processed_data.npy``
    (relative to this test file) and asserts that both the processed data and
    the labels match the stored references.

    Relies on module-level fixtures: ``dir`` (note: shadows the builtin --
    defined elsewhere in this module, left as-is), ``extension``,
    ``mask_file``, ``epoch_file`` and ``expected_labels``.
    """
    processed_data, labels = io.prepare_mvpa_data(dir, extension, mask_file,
                                                  epoch_file)
    expected_processed_data = np.load(
        os.path.join(os.path.dirname(__file__),
                     'data/expected_processed_data.npy'))
    assert len(processed_data) == len(expected_processed_data), \
        'numbers of epochs do not match in test_prepare_mvpa_data'
    # enumerate over the reference data; lengths were asserted equal above
    for idx, expected in enumerate(expected_processed_data):
        assert np.allclose(processed_data[idx], expected), \
            'raw data do not match'
    assert np.array_equal(labels, expected_labels), \
        'the labels do not match in test_prepare_mvpa_data'
# Example 3
#from sklearn.externals import joblib

# Log line layout: timestamp - logger name - level - message.
# Named LOG_FORMAT instead of "format" so the builtin format() is not shadowed.
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# To write the log to a file instead of the console,
# replace "stream=sys.stdout" with "filename='fcma.log'"
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, stream=sys.stdout)
logger = logging.getLogger(__name__)

# Usage:
# python mvpa_classification.py face_scene bet.nii.gz face_scene/visual_top_mask.nii.gz face_scene/fs_epoch_labels.npy
if __name__ == '__main__':
    # Command-line arguments: data directory, image file extension,
    # mask NIfTI file, and epoch-label .npy file.
    data_dir = sys.argv[1]
    extension = sys.argv[2]
    mask_file = sys.argv[3]
    epoch_file = sys.argv[4]

    processed_data, labels = io.prepare_mvpa_data(data_dir, extension, mask_file, epoch_file)

    # transpose data to facilitate training and prediction
    # (rows become epochs/samples, as scikit-learn expects)
    processed_data = processed_data.T

    # Number of leading epochs used for training; the rest are held out for
    # testing.  NOTE(review): 204 is dataset-specific -- confirm against the
    # epoch file actually being used.
    num_training_epochs = 204

    # no shrinking, set C=10
    clf = svm.SVC(kernel='rbf', shrinking=False, C=10)
    training_data = processed_data[0:num_training_epochs]
    test_data = processed_data[num_training_epochs:]
    clf.fit(training_data, labels[0:num_training_epochs])
    # joblib can be used for saving and loading models
    #joblib.dump(clf, 'model/logistic.pkl')
    #clf = joblib.load('model/svm.pkl')
    print(clf.predict(test_data))
    print(clf.decision_function(test_data))
    print(np.asanyarray(labels[num_training_epochs:]))
# Example 4
# python mvpa_classification.py face_scene bet.nii.gz face_scene/visual_top_mask.nii.gz face_scene/fs_epoch_labels.npy
if __name__ == '__main__':
    data_dir = sys.argv[1]
    extension = sys.argv[2]
    mask_file = sys.argv[3]
    epoch_file = sys.argv[4]

    epoch_list = np.load(epoch_file)
    num_subjects = len(epoch_list)
    num_epochs_per_subj = epoch_list[0].shape[1]

    logger.info(
        'doing MVPA training and classification on %d subjects, each of which has %d epochs'
        % (num_subjects, num_epochs_per_subj))

    processed_data, labels = io.prepare_mvpa_data(data_dir, extension,
                                                  mask_file, epoch_file)

    # transpose data to facilitate training and prediction
    processed_data = processed_data.T

    clf = svm.SVC(kernel='linear', shrinking=False, C=1)
    # doing leave-one-subject-out cross validation
    for i in range(num_subjects):
        leave_start = i * num_epochs_per_subj
        leave_end = (i + 1) * num_epochs_per_subj
        training_data = np.concatenate(
            (processed_data[0:leave_start], processed_data[leave_end:]),
            axis=0)
        test_data = processed_data[leave_start:leave_end]
        training_labels = np.concatenate(
            (labels[0:leave_start], labels[leave_end:]), axis=0)