Example #1
0
def SRM_from_list(native_subj, story_breaks, story_names, nFeatures):
    """Fit an SRM on per-subject data and split the shared response by story.

    Fits a shared response model with ``nFeatures`` dimensions on the
    subjects' native-space data, z-scores the shared-space time series,
    and slices the concatenated timeline at ``story_breaks``.

    Returns a dict mapping each story name to its shared-space segment.
    """
    model = brainiak.funcalign.srm.SRM(features=nFeatures)
    model.fit(native_subj)

    # Project all subjects into the shared space, stack to a 3-D array,
    # and z-score along the time axis.
    projected = zscore(np.dstack(model.transform(native_subj)), axis=1, ddof=1)

    # Carve the concatenated timeline back into per-story slices.
    return {
        name: projected[:, story_breaks[idx]:story_breaks[idx + 1], :]
        for idx, name in enumerate(story_names)
    }
Example #2
0
# Build the per-subject image data. Each MPI rank materializes only the
# subjects it owns (subject index mod world size == rank); all other
# slots are left as None placeholders.
image_data = [
    np.concatenate(
        [image_data_left[:, :, s], image_data_right[:, :, s]], axis=0)
    if s % size == rank else None
    for s in range(subjects)
]

# Z-score the image data along the time axis for locally owned subjects;
# non-owned slots remain None.
image_data = [
    stats.zscore(data, axis=1, ddof=1) if subj % size == rank else None
    for subj, data in enumerate(image_data)
]

# Project each subject's data into the SRM shared space, then broadcast
# every subject's shared response from the rank that owns it so that all
# ranks end up with the complete set.
image_data_shared = srm.transform(image_data)

for subj, shared in enumerate(image_data_shared):
    image_data_shared[subj] = comm.bcast(shared, root=subj % size)

if rank == 0:
    # Only the root rank runs the evaluation pipeline below.
    # Z-score each subject's shared response along the time axis.
    for subject in range(subjects):
        image_data_shared[subject] = stats.zscore(image_data_shared[subject], axis=1, ddof=1)
    
    # Read the labels of the image data for training the classifier.
    labels = scipy.io.loadmat('data/label.mat')
    labels = np.squeeze(labels['label'])
    
    # Run a leave-one-out cross validation with the subjects
    # (one copy of the label vector per training subject).
    train_labels = np.tile(labels, subjects-1)
    train_data = []
    test_data = []

    # Split each subject's time series in half: first half for SRM
    # training, second half held out for evaluation.
    # NOTE(review): `movie_data` and `nTR` are not defined in this view —
    # presumably loaded earlier in the file; confirm against full script.
    for s in range(subjects):
        train_data.append(movie_data[s, :, :nTR // 2])
        test_data.append(movie_data[s, :, nTR // 2:])

    # Z-score both halves per subject along the time axis.
    for subject in range(subjects):
        train_data[subject] = stats.zscore(train_data[subject], axis=1, ddof=1)
    for subject in range(subjects):
        test_data[subject] = stats.zscore(test_data[subject], axis=1, ddof=1)

    # Fit a 50-feature SRM on the training halves.
    srm = SRM(n_iter=10, features=50)
    srm.fit(train_data)

    # Project the held-out halves into the learned shared space.
    data_shared = srm.transform(test_data)

    # Evaluate via leave-one-out time-segment matching (6-TR window).
    timesegmentmatching_accuracy_evaluation_loo_cv(data_shared,
                                                   win_size=6,
                                                   method="SRM")

    # Repeat the evaluation with the DP-SRM variant on the same split.
    dpsrm = DPSRM(n_features=50)
    dpsrm.fit(np.array(train_data))
    data_shared_dpsrm = dpsrm.transform(test_data)
    timesegmentmatching_accuracy_evaluation_loo_cv(data_shared_dpsrm,
                                                   win_size=6,
                                                   method="DPSRM")

    # DP-SRM with orthonormalized weight matrices.
    # NOTE(review): this final call is truncated in the visible source.
    data_shared_dpsrm_orthow = dpsrm.transform_orthow(test_data)
    timesegmentmatching_accuracy_evaluation_loo_cv(data_shared_dpsrm_orthow,
                                                   win_size=6,
Example #4
0
        #------ SRM -------------------------------------------------
        # get roidata & clean
        # NOTE(review): `roi`, `train_data`, `test_data`, `feat`, `nsubs`,
        # and `srm_type` come from an enclosing loop outside this view.
        train = train_data[roi]
        test = test_data[roi]
        # Keep only voxels that pass the quality check in both halves.
        good_inds = utils.find_goodVox(train, test)
        train_good = train[:, good_inds, :]
        test_good = test[:, good_inds, :]

        # train srm on train data
        srm = brainiak.funcalign.srm.SRM(n_iter=10, features=feat)
        srm.fit(train_good)

        # transform test data as appropriate
        if srm_type == 'catch2shapes':
            # Project the held-out data into the learned shared space.
            data = np.array(srm.transform(test_good))

        elif srm_type == 'shapesResid' or srm_type == 'shapesShared':
            # Reconstruct each subject's training data from the shared
            # response (w_[s] @ s_) and compute the residual against the
            # original training data.
            data = np.empty(train_good.shape)
            resid = np.empty(train_good.shape)
            for s in range(nsubs):
                data[s, :, :] = srm.w_[s].dot(np.array(srm.s_))
                resid[s, :, :] = train_good[s, :, :] - data[s, :, :]

            # For 'shapesResid' keep the residual; for 'shapesShared'
            # keep the reconstruction.
            if srm_type == 'shapesResid':
                data = resid
        else:
            # Unknown srm_type: abort the enclosing loop.
            print('invalid srm type')
            break

        #------ Spatial sim ---------------------------------
Example #5
0
    # Fit the SRM data on all of the baseline subjects (not the recon subject in position 0)
    srm.fit(train_data[1:])

    print("\nSTEP 2: LEARN INDIVIDUAL WEIGHT MATRIX\n")

    # Now we can find the weights that would fit the held out subject into this space
    w_ = srm.transform_subject(train_data[0])

    # And we add these weights to our weight matrices
    # (prepended so its index matches the subject's position 0 in the data).
    srm.w_ = [w_] + srm.w_

print("\nSTEP 3: FIT THE SECOND HALF DATA USING LEARNED WEIGHTS")

# Now we can transform the test data -- for all of the subjects
shared_test = srm.transform(test_data)
print('Shape of shared test:', shared_test[0].shape)

print("\nSTEP 4: TRANSFORM GROUP AVERAGE INTO INDIVIDUAL SPACE")

# recon subject is not in the front if we are doing baseline analysis with individual features
# NOTE(review): `combine`, `same_group`, and `leftout_sub` are defined
# outside this view — confirm their semantics against the full script.
if combine == 'no' and same_group == 1:
    # remove the test subject from the list
    mask = [
        subjects for subjects in range(len(shared_test))
        if subjects != leftout_sub
    ]
    # Average the remaining subjects' shared responses, then z-score the
    # average along the time axis.
    average_base = np.mean(np.array(shared_test)[mask, :, :], axis=0)
    zscored_base = stats.zscore(average_base, axis=1, ddof=1)

    heldout_weight = srm.w_[leftout_sub]