Example #1
import subprocess
from os.path import basename, join

import numpy as np
from nilearn.input_data import NiftiLabelsMasker

# HRFMatrix, debiasing_spike and atlas_mod are project-specific helpers,
# assumed importable from the surrounding package.


def debiasing(data_file, mask, mtx, idx_u, idx_v, tr, out_dir, history_str):
    """
    Perform debiasing based on denoised edge-time matrix.
    """
    print("Performing debiasing based on denoised edge-time matrix...")
    masker = NiftiLabelsMasker(
        labels_img=mask,
        standardize=False,
        memory="nilearn_cache",
        strategy="mean",
    )

    # Read data
    data = masker.fit_transform(data_file)

    # Generate mask of significant edge-time connections
    ets_mask = np.zeros(data.shape)
    idxs = np.where(mtx != 0)
    time_idxs = idxs[0]
    edge_idxs = idxs[1]

    print("Generating mask of significant edge-time connections...")
    for idx, time_idx in enumerate(time_idxs):
        ets_mask[time_idx, idx_u[edge_idxs[idx]]] = 1
        ets_mask[time_idx, idx_v[edge_idxs[idx]]] = 1

    # Create HRF matrix
    hrf = HRFMatrix(
        TR=tr,
        TE=[0],
        nscans=data.shape[0],
        r2only=True,
        has_integrator=False,
        is_afni=True,
    )
    hrf.generate_hrf()

    # Perform debiasing
    deb_output = debiasing_spike(hrf, data, ets_mask)
    beta = deb_output["beta"]
    fitt = deb_output["betafitts"]

    # Transform results back to 4D
    beta_4D = masker.inverse_transform(beta)
    beta_file = join(out_dir, f"{basename(data_file[:-7])}_beta_ETS.nii.gz")
    beta_4D.to_filename(beta_file)
    atlas_mod.inverse_transform(beta_file, data_file)
    subprocess.run(f"3dNotes {join(out_dir, beta_file)} -h {history_str}",
                   shell=True)

    fitt_4D = masker.inverse_transform(fitt)
    fitt_file = join(out_dir, f"{basename(data_file[:-7])}_fitt_ETS.nii.gz")
    fitt_4D.to_filename(fitt_file)
    subprocess.run(f"3dNotes {join(out_dir, fitt_file)} -h {history_str}",
                   shell=True)
    atlas_mod.inverse_transform(fitt_file, data_file)

    print("Debiasing finished and files saved.")

    return beta, fitt
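
# A hypothetical invocation of the function above; the paths and the history
# string are placeholders, and mtx/idx_u/idx_v are assumed to come from the
# preceding edge-time-series step (mtx: time x edges; idx_u/idx_v: the node
# indices of each edge, as used when building ets_mask).
beta, fitt = debiasing(
    data_file="sub-01_task-rest_bold.nii.gz",  # placeholder input
    mask="atlas_labels.nii.gz",                # placeholder label atlas
    mtx=mtx, idx_u=idx_u, idx_v=idx_v,
    tr=2.0, out_dir="./debiasing_out",
    history_str="edge-time debiasing run",
)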
Example #2
with open(join(artifact_dir, 'config.json')) as f:
    config = json.load(f)

model = load(join(artifact_dir, 'estimator.pkl'))
maps = model.coef_

source = config['source']

if source == 'craddock':
    components = fetch_craddock_parcellation().parcellate400
    data = np.ones_like(check_niimg(components).get_fdata())  # get_data() was removed from nibabel
    mask = new_img_like(components, data)
    label_masker = NiftiLabelsMasker(labels_img=components,
                                     smoothing_fwhm=0,
                                     mask_img=mask).fit()
    maps_img = label_masker.inverse_transform(maps)
else:
    mask = fetch_mask()
    masker = MultiNiftiMasker(mask_img=mask).fit()

    if source == 'msdl':
        components = fetch_atlas_msdl()['maps']
        components = masker.transform(components)
    elif source in ['hcp_rs', 'hcp_rs_concat', 'hcp_rs_positive']:
        data = fetch_atlas_modl()
        if source == 'hcp_rs':
            components_imgs = [data.nips2017_components64]
        elif source == 'hcp_rs_concat':
            components_imgs = [
                data.nips2017_components16, data.nips2017_components64,
                data.nips2017_components256]
Example #3
    cur_pval = (1. + np.sum(perm_Rs[1:, 0] > actual_Rs[i_coef])) / n_permutations
    pvals.append(cur_pval)
    print(cur_pval)
pvals = np.array(pvals)
print('%i CCs are significant at p<0.05' % np.sum(pvals < 0.05))
print('%i CCs are significant at p<0.01' % np.sum(pvals < 0.01))
print('%i CCs are significant at p<0.001' % np.sum(pvals < 0.001))



################
## plot data: ##
################

AAL_weights = pca_brain.inverse_transform(actual_cca.y_weights_[:, 0])
out_nii_CCA = masker.inverse_transform(AAL_weights[None, :])
out_nii_CCA.to_filename('out_nii_CCA{}.nii'.format(1))

out_beh_weights = pca_beh.inverse_transform(actual_cca.x_weights_[:, 0])
out_beh_weights = out_beh_weights.reshape((36, 1))

# save data to dataframe for later analyses
original_beh = {}
original_brain = {}
original_beh["beh_original"] = np.squeeze(out_beh_weights)
original_brain["brain_original"] = AAL_weights
df_brain_vo = pd.DataFrame(data = original_brain)
df_beh_vo = pd.DataFrame(data = original_beh)
df_brain_vo.to_pickle("df_brain_vo")
df_beh_vo["variables"] = df_beh.columns
df_beh_vo = df_beh_vo.set_index("variables")

# Prepare plotting
basc = datasets.fetch_atlas_basc_multiscale_2015(version='asym')['scale444']
brainmask = load_mni152_brain_mask()
masker = NiftiLabelsMasker(labels_img=basc, mask_img=brainmask,
                           memory_level=1, verbose=0,
                           detrend=True, standardize=False,
                           high_pass=0.01, t_r=2.28,
                           resampling_target='labels')
masker.fit()

pipeline.fit(roi_foot_all,y_foot_all)
coef_foot = pipeline.named_steps['svm'].coef_
weight_f = masker.inverse_transform(coef_foot)
plot_stat_map(weight_f, title='Train Imp', display_mode='z',
              cmap='bwr', threshold=0.4)


pipeline.fit(roi_hand_all,y_hand_all)
coef_hand = pipeline.named_steps['svm'].coef_
weight_h = masker.inverse_transform(coef_hand)
plot_stat_map(weight_h, title='Train Imp', display_mode='z',
              cmap='bwr', threshold=0.1)


from sklearn.preprocessing import normalize
# nh = normalize(coef_imp)
# nf = normalize(coef_imag)
# coef_common = nh.T * nf.T
coef_common = coef_hand.T * coef_foot.T
z = coef_common[:]
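
# A hypothetical follow-up (not part of the excerpt): ROIs weighted with the
# same sign by both classifiers come out positive in the element-wise product,
# so the common pattern can be projected back to brain space for inspection.
weight_common = masker.inverse_transform(coef_common.T)
plot_stat_map(weight_common, title='Hand/Foot common pattern',
              display_mode='z', cmap='bwr')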
Example #5
print("Plot mean activation (per condition) for the first two ROIs.")

print("Use the new ROIs to extract data maps in both ROIs.")
first_roi_masker = NiftiLabelsMasker(labels_img=first_roi_img,
                                     resampling_target=None,
                                     standardize=False, detrend=False)
first_roi_masker.fit()
second_roi_masker = NiftiLabelsMasker(labels_img=second_roi_img,
                                      resampling_target=None,
                                      standardize=False, detrend=False)
second_roi_masker.fit()
condition_names = list(set(haxby_labels))
n_cond_img = masked_fmri_vectors[haxby_labels == 'house', :].shape[0]
n_conds = len(condition_names)
for i, cond in enumerate(condition_names):
    cond_mask = haxby_labels == cond
    cond_img = image.index_img(fmri_masked_img, cond_mask)
    X1_vectors = mem.cache(first_roi_masker.transform)(cond_img).T
    X2_vectors = mem.cache(second_roi_masker.transform)(cond_img).T
    X1_image = first_roi_masker.inverse_transform(X1_vectors)
    X2_image = second_roi_masker.inverse_transform(X2_vectors)
    X1_mean_img = mem.cache(image.mean_img)(X1_image)
    X2_mean_img = mem.cache(image.mean_img)(X2_image)
    plot_two_maps(plot_stat_map,
                  (X1_mean_img, 'ROI #1 mean %s image' % cond),
                  (X2_mean_img, 'ROI #2 mean %s image' % cond))
    plt.show()

print("Compare activation profiles for face vs. house, for the first two ROIs.")
Example #6
		# Keep the probability of the "hetero" class from this fit, for the level-1 LogReg stacking
		probs = np.concatenate((probs, proba[:, 1]), axis=None)  # collects all test-set folds
	# Keep this for the LogReg stacking
	output_level_0.append(probs)  # 86 probas for 100 ROIs
	# for checking model fit quality
	accs_level_0.append(np.mean(sample_accs))
	accs_std_level_0.append(np.std(sample_accs))


# arraying the results
accs_level_0 = np.array(accs_level_0)
accs_std_level_0 = np.array(accs_std_level_0)

# niftiing the accuracies results
level0_accs4D = np.array([accs_level_0]*121) # set as fake 4D for nii transform
accs_level0_nii4D = masker.inverse_transform(level0_accs4D)
accs_level0_nii3D = image.index_img(accs_level0_nii4D, 0)
accs_level0_nii3D.to_filename("accs_level0_3D.nii") # transform as nii and save
# niftiing the accuracies stand deviation results
level0_accs_std4D = np.array([accs_std_level_0]*121) # set as fake 4D for nii transform
accs_std_level0_nii4D = masker.inverse_transform(level0_accs_std4D)
accs_std_level0_nii3D = image.index_img(accs_std_level0_nii4D, 0)
accs_std_level0_nii3D.to_filename("accs_std_level0_3D.nii") # transform as nii and save


# save results to a spreadsheet to check against when rerunning the script #reproducibility
coefs_to_save = np.array([accs_level_0, accs_std_level_0]).T
df_accs_per_roi_level0 = pd.DataFrame(data=coefs_to_save,
                                      columns=["Accs_level0", "Accs_level0_std"],
                                      index=atlas.labels)
df_accs_per_roi_level0.to_excel("accs_per_roi_level0.xlsx")
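
# Note: the 121-copy tiling above exists only to give inverse_transform the
# 2D (n_samples, n_labels) array it expects; a single reshaped row, as
# Example #7 does, is equivalent and lighter:
accs_level0_nii3D_alt = image.index_img(
    masker.inverse_transform(accs_level_0.reshape(1, -1)), 0)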

Example #7
final_coefficients = np.mean(sample_coefs, axis=0)
final_coefficients_std = np.std(sample_coefs, axis=0)

# format results for the confusion matrix
all_y_pred_ = np.squeeze(np.array(all_y_pred).reshape(1, 4500))
all_y_true_ = np.squeeze(np.array(all_y_true).reshape(1, 4500))

# save results to a spreadsheet to check against when rerunning the script #reproducibility
coefs_to_save = np.array([final_coefficients, final_coefficients_std]).T
df_coef_per_roi_mri = pd.DataFrame(data=coefs_to_save,
                                   columns=["Coef", "Coef_std"],
                                   index=atlas.labels)
df_coef_per_roi_mri.to_excel("coef_per_roi_mri.xlsx")

# niftiing the MRI results
final_coefficients_nii3D = masker.inverse_transform(
    final_coefficients.reshape(1, 100))
final_coefficients_nii3D.to_filename(
    "final_coefficients3D_mri.nii")  # transform as nii and save

final_coefficients_std_nii3D = masker.inverse_transform(
    final_coefficients_std.reshape(1, 100))
final_coefficients_std_nii3D.to_filename(
    "final_coefficients_std3D_mri.nii")  # transform as nii and save

################################
#### CONFUSION MATRIX ##########
################################
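
# Minimal sketch (assuming scikit-learn) of the matrix itself, computed from
# the pooled label vectors prepared above; rows are true classes, columns
# are predicted classes.
from sklearn.metrics import confusion_matrix

conf_mat = confusion_matrix(all_y_true_, all_y_pred_)
print(conf_mat)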


def rotateTickLabels(ax, rotation, which, rotation_mode='anchor', ha='left'):
    ''' Plotting helper: rotate the x-axis tick labels so they stay centered