Example 1
def preprocessing_pipeline(subject_num, task_num, standard_source_prefix,
                           cond_filepath_prefix):

    img = prepare_standard_img(subject_num, task_num, standard_source_prefix)
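    # drop the first 5 volumes (the usual dummy/pre-steady-state scans); n_trs below is
    # the original, untrimmed TR count that the condition files refer to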
    data = img.get_data()[..., 5:]

    n_trs = data.shape[-1] + 5

    cond_filename_003 = form_cond_filepath(subject_num, task_num, "003",
                                           cond_filepath_prefix)
    cond_filename_005 = form_cond_filepath(subject_num, task_num, "005",
                                           cond_filepath_prefix)
    cond_filename_001 = form_cond_filepath(subject_num, task_num, "001",
                                           cond_filepath_prefix)
    cond_filename_004 = form_cond_filepath(subject_num, task_num, "004",
                                           cond_filepath_prefix)
    cond_filename_007 = form_cond_filepath(subject_num, task_num, "007",
                                           cond_filepath_prefix)

    target_convolved, nontarget_convolved, error_convolved = conv_target_non_target(
        n_trs, cond_filename_003, cond_filename_007, TR, tr_divs=100.0)
    target_convolved, nontarget_convolved, error_convolved = target_convolved[
        5:], nontarget_convolved[5:], error_convolved[5:]

    block_regressor = events2neural_std(cond_filename_005, TR, n_trs)[5:]

    block_start_cues = conv_std(n_trs, cond_filename_001, TR)[5:]
    block_end_cues = conv_std(n_trs, cond_filename_004, TR)[5:]

    linear_drift = np.linspace(-1, 1, n_trs)
    quadratic_drift = linear_drift**2
    quadratic_drift -= np.mean(quadratic_drift)

    linear_drift = linear_drift[5:]
    quadratic_drift = quadratic_drift[5:]

    in_brain_mask, _ = prepare_mask(data, CUTOFF)

    pad_thickness = 2.0
    sigma = 2.0

    b_vols = spatial_smooth(data, in_brain_mask, pad_thickness, sigma, False)
    in_brain_tcs = b_vols[in_brain_mask]

    Y = in_brain_tcs.T
    Y_demeaned = Y - np.mean(Y, axis=1).reshape([-1, 1])
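    # SVD of the TR-by-TR covariance of the demeaned data yields temporal principal
    # components; the first one (U[:, 0]) is used as a noise regressor in X below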
    unscaled_cov = Y_demeaned.dot(Y_demeaned.T)
    U, S, V = npl.svd(unscaled_cov)

    n_betas = 10

    X = np.ones((n_trs - 5, n_betas))
    X[:, 0] = target_convolved
    X[:, 1] = nontarget_convolved
    X[:, 2] = error_convolved
    X[:, 3] = block_regressor
    X[:, 4] = block_start_cues
    X[:, 5] = block_end_cues
    X[:, 6] = linear_drift
    X[:, 7] = quadratic_drift
    X[:, 8] = U[:, 0]
    # the last column (index 9) is left as ones and serves as the intercept

    B = npl.pinv(X).dot(Y)
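    # OLS via the pseudoinverse: each column of B holds the fitted betas for one voxel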

    residuals = in_brain_tcs - X.dot(B).T

    B[(3, 4, 5, 6, 7, 8, 9), :] = 0
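    # columns 3-9 are the block, cue, drift, PC and intercept regressors; zeroing their
    # betas means the projection below keeps only the target/nontarget/error signal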

    # project Y onto the functional betas
    functional_Y = X.dot(B).T

    b_vols = np.zeros((data.shape))
    b_vols[in_brain_mask, :] = functional_Y + residuals

    return b_vols, img, in_brain_mask
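
# conv_std and conv_target_non_target are imported helpers that are not shown on this
# page. As a rough, hypothetical illustration of the kind of regressor they produce, the
# sketch below builds a boxcar neural time course from event onsets and convolves it with
# a double-gamma HRF; the function names and event format here are assumptions, not the
# project's actual API.
import numpy as np
from scipy.stats import gamma

def hrf_sketch(times):
    # canonical double-gamma haemodynamic response sampled at `times` (in seconds)
    peak = gamma.pdf(times, 6)
    undershoot = gamma.pdf(times, 12)
    values = peak - 0.35 * undershoot
    return values / np.max(values) * 0.6

def convolve_events_sketch(onsets, durations, n_trs, tr):
    # boxcar neural signal from event onsets/durations, convolved with the HRF
    neural = np.zeros(n_trs)
    for onset, duration in zip(onsets, durations):
        neural[int(round(onset / tr)):int(round((onset + duration) / tr))] = 1
    convolved = np.convolve(neural, hrf_sketch(np.arange(0, 30, tr)))
    return convolved[:n_trs]  # trim the convolution tail back to n_trs samples

# usage: two 10 s events in a 100-TR run with TR = 2.5 s
example_regressor = convolve_events_sketch([20.0, 120.0], [10.0, 10.0], n_trs=100, tr=2.5)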
Example 3
def single_subject_linear_model(standard_source_prefix, cond_filepath_prefix, subject_num, task_num, output_filename):

  data = prepare_standard_data(subject_num, task_num, standard_source_prefix)

  n_trs = data.shape[-1] + 5
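  # prepare_standard_data appears to return data with the first 5 volumes already dropped;
  # adding 5 back gives the full run length, so regressors built from the condition files
  # below can be trimmed with [5:] to line up with the data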

  cond_filename_003 = form_cond_filepath(subject_num, task_num, "003", cond_filepath_prefix)
  cond_filename_005 = form_cond_filepath(subject_num, task_num, "005", cond_filepath_prefix)
  cond_filename_001 = form_cond_filepath(subject_num, task_num, "001", cond_filepath_prefix)
  cond_filename_004 = form_cond_filepath(subject_num, task_num, "004", cond_filepath_prefix)
  cond_filename_007 = form_cond_filepath(subject_num, task_num, "007", cond_filepath_prefix)

  target_convolved, nontarget_convolved, error_convolved = conv_target_non_target(n_trs, cond_filename_003, cond_filename_007, TR, tr_divs = 100.0)
  target_convolved, nontarget_convolved, error_convolved = target_convolved[5:], nontarget_convolved[5:], error_convolved[5:]

  block_regressor = events2neural_std(cond_filename_005, TR, n_trs)[5:]

  block_start_cues = conv_std(n_trs, cond_filename_001, TR)[5:]
  block_end_cues = conv_std(n_trs, cond_filename_004, TR)[5:]

  linear_drift = np.linspace(-1, 1, n_trs)
  quadratic_drift = linear_drift ** 2
  quadratic_drift -= np.mean(quadratic_drift)

  linear_drift = linear_drift[5:]
  quadratic_drift = quadratic_drift[5:]

  in_brain_mask, _ = prepare_mask(data, 5000)

  pad_thickness = 2.0
  sigma = 2.0

  b_vols = spatial_smooth(data, in_brain_mask, pad_thickness, sigma, False)
  in_brain_tcs = b_vols[in_brain_mask]

  Y = in_brain_tcs.T
  Y_demeaned = Y - np.mean(Y, axis=1).reshape([-1, 1])
  unscaled_cov = Y_demeaned.dot(Y_demeaned.T)
  U, S, V = npl.svd(unscaled_cov)
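  # temporal PCA via SVD of the TR-by-TR covariance; the first two components
  # (U[:, 0] and U[:, 1]) enter the design matrix below as noise regressors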

  n_betas = 11

  X = np.ones((n_trs - 5, n_betas))
  X[:, 0] = target_convolved
  X[:, 1] = nontarget_convolved
  X[:, 2] = error_convolved
  X[:, 3] = block_regressor
  X[:, 4] = block_start_cues
  X[:, 5] = block_end_cues
  X[:, 6] = linear_drift
  X[:, 7] = quadratic_drift
  X[:, 8] = U[:,0]
  X[:, 9] = U[:,1]
  # the last column (index 10) is left as ones and serves as the intercept


  # plot design matrix
  plt.figure()
  plt.imshow(X, aspect=0.1)
  plt.savefig(os.path.join(output_filename, "sub%s_task%s_design_matrix.png" % (subject_num, task_num)), format='png', dpi=500)      

  B = npl.pinv(X).dot(Y)

  # test normality of residuals
  residuals = Y.T - X.dot(B).T
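  # multiple_comp presumably returns, for each procedure, the number of voxels whose
  # residuals fail a normality test; dividing by the voxel count gives failure rates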
  alpha_test, bonferroni_test, hochberg_test, benjamini_test = [val * 1.0 / Y.shape[-1] for val in multiple_comp(residuals)]
  normality_test_results = {"Alpha Test":alpha_test, "Bonferroni Procedure":bonferroni_test,"Hochberg Procedure":hochberg_test,"Benjamini-Hochberg Procedure":benjamini_test}

  normality_test_pd = pd.DataFrame(normality_test_results, index=["Failure Rate"])

  normality_test_pd.to_csv(os.path.join(output_filename, "sub%s_linear_model_normality_tests_failure_rates.csv" % subject_num))


  rs_squared = []
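  # per-voxel R^2 of the full model; only the mean over in-brain voxels is saved below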
  for i in range(Y.shape[-1]):
    r_squared = 1 - np.sum((Y[:,i] - X.dot(B[:,i]))**2) * 1.0 / np.sum((Y[:,i] - np.mean(Y[:,i])) ** 2)
    rs_squared.append(r_squared)

  np.savetxt(os.path.join(output_filename, "glm_mean_R_squared_" + ("0_back" if task_num == "001" else "2_back") + ".txt"), np.array([np.mean(rs_squared)]))

  b_vols = np.zeros((data.shape[0:-1] + (n_betas,)))
  b_vols[in_brain_mask, :] = B.T


  # compute t values for target and nontarget betas

  t_test_target_beta = 0
  t_values = compute_t_values(X, B, Y, t_test_target_beta)
  t_vols_beta_0 = np.zeros((data.shape[0:-1]))
  t_vols_beta_0[in_brain_mask] = t_values

  t_test_target_beta = 1
  t_values = compute_t_values(X, B, Y, t_test_target_beta)
  t_vols_beta_1 = np.zeros((data.shape[0:-1]))
  t_vols_beta_1[in_brain_mask] = t_values

  # compute t values for noise regressor betas

  t_values = compute_t_values(X, B, Y, 6)
  t_vols_beta_6 = np.zeros((data.shape[0:-1]))
  t_vols_beta_6[in_brain_mask] = t_values

  t_values = compute_t_values(X, B, Y, 7)
  t_vols_beta_7 = np.zeros((data.shape[0:-1]))
  t_vols_beta_7[in_brain_mask] = t_values

  t_values = compute_t_values(X, B, Y, 8)
  t_vols_beta_8 = np.zeros((data.shape[0:-1]))
  t_vols_beta_8[in_brain_mask] = t_values

  t_values = compute_t_values(X, B, Y, 9)
  t_vols_beta_9 = np.zeros((data.shape[0:-1]))
  t_vols_beta_9[in_brain_mask] = t_values

  t_vols_beta_6_to_9 = [t_vols_beta_6, t_vols_beta_7, t_vols_beta_8, t_vols_beta_9]

  return b_vols, in_brain_mask, U, Y, data, t_vols_beta_0, t_vols_beta_1, t_vols_beta_6_to_9  
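
# compute_t_values is imported from elsewhere and not shown on this page. The sketch
# below is a minimal, hypothetical version of the standard per-voxel OLS t statistic it
# presumably computes (homoscedastic noise assumed); the real helper may differ in detail.
import numpy as np
import numpy.linalg as npl

def compute_t_values_sketch(X, B, Y, col):
    # X: (n_trs, n_betas) design, B: (n_betas, n_voxels) betas, Y: (n_trs, n_voxels) data
    n_trs, n_betas = X.shape
    residuals = Y - X.dot(B)
    df = n_trs - npl.matrix_rank(X)
    sigma2 = np.sum(residuals ** 2, axis=0) / df        # per-voxel noise variance
    contrast = np.zeros(n_betas)
    contrast[col] = 1                                   # select a single beta
    design_var = contrast.dot(npl.pinv(X.T.dot(X))).dot(contrast)
    return contrast.dot(B) / np.sqrt(sigma2 * design_var)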
"""sub011, task001_run_001"""

import project_config
import nibabel as nib
import os
import numpy as np
import outliers_utils
import matplotlib.pyplot as plt
from general_utils import form_cond_filepath

data_dir_path = os.path.join(os.path.dirname(__file__), '..', 'data')
output_dir = os.path.join(os.path.dirname(__file__), '..', 'results')
BOLD_file_1 = os.path.join(data_dir_path,
                           'sub011/BOLD/task001_run001/bold.nii.gz')
cond_filename = form_cond_filepath('011', '001', '002', data_dir_path)

# load the data
img = nib.load(BOLD_file_1)
data = img.get_data()
ds1 = data.shape
# drop the first five volumes
data = data[..., 5:]

# standard deviations of all voxels along the TRs
std = outliers_utils.vol_std(data)

# find the std outliers
outlier = outliers_utils.iqr_outliers(std)[0]

# plot the std outliers
std_outlier = []
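
# outliers_utils is not shown on this page; the sketch below is a hypothetical, minimal
# version of what vol_std and iqr_outliers typically do (per-volume standard deviation,
# then a 1.5*IQR fence), not necessarily the project's implementation.
import numpy as np

def vol_std_sketch(data):
    # standard deviation of each volume over all voxels (data is a 4D array)
    return np.std(data.reshape(-1, data.shape[-1]), axis=0)

def iqr_outliers_sketch(measures, iqr_scale=1.5):
    # indices of values outside [Q1 - scale*IQR, Q3 + scale*IQR], plus the fence itself
    q1, q3 = np.percentile(measures, [25, 75])
    iqr = q3 - q1
    low, high = q1 - iqr_scale * iqr, q3 + iqr_scale * iqr
    outliers = np.nonzero((measures < low) | (measures > high))[0]
    return outliers, (low, high)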
Example 6

if __name__ == "__main__":

    data_dir_path = os.path.join(os.path.dirname(__file__), "..", "data")
    brain_structure_path = os.path.join(
        data_dir_path, "mni_icbm152_csf_tal_nlin_asym_09c_2mm.nii")
    nice_cmap_values_path = os.path.join(data_dir_path, "actc.txt")

    plt.rcParams['image.cmap'] = 'gray'
    plt.rcParams['image.interpolation'] = 'nearest'

    subject_num = "011"
    task_num = "001"
    cond_num = "003"

    standard_source_prefix = data_dir_path
    cond_filepath_011 = form_cond_filepath(subject_num, task_num, cond_num,
                                           data_dir_path)
    output_filename = os.path.join(os.path.dirname(__file__), "..", "results")

    cutoff = project_config.MNI_CUTOFF

    brain_structure = nib.load(brain_structure_path).get_data()
    nice_cmap_values = np.loadtxt(nice_cmap_values_path)

    single_subject_activation_across_methods(standard_source_prefix,
                                             cond_filepath_011, subject_num,
                                             task_num, brain_structure,
                                             nice_cmap_values)
Example 9
def single_subject_linear_model(standard_source_prefix, cond_filepath_prefix, subject_num, task_num, output_filename):

  data = prepare_standard_data(subject_num, task_num, standard_source_prefix)

  n_trs = data.shape[-1] + 5

  cond_filename_003 = form_cond_filepath(subject_num, task_num, "003", cond_filepath_prefix)
  cond_filename_005 = form_cond_filepath(subject_num, task_num, "005", cond_filepath_prefix)
  cond_filename_001 = form_cond_filepath(subject_num, task_num, "001", cond_filepath_prefix)
  cond_filename_004 = form_cond_filepath(subject_num, task_num, "004", cond_filepath_prefix)
  cond_filename_007 = form_cond_filepath(subject_num, task_num, "007", cond_filepath_prefix)

  target_convolved, nontarget_convolved, error_convolved = conv_target_non_target(n_trs, cond_filename_003, cond_filename_007, TR, tr_divs = 100.0)
  target_convolved, nontarget_convolved, error_convolved = target_convolved[5:], nontarget_convolved[5:], error_convolved[5:]

  block_regressor = events2neural_std(cond_filename_005, TR, n_trs)[5:]

  block_start_cues = conv_std(n_trs, cond_filename_001, TR)[5:]
  block_end_cues = conv_std(n_trs, cond_filename_004, TR)[5:]

  linear_drift = np.linspace(-1, 1, n_trs)
  quadratic_drift = linear_drift ** 2
  quadratic_drift -= np.mean(quadratic_drift)

  linear_drift = linear_drift[5:]
  quadratic_drift = quadratic_drift[5:]

  in_brain_mask, _ = prepare_mask(data, 5000)

  pad_thickness = 2.0
  sigma = 2.0

  b_vols = spatial_smooth(data, in_brain_mask, pad_thickness, sigma, False)
  in_brain_tcs = b_vols[in_brain_mask]

  Y = in_brain_tcs.T
  Y_demeaned = Y - np.mean(Y, axis=1).reshape([-1, 1])
  unscaled_cov = Y_demeaned.dot(Y_demeaned.T)
  U, S, V = npl.svd(unscaled_cov)

  n_betas = 11

  X = np.ones((n_trs - 5, n_betas))
  X[:, 0] = target_convolved
  X[:, 1] = nontarget_convolved
  X[:, 2] = error_convolved
  X[:, 3] = block_regressor
  X[:, 4] = block_start_cues
  X[:, 5] = block_end_cues
  X[:, 6] = linear_drift
  X[:, 7] = quadratic_drift
  X[:, 8] = U[:,0]
  X[:, 9] = U[:,1]
  # the last column (index 10) is left as ones and serves as the intercept


  # plot design matrix
  plt.figure()
  plt.imshow(X, aspect=0.1)
  plt.savefig(os.path.join(output_filename, "sub%s_task%s_design_matrix.png" % (subject_num, task_num)), format='png', dpi=500)      

  B = npl.pinv(X).dot(Y)

  # test normality of residuals
  residuals = Y.T - X.dot(B).T
  alpha_test, bonferroni_test, hochberg_test, benjamini_test = [val * 1.0 / Y.shape[-1] for val in multiple_comp(residuals)]
  normality_test_results = {"Alpha Test":alpha_test, "Bonferroni Procedure":bonferroni_test,"Hochberg Procedure":hochberg_test,"Benjamini-Hochberg Procedure":benjamini_test}

  normality_test_pd = pd.DataFrame(normality_test_results, index=["Failure Rate"])

  normality_test_pd.to_csv(os.path.join(output_filename, "sub%s_task%s_linear_model_normality_tests_failure_rates.csv" % (subject_num, task_num)))

  rs_squared = []
  for i in range(Y.shape[-1]):
    r_squared = 1 - np.sum((Y[:,i] - X.dot(B[:,i]))**2) * 1.0 / np.sum((Y[:,i] - np.mean(Y[:,i])) ** 2)
    rs_squared.append(r_squared)

  np.savetxt(os.path.join(output_filename, "glm_mean_R_squared_" + ("0_back" if task_num == "001" else "2_back") + ".txt"), np.array([np.mean(rs_squared)]))

  b_vols = np.zeros((data.shape[0:-1] + (n_betas,)))
  b_vols[in_brain_mask, :] = B.T


  # compute t values for target and nontarget betas

  t_test_target_beta = 0
  t_values = compute_t_values(X, B, Y, t_test_target_beta)
  t_vols_beta_0 = np.zeros((data.shape[0:-1]))
  t_vols_beta_0[in_brain_mask] = t_values

  t_test_target_beta = 1
  t_values = compute_t_values(X, B, Y, t_test_target_beta)
  t_vols_beta_1 = np.zeros((data.shape[0:-1]))
  t_vols_beta_1[in_brain_mask] = t_values

  # compute t values for noise regressor betas

  t_values = compute_t_values(X, B, Y, 6)
  t_vols_beta_6 = np.zeros((data.shape[0:-1]))
  t_vols_beta_6[in_brain_mask] = t_values

  t_values = compute_t_values(X, B, Y, 7)
  t_vols_beta_7 = np.zeros((data.shape[0:-1]))
  t_vols_beta_7[in_brain_mask] = t_values

  t_values = compute_t_values(X, B, Y, 8)
  t_vols_beta_8 = np.zeros((data.shape[0:-1]))
  t_vols_beta_8[in_brain_mask] = t_values

  t_values = compute_t_values(X, B, Y, 9)
  t_vols_beta_9 = np.zeros((data.shape[0:-1]))
  t_vols_beta_9[in_brain_mask] = t_values

  t_vols_beta_6_to_9 = [t_vols_beta_6, t_vols_beta_7, t_vols_beta_8, t_vols_beta_9]

  return b_vols, in_brain_mask, U, Y, data, t_vols_beta_0, t_vols_beta_1, t_vols_beta_6_to_9  
Example 10
    labels = kmeans.perform_kMeans_clustering_analysis(residuals.reshape((-1, residuals.shape[-1])), 6)

    b_vols = np.zeros(in_brain_mask.shape)
    b_vols[in_brain_mask] = labels
    b_vols[~in_brain_mask] = np.nan

    return b_vols
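
# kmeans.perform_kMeans_clustering_analysis is not shown on this page; the sketch below
# is a hypothetical, minimal stand-in that wraps scikit-learn's KMeans and returns one
# integer cluster label per voxel time course, which is what the fragment above expects.
from sklearn.cluster import KMeans

def perform_kmeans_sketch(timecourses, n_clusters):
    # timecourses: (n_voxels, n_trs) array of residual time courses
    km = KMeans(n_clusters=n_clusters, n_init=10, random_state=0)
    return km.fit_predict(timecourses)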


if __name__ == "__main__":

    data_dir_path = os.path.join(os.path.dirname(__file__), "..", "data")
    brain_structure_path = os.path.join(
        data_dir_path, "mni_icbm152_csf_tal_nlin_asym_09c_2mm.nii")

    standard_source_prefix = data_dir_path
    cond_filepath_011 = form_cond_filepath("011", "001", "003", data_dir_path)
    output_filename = os.path.join(os.path.dirname(__file__), "..", "results")

    subject_num = "011"
    task_num = "001"
    cond_num = "002"

    plt.rcParams['image.cmap'] = 'gray'
    plt.rcParams['image.interpolation'] = 'nearest'

    cutoff = project_config.MNI_CUTOFF

    nice_cmap_values_path = os.path.join(data_dir_path, "actc.txt")
    brain_structure = nib.load(brain_structure_path).get_data()
    nice_cmap_values = np.loadtxt(nice_cmap_values_path)
    nice_cmap = colors.ListedColormap(nice_cmap_values, 'actc')