import os

import numpy as np
import numpy.linalg as npl
import pandas as pd
import matplotlib.pyplot as plt

# Project-specific helpers (prepare_standard_data, prepare_mask, spatial_smooth,
# first_pcs_removed, form_cond_filepath, conv_target_non_target, events2neural_std,
# conv_std, multiple_comp, compute_t_values) and the TR constant are assumed to be
# importable from the project's own modules.


def preprocessing_pipeline(subject_num, task_num, standard_source_prefix, cutoff=5000):
  # cutoff: intensity threshold used by prepare_mask to build the in-brain mask;
  # 5000 matches the value passed to prepare_mask in single_subject_linear_model below.

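  # load the standardized 4D data for this subject and task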
  data_4d = prepare_standard_data(subject_num, task_num, standard_source_prefix)

  in_brain_mask, in_brain_vols = prepare_mask(data_4d, cutoff)

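  # spatially smooth the data inside the brain mask (pad thickness 2.0, Gaussian sigma 2.0)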
  data_4d_smoothed = spatial_smooth(data_4d, in_brain_mask, 2.0, 2.0, False)

  in_brain_mask, in_brain_vols = prepare_mask(data_4d_smoothed, cutoff)

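  # regress out the first two principal components and keep the residuals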
  residuals = first_pcs_removed(in_brain_vols, 2)

  return residuals, in_brain_mask
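
# A minimal usage sketch with hypothetical subject/task IDs and data path; the helpers
# are expected to come from the project's own modules:
#
#   residuals, in_brain_mask = preprocessing_pipeline("011", "001", "/path/to/standardized_data")
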
def single_subject_linear_model(standard_source_prefix, cond_filepath_prefix, subject_num, task_num, output_filename):
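  # Fit a voxelwise GLM with task, cue, drift, and PCA noise regressors; save
  # design-matrix, residual-normality, and R^2 diagnostics; return beta and
  # t-statistic volumes together with the mask, PCA components, data, and Y.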

  data = prepare_standard_data(subject_num, task_num, standard_source_prefix)

  # regressors below are built over the full run length (data TRs + 5) and then trimmed
  # with [5:], presumably because the first five volumes were already dropped from the
  # standardized data
  n_trs = data.shape[-1] + 5

  cond_filename_003 = form_cond_filepath(subject_num, task_num, "003", cond_filepath_prefix)
  cond_filename_005 = form_cond_filepath(subject_num, task_num, "005", cond_filepath_prefix)
  cond_filename_001 = form_cond_filepath(subject_num, task_num, "001", cond_filepath_prefix)
  cond_filename_004 = form_cond_filepath(subject_num, task_num, "004", cond_filepath_prefix)
  cond_filename_007 = form_cond_filepath(subject_num, task_num, "007", cond_filepath_prefix)

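  # convolved regressors for target, nontarget, and error events (cond files 003 and 007);
  # the first five samples are dropped to match the data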
  target_convolved, nontarget_convolved, error_convolved = conv_target_non_target(n_trs, cond_filename_003, cond_filename_007, TR, tr_divs = 100.0)
  target_convolved, nontarget_convolved, error_convolved = target_convolved[5:], nontarget_convolved[5:], error_convolved[5:]

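  # block on/off regressor (cond file 005) plus convolved block start and end cue
  # regressors (cond files 001 and 004), trimmed the same way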
  block_regressor = events2neural_std(cond_filename_005, TR, n_trs)[5:]

  block_start_cues = conv_std(n_trs, cond_filename_001, TR)[5:]
  block_end_cues = conv_std(n_trs, cond_filename_004, TR)[5:]

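  # linear and mean-centered quadratic drift regressors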
  linear_drift = np.linspace(-1, 1, n_trs)
  quadratic_drift = linear_drift ** 2
  quadratic_drift -= np.mean(quadratic_drift)

  linear_drift = linear_drift[5:]
  quadratic_drift = quadratic_drift[5:]

  in_brain_mask, _ = prepare_mask(data, 5000)

  pad_thickness = 2.0
  sigma = 2.0

  b_vols = spatial_smooth(data, in_brain_mask, pad_thickness, sigma, False)
  in_brain_tcs = b_vols[in_brain_mask]

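  # SVD of the time-by-time covariance of the demeaned data; the first two columns of U
  # serve as noise regressors in the design matrix below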
  Y = in_brain_tcs.T
  Y_demeaned = Y - np.mean(Y, axis=1).reshape([-1, 1])
  unscaled_cov = Y_demeaned.dot(Y_demeaned.T)
  U, S, V = npl.svd(unscaled_cov)

  n_betas = 11

  X = np.ones((n_trs - 5, n_betas))
  X[:, 0] = target_convolved
  X[:, 1] = nontarget_convolved
  X[:, 2] = error_convolved
  X[:, 3] = block_regressor
  X[:, 4] = block_start_cues
  X[:, 5] = block_end_cues
  X[:, 6] = linear_drift
  X[:, 7] = quadratic_drift
  X[:, 8] = U[:,0]
  X[:, 9] = U[:,1]
  # column 10 (the last column) stays as ones and serves as the intercept


  # plot design matrix
  plt.figure()
  plt.imshow(X, aspect=0.1)
  plt.savefig(os.path.join(output_filename, "sub%s_task%s_design_matrix.png" % (subject_num, task_num)), format='png', dpi=500)      

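  # ordinary least-squares fit via the pseudoinverse; each column of B holds one voxel's betas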
  B = npl.pinv(X).dot(Y)

  # test normality of residuals
  residuals = Y.T - X.dot(B).T
  alpha_test, bonferroni_test, hochberg_test, benjamini_test = [val * 1.0 / Y.shape[-1] for val in multiple_comp(residuals)]
  normality_test_results = {"Alpha Test":alpha_test, "Bonferroni Procedure":bonferroni_test,"Hochberg Procedure":hochberg_test,"Benjamini-Hochberg Procedure":benjamini_test}

  normality_test_pd = pd.DataFrame(normality_test_results, index=["Failure Rate"])

  normality_test_pd.to_csv(os.path.join(output_filename, "sub%s_task%s_linear_model_normality_tests_failure_rates.csv" % (subject_num, task_num)))


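  # per-voxel R^2 of the fitted model; the mean across voxels is written to a text file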
  rs_squared = []
  for i in range(Y.shape[-1]):
    r_squared = 1 - np.sum((Y[:,i] - X.dot(B[:,i]))**2) * 1.0 / np.sum((Y[:,i] - np.mean(Y[:,i])) ** 2)
    rs_squared.append(r_squared)

  np.savetxt(os.path.join(output_filename, "glm_mean_R_squared_" + ("0_back" if task_num == "001" else "2_back") + ".txt"), np.array([np.mean(rs_squared)]))

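  # place the per-voxel betas back into full 3D volumes, one volume per regressor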
  b_vols = np.zeros((data.shape[0:-1] + (n_betas,)))
  b_vols[in_brain_mask, :] = B.T


  # compute t values for target and nontarget betas

  t_test_target_beta = 0
  t_values = compute_t_values(X, B, Y, t_test_target_beta)
  t_vols_beta_0 = np.zeros((data.shape[0:-1]))
  t_vols_beta_0[in_brain_mask] = t_values

  t_test_target_beta = 1
  t_values = compute_t_values(X, B, Y, t_test_target_beta)
  t_vols_beta_1 = np.zeros((data.shape[0:-1]))
  t_vols_beta_1[in_brain_mask] = t_values

  # compute t values for noise regressor betas

  t_values = compute_t_values(X, B, Y, 6)
  t_vols_beta_6 = np.zeros((data.shape[0:-1]))
  t_vols_beta_6[in_brain_mask] = t_values

  t_values = compute_t_values(X, B, Y, 7)
  t_vols_beta_7 = np.zeros((data.shape[0:-1]))
  t_vols_beta_7[in_brain_mask] = t_values

  t_values = compute_t_values(X, B, Y, 8)
  t_vols_beta_8 = np.zeros((data.shape[0:-1]))
  t_vols_beta_8[in_brain_mask] = t_values

  t_values = compute_t_values(X, B, Y, 9)
  t_vols_beta_9 = np.zeros((data.shape[0:-1]))
  t_vols_beta_9[in_brain_mask] = t_values

  t_vols_beta_6_to_9 = [t_vols_beta_6, t_vols_beta_7, t_vols_beta_8, t_vols_beta_9]

  return b_vols, in_brain_mask, U, Y, data, t_vols_beta_0, t_vols_beta_1, t_vols_beta_6_to_9  
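

# compute_t_values is not included in this listing. A minimal sketch consistent with
# how it is called above (one t statistic per voxel for the regressor in column `col`)
# might look like the following; the project's actual implementation may differ.
def compute_t_values_sketch(X, B, Y, col):
  # residuals of the fitted GLM and residual degrees of freedom
  residuals = Y - X.dot(B)
  df = X.shape[0] - npl.matrix_rank(X)
  # per-voxel estimate of the noise variance
  sigma_2 = np.sum(residuals ** 2, axis=0) / df
  # variance factor for the selected beta: [(X'X)^-1]_{col, col}
  c = np.zeros(X.shape[1])
  c[col] = 1
  design_var = c.dot(npl.pinv(X.T.dot(X))).dot(c)
  # t = beta / SE(beta), one value per voxel
  return B[col, :] / np.sqrt(sigma_2 * design_var)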