def run(opts):
	arg_perm_start = int(opts.range[0])
	arg_perm_stop = int(opts.range[1]) + 1
	medtype = str(opts.medtype[0])
	surface = str(opts.surface[0])

	#load variables
	y = np.load("python_temp_med_%s/merge_y.npy" % (surface))
	num_vertex = np.load("python_temp_med_%s/num_vertex.npy" % (surface))
	num_vertex_lh = np.load("python_temp_med_%s/num_vertex_lh.npy" % (surface))
	bin_mask_lh = np.load("python_temp_med_%s/bin_mask_lh.npy" % (surface))
	bin_mask_rh = np.load("python_temp_med_%s/bin_mask_rh.npy" % (surface))
	n = np.load("python_temp_med_%s/num_subjects.npy" % (surface))
	pred_x = np.load("python_temp_med_%s/pred_x.npy" % (surface))
	depend_y = np.load("python_temp_med_%s/depend_y.npy" % (surface))
	adjac_lh = np.load("python_temp_med_%s/adjac_lh.npy" % (surface))
	adjac_rh = np.load("python_temp_med_%s/adjac_rh.npy" % (surface))
	all_vertex = np.load("python_temp_med_%s/all_vertex.npy" % (surface))
	optstfce = np.load('python_temp_med_%s/optstfce.npy' % (surface))
	vdensity_lh = np.load('python_temp_med_%s/vdensity_lh.npy' % (surface))
	vdensity_rh = np.load('python_temp_med_%s/vdensity_rh.npy' % (surface))

	#load TFCE function
	calcTFCE_lh = CreateAdjSet(float(optstfce[0]), float(optstfce[1]), adjac_lh) # H=2, E=1
	calcTFCE_rh = CreateAdjSet(float(optstfce[0]), float(optstfce[1]), adjac_rh) # H=2, E=1

	#permute Sobel Z
	if not os.path.exists("output_med_%s/perm_SobelZ_%s" % (surface, medtype)):
		os.mkdir("output_med_%s/perm_SobelZ_%s" % (surface, medtype))
	os.chdir("output_med_%s/perm_SobelZ_%s" % (surface, medtype))
	for iter_perm in range(arg_perm_start, arg_perm_stop):
		np.random.seed(int(iter_perm * 1000 + time()))
		print("Iteration number : %d" % (iter_perm))
		indices_perm = np.random.permutation(list(range(n)))
		if (medtype == 'M') or (medtype == 'I'):
			pathA_nx = pred_x[indices_perm]
			pathB_nx = depend_y
			SobelZ = calc_sobelz(medtype, pathA_nx, pathB_nx, y, n, num_vertex)
		else:
			pathA_nx = pred_x[indices_perm]
			pathB_nx = depend_y[indices_perm]
			SobelZ = calc_sobelz(medtype, pathA_nx, pathB_nx, y, n, num_vertex)
		write_perm_maxTFCE_vertex("Zstat_%s" % medtype, SobelZ, num_vertex_lh, bin_mask_lh, bin_mask_rh, calcTFCE_lh, calcTFCE_rh, vdensity_lh, vdensity_rh)
	print("Finished. Randomization took %.1f seconds" % (time() - start_time))
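# The next run() appears to be step 1 of the voxel-wise mediation analysis: it loads the
# voxel data cached in python_temp/, optionally residualises it against covariates,
# computes Sobel Z statistics for the chosen mediation model, and writes TFCE-enhanced
# statistic images to output_med_<medtype>/.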
def run(opts):
	arg_predictor = opts.input[0]
	arg_depend = opts.input[1]
	medtype = opts.medtype[0]
	if not os.path.exists("python_temp"):
		print("python_temp missing!")

	#load variables
	raw_nonzero = np.load('python_temp/raw_nonzero.npy')
	n = raw_nonzero.shape[1]
	affine_mask = np.load('python_temp/affine_mask.npy')
	data_mask = np.load('python_temp/data_mask.npy')
	data_index = data_mask > 0.99
	num_voxel = np.load('python_temp/num_voxel.npy')
	pred_x = np.genfromtxt(arg_predictor, delimiter=",")
	depend_y = np.genfromtxt(arg_depend, delimiter=",")
	imgext = '.nii.gz' #default save type is nii.gz
	#if not os.path.isfile('python_temp/imgext.npy'): # to maintain compatibility
	#	imgext = '.nii.gz'
	#else:
	#	imgext = np.load('python_temp/imgext.npy')

	#TFCE
	adjac = create_adjac_voxel(data_index, data_mask, num_voxel, dirtype=opts.tfce[2])
	calcTFCE = CreateAdjSet(float(opts.tfce[0]), float(opts.tfce[1]), adjac) # i.e. default: H=2, E=2, 26 neighbour connectivity

	#step1
	if opts.covariates:
		arg_covars = opts.covariates[0]
		covars = np.genfromtxt(arg_covars, delimiter=",")
		x_covars = np.column_stack([np.ones(n), covars])
		y = resid_covars(x_covars, raw_nonzero)
	else:
		y = raw_nonzero.T

	#save
	np.save('python_temp/pred_x', pred_x)
	np.save('python_temp/depend_y', depend_y)
	np.save('python_temp/adjac', adjac)
	np.save('python_temp/medtype', medtype)
	np.save('python_temp/optstfce', opts.tfce)
	np.save('python_temp/raw_nonzero_corr', y.T.astype(np.float32, order="C"))

	#step2 mediation
	SobelZ = calc_sobelz(medtype, pred_x, depend_y, y, n, num_voxel)

	#write TFCE images
	if not os.path.exists("output_med_%s" % medtype):
		os.mkdir("output_med_%s" % medtype)
	os.chdir("output_med_%s" % medtype)
	write_voxelStat_img('SobelZ_%s' % medtype, SobelZ, data_mask, data_index, affine_mask, calcTFCE, imgext)
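# The next run() is the matching permutation step for the voxel-wise mediation analysis:
# it reloads the cached variables, permutes the predictor (and, for medtype 'Y', the
# dependent variable with the same indices), recomputes Sobel Z maps, and records the
# maximum TFCE value of each permutation for later FWE correction.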
def run(opts):
	arg_perm_start = int(opts.range[0])
	arg_perm_stop = int(opts.range[1]) + 1
	medtype = str(opts.medtype[0])

	#load variables
	num_voxel = np.load('python_temp/num_voxel.npy')
	n = np.load('python_temp/num_subjects.npy')
	ny = np.load('python_temp/raw_nonzero_corr.npy').T
	pred_x = np.load('python_temp/pred_x.npy')
	depend_y = np.load("python_temp/depend_y.npy")
	adjac = np.load('python_temp/adjac.npy')
	optstfce = np.load('python_temp/optstfce.npy')

	#load TFCE function
	calcTFCE = CreateAdjSet(float(optstfce[0]), float(optstfce[1]), adjac) # H=2, E=2, 26 neighbour connectivity

	#permute Sobel Z values and write max TFCE values
	if not os.path.exists("output_med_%s/perm_SobelZ" % medtype):
		os.mkdir("output_med_%s/perm_SobelZ" % medtype)
	os.chdir("output_med_%s/perm_SobelZ" % medtype)
	for iter_perm in range(arg_perm_start, arg_perm_stop):
		np.random.seed(int(iter_perm * 1000 + time()))
		print("Iteration number : %d" % (iter_perm))
		indices_perm = np.random.permutation(list(range(n)))
		if (medtype == 'M') or (medtype == 'I'):
			pathA_nx = pred_x[indices_perm]
			pathB_nx = depend_y
			SobelZ = calc_sobelz(medtype, pathA_nx, pathB_nx, ny, n, num_voxel)
		else:
			pathA_nx = pred_x[indices_perm]
			pathB_nx = depend_y[indices_perm]
			SobelZ = calc_sobelz(medtype, pathA_nx, pathB_nx, ny, n, num_voxel)
		write_perm_maxTFCE_voxel('Zstat_%s' % medtype, SobelZ, calcTFCE)
	print("Finished. Randomization took %.1f seconds" % (time() - start_time))
def run(opts): scriptwd = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) surface = opts.surface[0] FWHM = opts.fwhm[0] #load surface data img_data_lh = nib.freesurfer.mghformat.load("lh.all.%s.%s.mgh" % (surface, FWHM)) data_full_lh = img_data_lh.get_data() data_lh = np.squeeze(data_full_lh) affine_mask_lh = img_data_lh.get_affine() n = data_lh.shape[1] # num_subjects outdata_mask_lh = np.zeros_like(data_full_lh[:, :, :, 1]) img_data_rh = nib.freesurfer.mghformat.load("rh.all.%s.%s.mgh" % (surface, FWHM)) data_full_rh = img_data_rh.get_data() data_rh = np.squeeze(data_full_rh) affine_mask_rh = img_data_rh.get_affine() outdata_mask_rh = np.zeros_like(data_full_rh[:, :, :, 1]) if not os.path.exists("lh.mean.%s.%s.mgh" % (surface, FWHM)): mean_lh = np.sum(data_lh, axis=1) / data_lh.shape[1] outmean_lh = np.zeros_like(data_full_lh[:, :, :, 1]) outmean_lh[:, 0, 0] = mean_lh nib.save(nib.freesurfer.mghformat.MGHImage(outmean_lh, affine_mask_lh), "lh.mean.%s.%s.mgh" % (surface, FWHM)) mean_rh = np.sum(data_rh, axis=1) / data_rh.shape[1] outmean_rh = np.zeros_like(data_full_rh[:, :, :, 1]) outmean_rh[:, 0, 0] = mean_rh nib.save(nib.freesurfer.mghformat.MGHImage(outmean_rh, affine_mask_rh), "rh.mean.%s.%s.mgh" % (surface, FWHM)) else: img_mean_lh = nib.freesurfer.mghformat.load("lh.mean.%s.%s.mgh" % (surface, FWHM)) mean_full_lh = img_mean_lh.get_data() mean_lh = np.squeeze(mean_full_lh) img_mean_rh = nib.freesurfer.mghformat.load("rh.mean.%s.%s.mgh" % (surface, FWHM)) mean_full_rh = img_mean_rh.get_data() mean_rh = np.squeeze(mean_full_rh) #TFCE if opts.triangularmesh: print("Creating adjacency set") if opts.inputsurfs: # 3 Neighbour vertex connectity v_lh, faces_lh = nib.freesurfer.read_geometry(opts.inputsurfs[0]) v_rh, faces_rh = nib.freesurfer.read_geometry(opts.inputsurfs[1]) else: v_lh, faces_lh = nib.freesurfer.read_geometry( "%s/fsaverage/surf/lh.sphere" % os.environ["SUBJECTS_DIR"]) v_rh, faces_rh = nib.freesurfer.read_geometry( "%s/fsaverage/surf/rh.sphere" % os.environ["SUBJECTS_DIR"]) adjac_lh = create_adjac_vertex(v_lh, faces_lh) adjac_rh = create_adjac_vertex(v_rh, faces_rh) elif opts.adjfiles: print("Loading prior adjacency set") arg_adjac_lh = opts.adjfiles[0] arg_adjac_rh = opts.adjfiles[1] adjac_lh = np.load(arg_adjac_lh) adjac_rh = np.load(arg_adjac_rh) elif opts.dist: print("Loading prior adjacency set for %s mm" % opts.dist[0]) adjac_lh = np.load("%s/adjacency_sets/lh_adjacency_dist_%s.0_mm.npy" % (scriptwd, str(opts.dist[0]))) adjac_rh = np.load("%s/adjacency_sets/rh_adjacency_dist_%s.0_mm.npy" % (scriptwd, str(opts.dist[0]))) else: print("Error") if opts.noweight or opts.triangularmesh: vdensity_lh = 1 vdensity_rh = 1 else: # correction for vertex density vdensity_lh = np.zeros((adjac_lh.shape[0])) vdensity_rh = np.zeros((adjac_rh.shape[0])) for i in range(adjac_lh.shape[0]): vdensity_lh[i] = len(adjac_lh[i]) for j in range(adjac_rh.shape[0]): vdensity_rh[j] = len(adjac_rh[j]) vdensity_lh = np.array((1 - (vdensity_lh / vdensity_lh.max()) + (vdensity_lh.mean() / vdensity_lh.max())), dtype=np.float32) vdensity_rh = np.array((1 - (vdensity_rh / vdensity_rh.max()) + (vdensity_rh.mean() / vdensity_rh.max())), dtype=np.float32) calcTFCE_lh = CreateAdjSet(float(opts.tfce[0]), float(opts.tfce[1]), adjac_lh) calcTFCE_rh = CreateAdjSet(float(opts.tfce[0]), float(opts.tfce[1]), adjac_rh) #create masks if opts.fmri: maskthresh = opts.fmri print(("fMRI threshold mask = %2.2f" % maskthresh)) bin_mask_lh = np.logical_or(mean_lh > maskthresh, mean_lh < (-1 * maskthresh)) 
bin_mask_rh = np.logical_or(mean_rh > maskthresh, mean_rh < (-1 * maskthresh)) elif opts.fsmask: label = opts.fsmask print(("Loading fsaverage ?l.%s.label" % label)) index_lh, _, _ = convert_fslabel("%s/fsaverage/label/lh.%s.label" % (os.environ["SUBJECTS_DIR"], label)) index_rh, _, _ = convert_fslabel("%s/fsaverage/label/rh.%s.label" % (os.environ["SUBJECTS_DIR"], label)) bin_mask_lh = np.zeros_like(mean_lh) bin_mask_lh[index_lh] = 1 bin_mask_lh = bin_mask_lh.astype(bool) bin_mask_rh = np.zeros_like(mean_rh) bin_mask_rh[index_rh] = 1 bin_mask_rh = bin_mask_rh.astype(bool) elif opts.label: label_lh = opts.label[0] label_rh = opts.label[1] index_lh, _, _ = convert_fslabel(label_lh) index_rh, _, _ = convert_fslabel(label_rh) bin_mask_lh = np.zeros_like(mean_lh) bin_mask_lh[index_lh] = 1 bin_mask_lh = bin_mask_lh.astype(bool) bin_mask_rh = np.zeros_like(mean_rh) bin_mask_rh[index_rh] = 1 bin_mask_rh = bin_mask_rh.astype(bool) elif opts.binmask: print("Loading masks") binmgh_lh = np.squeeze( nib.freesurfer.mghformat.load(opts.binmask[0]).get_data()) binmgh_rh = np.squeeze( nib.freesurfer.mghformat.load(opts.binmask[1]).get_data()) bin_mask_lh = binmgh_lh > .99 bin_mask_rh = binmgh_rh > .99 else: bin_mask_lh = mean_lh != 0 bin_mask_rh = mean_rh != 0 data_lh = data_lh[bin_mask_lh] num_vertex_lh = data_lh.shape[0] data_rh = data_rh[bin_mask_rh] num_vertex_rh = data_rh.shape[0] num_vertex = num_vertex_lh + num_vertex_rh all_vertex = data_full_lh.shape[0] if opts.input: #load variables arg_predictor = opts.input[0] arg_covars = opts.input[1] pred_x = np.genfromtxt(arg_predictor, delimiter=',') covars = np.genfromtxt(arg_covars, delimiter=',') #step1 x_covars = np.column_stack([np.ones(n), covars]) y_lh = resid_covars(x_covars, data_lh) y_rh = resid_covars(x_covars, data_rh) merge_y = np.hstack((y_lh, y_rh)) del y_lh del y_rh if opts.regressors: arg_predictor = opts.regressors[0] pred_x = np.genfromtxt(arg_predictor, delimiter=',') merge_y = np.hstack((data_lh.T, data_rh.T)) #save variables if not os.path.exists("python_temp_%s" % (surface)): os.mkdir("python_temp_%s" % (surface)) np.save("python_temp_%s/pred_x" % (surface), pred_x) np.save("python_temp_%s/num_subjects" % (surface), n) np.save("python_temp_%s/all_vertex" % (surface), all_vertex) np.save("python_temp_%s/num_vertex" % (surface), num_vertex) np.save("python_temp_%s/num_vertex_lh" % (surface), num_vertex_lh) np.save("python_temp_%s/num_vertex_rh" % (surface), num_vertex_rh) np.save("python_temp_%s/bin_mask_lh" % (surface), bin_mask_lh) np.save("python_temp_%s/bin_mask_rh" % (surface), bin_mask_rh) np.save("python_temp_%s/affine_mask_lh" % (surface), affine_mask_lh) np.save("python_temp_%s/affine_mask_rh" % (surface), affine_mask_rh) np.save("python_temp_%s/adjac_lh" % (surface), adjac_lh) np.save("python_temp_%s/adjac_rh" % (surface), adjac_rh) np.save("python_temp_%s/merge_y" % (surface), merge_y.astype(np.float32, order="C")) np.save('python_temp_%s/optstfce' % (surface), opts.tfce) np.save('python_temp_%s/vdensity_lh' % (surface), vdensity_lh) np.save('python_temp_%s/vdensity_rh' % (surface), vdensity_rh) #write TFCE images if not os.path.exists("output_%s" % (surface)): os.mkdir("output_%s" % (surface)) os.chdir("output_%s" % (surface)) #step2 X = np.column_stack([np.ones(n), pred_x]) k = len(X.T) if opts.vertexregressor: img_x_lh = np.squeeze( nib.freesurfer.mghformat.load("../%s" % opts.vertexregressor[0]).get_data()) img_x_rh = np.squeeze( nib.freesurfer.mghformat.load("../%s" % opts.vertexregressor[1]).get_data()) img_x_lh = 
img_x_lh[bin_mask_lh] img_x_rh = img_x_rh[bin_mask_rh] img_x = np.hstack((img_x_lh.T, img_x_rh.T)) img_x_lh = img_x_rh = None merge_y = np.hstack((data_lh.T, data_rh.T)) tvals, timage = image_regression(merge_y.T.astype(np.float32), img_x.T.astype(np.float32), pred_x, covars) VIF = image_reg_VIF(merge_y, np.column_stack((pred_x, covars))) tvals = tvals.T timage = timage.T write_vertStat_img('tstat_imgcovar', timage[:num_vertex_lh], outdata_mask_lh, affine_mask_lh, surface, 'lh', bin_mask_lh, calcTFCE_lh, bin_mask_lh.shape[0], vdensity_lh) write_vertStat_img('tstat_imgcovar', timage[num_vertex_lh:], outdata_mask_rh, affine_mask_rh, surface, 'rh', bin_mask_rh, calcTFCE_rh, bin_mask_rh.shape[0], vdensity_rh) write_vertStat_img('negtstat_imgcovar', -timage[:num_vertex_lh], outdata_mask_lh, affine_mask_lh, surface, 'lh', bin_mask_lh, calcTFCE_lh, bin_mask_lh.shape[0], vdensity_lh) write_vertStat_img('negtstat_imgcovar', -timage[num_vertex_lh:], outdata_mask_rh, affine_mask_rh, surface, 'rh', bin_mask_rh, calcTFCE_rh, bin_mask_rh.shape[0], vdensity_rh) write_vertStat_img('VIF_imgcovar', VIF[:num_vertex_lh], outdata_mask_lh, affine_mask_lh, surface, 'lh', bin_mask_lh, calcTFCE_lh, bin_mask_lh.shape[0], vdensity_lh, TFCE=False) write_vertStat_img('VIF_imgcovar', VIF[num_vertex_lh:], outdata_mask_rh, affine_mask_rh, surface, 'rh', bin_mask_rh, calcTFCE_rh, bin_mask_rh.shape[0], vdensity_rh, TFCE=False) else: invXX = np.linalg.inv(np.dot(X.T, X)) tvals = tval_int(X, invXX, merge_y, n, k, num_vertex) for j in range(k - 1): tnum = j + 1 write_vertStat_img('tstat_con%d' % tnum, tvals[tnum, :num_vertex_lh], outdata_mask_lh, affine_mask_lh, surface, 'lh', bin_mask_lh, calcTFCE_lh, bin_mask_lh.shape[0], vdensity_lh) write_vertStat_img('tstat_con%d' % tnum, tvals[tnum, num_vertex_lh:], outdata_mask_rh, affine_mask_rh, surface, 'rh', bin_mask_rh, calcTFCE_rh, bin_mask_rh.shape[0], vdensity_rh) write_vertStat_img('negtstat_con%d' % tnum, (tvals[tnum, :num_vertex_lh] * -1), outdata_mask_lh, affine_mask_lh, surface, 'lh', bin_mask_lh, calcTFCE_lh, bin_mask_lh.shape[0], vdensity_lh) write_vertStat_img('negtstat_con%d' % tnum, (tvals[tnum, num_vertex_lh:] * -1), outdata_mask_rh, affine_mask_rh, surface, 'rh', bin_mask_rh, calcTFCE_rh, bin_mask_rh.shape[0], vdensity_rh)
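# The next run() permutes the design matrix for the vertex-wise multiple regression
# (optionally within exchangeability blocks or over a specified range of regressors),
# recomputes t-values, and records the maximum TFCE value per contrast for FWE correction.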
def run(opts): arg_perm_start = int(opts.range[0]) arg_perm_stop = int(opts.range[1]) + 1 surface = str(opts.surface[0]) if opts.exchangeblock: block_list = np.genfromtxt(opts.exchangeblock[0], dtype=np.str) indexer = np.array(range(len(block_list))) #load variables ny = np.load("python_temp_%s/merge_y.npy" % (surface)) num_vertex = np.load("python_temp_%s/num_vertex.npy" % (surface)) num_vertex_lh = np.load("python_temp_%s/num_vertex_lh.npy" % (surface)) all_vertex = np.load("python_temp_%s/all_vertex.npy" % (surface)) bin_mask_lh = np.load("python_temp_%s/bin_mask_lh.npy" % (surface)) bin_mask_rh = np.load("python_temp_%s/bin_mask_rh.npy" % (surface)) n = np.load("python_temp_%s/num_subjects.npy" % (surface)) pred_x = np.load("python_temp_%s/pred_x.npy" % (surface)) adjac_lh = np.load("python_temp_%s/adjac_lh.npy" % (surface)) adjac_rh = np.load("python_temp_%s/adjac_rh.npy" % (surface)) optstfce = np.load('python_temp_%s/optstfce.npy' % (surface)) vdensity_lh = np.load('python_temp_%s/vdensity_lh.npy' % (surface)) vdensity_rh = np.load('python_temp_%s/vdensity_rh.npy' % (surface)) #load TFCE fucntion calcTFCE_lh = CreateAdjSet(float(optstfce[0]), float(optstfce[1]), adjac_lh) # H=2, E=1 calcTFCE_rh = CreateAdjSet(float(optstfce[0]), float(optstfce[1]), adjac_rh) # H=2, E=1 #permute T values and write max TFCE values if not os.path.exists("output_%s/perm_Tstat_%s" % (surface, surface)): os.mkdir("output_%s/perm_Tstat_%s" % (surface, surface)) os.chdir("output_%s/perm_Tstat_%s" % (surface, surface)) X = np.column_stack([np.ones(n), pred_x]) k = len(X.T) for iter_perm in range(arg_perm_start, arg_perm_stop): np.random.seed(int(iter_perm * 1000 + time())) print("Iteration number : %d" % (iter_perm)) if opts.specifyvars: start = opts.specifyvars[0] stop = opts.specifyvars[1] + 1 nx = X nx[:, start:stop] = X[:, start:stop][np.random.permutation( list(range(n)))] elif opts.exchangeblock: randindex = [] for block in np.random.permutation(list(np.unique(block_list))): randindex.append( np.random.permutation(indexer[block_list == block])) randindex = np.concatenate(np.array(randindex)) nx = X[randindex] else: nx = X[np.random.permutation(list(range(n)))] invXX = np.linalg.inv(np.dot(nx.T, nx)) tvals = tval_int(nx, invXX, ny, n, k, num_vertex) if opts.specifyvars: for j in range(stop - start): tnum = j + 1 write_perm_maxTFCE_vertex('tstat_con%d' % tnum, tvals[tnum], num_vertex_lh, bin_mask_lh, bin_mask_rh, calcTFCE_lh, calcTFCE_rh, vdensity_lh, vdensity_rh) write_perm_maxTFCE_vertex('tstat_con%d' % tnum, (tvals[tnum] * -1), num_vertex_lh, bin_mask_lh, bin_mask_rh, calcTFCE_lh, calcTFCE_rh, vdensity_lh, vdensity_rh) else: for j in range(k - 1): tnum = j + 1 write_perm_maxTFCE_vertex('tstat_con%d' % tnum, tvals[tnum], num_vertex_lh, bin_mask_lh, bin_mask_rh, calcTFCE_lh, calcTFCE_rh, vdensity_lh, vdensity_rh) write_perm_maxTFCE_vertex('tstat_con%d' % tnum, (tvals[tnum] * -1), num_vertex_lh, bin_mask_lh, bin_mask_rh, calcTFCE_lh, calcTFCE_rh, vdensity_lh, vdensity_rh) print( ("Finished. Randomization took %.1f seconds" % (time() - start_time)))
def run(opts): currentTime = int(time()) if opts.multisurfacefwecorrection: ############################# ###### FWER CORRECTION ###### ############################# _, image_array, masking_array, maskname, affine_array, vertex_array, face_array, surfname, adjacency_array, tmi_history, columnids = read_tm_filetype( '%s' % opts.tmifile[0], verbose=False) # check file dimensions if not image_array[0].shape[1] % 3 == 0: print( 'Print file format is not understood. Please make sure %s is statistics file.' % opts.tmifile[0]) quit() else: num_contrasts = int(image_array[0].shape[1] / 3) # get surface coordinates in data array position_array = create_position_array(masking_array) if num_contrasts == 1: # get lists for positive and negative contrasts pos_range = [1] neg_range = [2] else: # get lists for positive and negative contrasts pos_range = list( range(num_contrasts, num_contrasts + num_contrasts)) neg_range = list( range(num_contrasts * 2, num_contrasts * 2 + num_contrasts)) # check that randomisation has been run if not os.path.exists( "%s/output_%s/perm_maxTFCE_surf0_tcon1.csv" % (os.getcwd(), opts.tmifile[0])): # make this safer print( 'Permutation folder not found. Please run --randomise first.') quit() #check permutation file lengths num_surf = len(masking_array) surface_range = list(range(num_surf)) num_perm = lowest_length(num_contrasts, surface_range, opts.tmifile[0]) if opts.setsurfacerange: surface_range = list( range(opts.setsurfacerange[0], opts.setsurfacerange[1] + 1)) elif opts.setsurface: surface_range = opts.setsurface if np.array(surface_range).max() > len(masking_array): print( "Error: range does note fit the surfaces contained in the tmi file. %s contains the following surfaces" % opts.tmifile[0]) for i in range(len(surfname)): print(("Surface %d : %s, %s" % (i, surfname[i], maskname[i]))) quit() print("Reading %d contrast(s) from %d of %d surface(s)" % ((num_contrasts), len(surface_range), num_surf)) print("Reading %s permutations with an accuracy of p=0.05+/-%.4f" % (num_perm, (2 * (np.sqrt(0.05 * 0.95 / num_perm))))) # calculate the P(FWER) images from all surfaces positive_data, negative_data = apply_mfwer(image_array, num_contrasts, surface_range, num_perm, num_surf, opts.tmifile[0], position_array, pos_range, neg_range, weight='logmasksize') # write out files if opts.concatestats: write_tm_filetype(opts.tmifile[0], image_array=positive_data, masking_array=masking_array, maskname=maskname, affine_array=affine_array, vertex_array=vertex_array, face_array=face_array, surfname=surfname, adjacency_array=adjacency_array, checkname=False, tmi_history=tmi_history) _, image_array, masking_array, maskname, affine_array, vertex_array, face_array, surfname, adjacency_array, tmi_history, columnids = read_tm_filetype( opts.tmifile[0], verbose=False) write_tm_filetype(opts.tmifile[0], image_array=np.column_stack( (image_array[0], negative_data)), masking_array=masking_array, maskname=maskname, affine_array=affine_array, vertex_array=vertex_array, face_array=face_array, surfname=surfname, adjacency_array=adjacency_array, checkname=False, tmi_history=tmi_history) else: for i in range(len(opts.outtype)): if opts.outtype[i] == 'tmi': contrast_names = [] for j in range(num_contrasts): contrast_names.append(("tstat_pFWER_con%d" % (j + 1))) for k in range(num_contrasts): contrast_names.append( ("negtstat_pFWER_con%d" % (k + 1))) outdata = np.column_stack((positive_data, negative_data)) if opts.neglog: for j in range(num_contrasts): contrast_names.append( ("tstat_negLog_pFWER_con%d" % (j 
+ 1))) for k in range(num_contrasts): contrast_names.append( ("negtstat_negLog_pFWER_con%d" % (k + 1))) outdata = np.column_stack( (outdata, -np.log10(1 - positive_data))) outdata = np.column_stack( (outdata, -np.log10(1 - negative_data))) write_tm_filetype("pFWER_%s" % opts.tmifile[0], image_array=outdata, masking_array=masking_array, maskname=maskname, affine_array=affine_array, vertex_array=vertex_array, face_array=face_array, surfname=surfname, checkname=False, columnids=np.array(contrast_names), tmi_history=tmi_history) else: if opts.outtype[i] == 'mgh': savefunc = savemgh_v2 if opts.outtype[i] == 'nii.gz': savefunc = savenifti_v2 if opts.outtype[i] == 'auto': savefunc = saveauto for surf_count in surface_range: start = position_array[surf_count] end = position_array[surf_count + 1] basename = strip_basename(maskname[surf_count]) if not os.path.exists("output_stats"): os.mkdir("output_stats") out_image = positive_data[start:end] temp_image = negative_data[start:end] for contrast in range(num_contrasts): out_image[temp_image[:, contrast] != 0, contrast] = temp_image[ temp_image[:, contrast] != 0, contrast] * -1 if affine_array == []: savefunc( out_image, masking_array[surf_count], "output_stats/%d_%s_pFWER" % (surf_count, basename)) else: savefunc( out_image, masking_array[surf_count], "output_stats/%d_%s_pFWER" % (surf_count, basename), affine_array[surf_count]) if opts.neglog: out_image = -np.log10(1 - positive_data[start:end, contrast]) temp_image = np.log10(1 - negative_data[start:end, contrast]) for contrast in range(num_contrasts): out_image[temp_image[:, contrast] != 0, contrast] = temp_image[ temp_image[:, contrast] != 0, contrast] if affine_array == []: savefunc( out_image, masking_array[surf_count], "output_stats/%d_%s_negLog_pFWER" % (surf_count, basename)) else: savefunc( out_image, masking_array[surf_count], "output_stats/%d_%s_negLog_pFWER" % (surf_count, basename), affine_array[surf_count]) if opts.outputply: colorbar = True if not os.path.exists("output_ply"): os.mkdir("output_ply") for contrast in range(num_contrasts): for surf_count in surface_range: start = position_array[surf_count] end = position_array[surf_count + 1] basename = strip_basename(maskname[surf_count]) if masking_array[surf_count].shape[2] > 1: img_data = np.zeros((masking_array[surf_count].shape)) combined_data = positive_data[start:end, contrast] combined_data[combined_data <= 0] = negative_data[ start:end, contrast][combined_data <= 0] * -1 combined_data[np.abs(combined_data) < float( opts.outputply[0])] = 0 img_data[masking_array[surf_count]] = combined_data v, f, values = convert_voxel( img_data, affine=affine_array[surf_count], absthreshold=float(opts.outputply[0])) if not v == []: out_color_array = paint_surface( opts.outputply[0], opts.outputply[1], opts.outputply[2], values, save_colorbar=colorbar) negvalues = values * -1 index = negvalues > float(opts.outputply[0]) out_color_array2 = paint_surface( opts.outputply[0], opts.outputply[1], opts.outputply[3], negvalues, save_colorbar=colorbar) out_color_array[index, :] = out_color_array2[ index, :] save_ply( v, f, "output_ply/%d_%s_pFWE_tcon%d.ply" % (surf_count, basename, contrast + 1), out_color_array) colorbar = False else: print("No output for %d %s T-contrast %d" % (surf_count, basename, contrast + 1)) else: img_data = np.zeros( (masking_array[surf_count].shape[0])) img_data[masking_array[surf_count][:, 0, 0] == True] = positive_data[start:end, contrast] out_color_array = paint_surface(opts.outputply[0], opts.outputply[1], opts.outputply[2], 
img_data, save_colorbar=colorbar) img_data[masking_array[surf_count][:, 0, 0] == True] = negative_data[start:end, contrast] index = img_data > float(opts.outputply[0]) out_color_array2 = paint_surface( opts.outputply[0], opts.outputply[1], opts.outputply[3], img_data, save_colorbar=colorbar) out_color_array[index, :] = out_color_array2[index, :] save_ply( vertex_array[surf_count], face_array[surf_count], "output_ply/%d_%s_pFWE_tcon%d.ply" % (surf_count, basename, contrast + 1), out_color_array) colorbar = False elif opts.mediationmfwe: # temporary solution -> maybe a general function instead of bulky code _, image_array, masking_array, maskname, affine_array, vertex_array, face_array, surfname, adjacency_array, tmi_history, columnids = read_tm_filetype( '%s' % opts.tmifile[0], verbose=False) # check file dimensions if not image_array[0].shape[1] % 2 == 0: print( 'Print file format is not understood. Please make sure %s is statistics file.' % opts.tmifile[0]) quit() # get surface coordinates in data array position_array = create_position_array(masking_array) # check that randomisation has been run if not os.path.exists("%s/output_%s/perm_maxTFCE_surf0_%s_zstat.csv" % (os.getcwd(), opts.tmifile[0], opts.mediationmfwe[0])): # make this safer print( 'Permutation folder not found. Please run --randomise first.') quit() #check permutation file lengths num_surf = len(masking_array) surface_range = list(range(num_surf)) num_perm = lowest_length(1, surface_range, opts.tmifile[0], medtype=opts.mediationmfwe[0]) if opts.setsurfacerange: surface_range = list( range(opts.setsurfacerange[0], opts.setsurfacerange[1] + 1)) elif opts.setsurface: surface_range = opts.setsurface if np.array(surface_range).max() > len(masking_array): print( "Error: range does note fit the surfaces contained in the tmi file. 
%s contains the following surfaces" % opts.tmifile[0]) for i in range(len(surfname)): print(("Surface %d : %s, %s" % (i, surfname[i], maskname[i]))) quit() print("Reading %d contrast(s) from %d of %d surface(s)" % (1, len(surface_range), num_surf)) print("Reading %s permutations with an accuracy of p=0.05+/-%.4f" % (num_perm, (2 * (np.sqrt(0.05 * 0.95 / num_perm))))) # calculate the P(FWER) images from all surfaces positive_data = apply_mfwer(image_array, 1, surface_range, num_perm, num_surf, opts.tmifile[0], position_array, [1], weight='logmasksize', mediation=True, medtype=opts.mediationmfwe[0]) if opts.outtype[0] == 'tmi': contrast_names = [] contrast_names.append(("zstat_pFWER")) outdata = positive_data if opts.neglog: contrast_names.append(("zstat_negLog_pFWER")) outdata = np.column_stack( (outdata, -np.log10(1 - positive_data))) write_tm_filetype("pFWER_%s" % (opts.tmifile[0]), image_array=outdata, masking_array=masking_array, maskname=maskname, affine_array=affine_array, vertex_array=vertex_array, face_array=face_array, surfname=surfname, checkname=False, columnids=np.array(contrast_names), tmi_history=tmi_history) else: ################################## ###### STATISTICAL ANALYSIS ###### ################################## # read tmi file if opts.randomise: _, image_array, masking_array, _, _, _, _, _, adjacency_array, _, _ = read_tm_filetype( opts.tmifile[0]) _ = None else: element, image_array, masking_array, maskname, affine_array, vertex_array, face_array, surfname, adjacency_array, tmi_history, _ = read_tm_filetype( opts.tmifile[0]) # get surface coordinates in data array position_array = create_position_array(masking_array) if opts.setadjacencyobjs: if len(opts.setadjacencyobjs) == len(masking_array): adjacent_range = np.array(opts.setadjacencyobjs, dtype=np.int) else: print( "Error: # of masking arrays (%d) must and list of matching adjacency (%d) must be equal." % (len(masking_array), len(opts.setadjacencyobjs))) quit() else: adjacent_range = list(range(len(adjacency_array))) calcTFCE = [] if opts.assigntfcesettings: if not len(opts.assigntfcesettings) == len(masking_array): print( "Error: # of masking arrays (%d) must and list of matching tfce setting (%d) must be equal." % (len(masking_array), len(opts.assigntfcesettings))) quit() if not len(opts.tfce) % 2 == 0: print("Error. 
The must be an even number of input for --tfce") quit() tfce_settings_mask = [] for i in np.unique(opts.assigntfcesettings): tfce_settings_mask.append( (np.array(opts.assigntfcesettings) == int(i))) pointer = int(i * 2) adjacency = merge_adjacency_array( np.array(adjacent_range)[tfce_settings_mask[int(i)]], np.array(adjacency_array)[tfce_settings_mask[int(i)]]) calcTFCE.append((CreateAdjSet(float(opts.tfce[pointer]), float(opts.tfce[pointer + 1]), adjacency))) del adjacency else: adjacency = merge_adjacency_array(adjacent_range, adjacency_array) calcTFCE.append((CreateAdjSet(float(opts.tfce[0]), float(opts.tfce[1]), adjacency))) # make mega mask fullmask = create_full_mask(masking_array) if not opts.noweight: # correction for vertex density vdensity = [] #np.ones_like(masking_array) for i in range(len(masking_array)): temp_vdensity = np.zeros( (adjacency_array[adjacent_range[i]].shape[0])) for j in range(adjacency_array[adjacent_range[i]].shape[0]): temp_vdensity[j] = len( adjacency_array[adjacent_range[i]][j]) if masking_array[i].shape[2] == 1: temp_vdensity = temp_vdensity[masking_array[i][:, 0, 0] == True] vdensity = np.hstack( (vdensity, np.array((1 - (temp_vdensity / temp_vdensity.max()) + (temp_vdensity.mean() / temp_vdensity.max())), dtype=np.float32))) del temp_vdensity else: vdensity = 1 #load regressors if opts.input: for i, arg_pred in enumerate(opts.input): if i == 0: pred_x = np.genfromtxt(arg_pred, delimiter=',') else: pred_x = np.column_stack( [pred_x, np.genfromtxt(arg_pred, delimiter=',')]) if opts.covariates: covars = np.genfromtxt(opts.covariates[0], delimiter=',') x_covars = np.column_stack([np.ones(len(covars)), covars]) if opts.subset: masking_variable = np.isfinite( np.genfromtxt(str(opts.subset[0]), delimiter=',')) if opts.covariates: merge_y = resid_covars(x_covars, image_array[0][:, masking_variable]) else: merge_y = image_array[0][:, masking_variable].T print("Check dimensions") # CHECK print(merge_y.shape) else: if opts.covariates: merge_y = resid_covars(x_covars, image_array[0]) else: merge_y = image_array[0].T if opts.inputmediation: medtype = opts.inputmediation[0] pred_x = np.genfromtxt(opts.inputmediation[1], delimiter=',') depend_y = np.genfromtxt(opts.inputmediation[2], delimiter=',') if opts.covariates: covars = np.genfromtxt(opts.covariates[0], delimiter=',') x_covars = np.column_stack([np.ones(len(covars)), covars]) merge_y = resid_covars(x_covars, image_array[0]) else: merge_y = image_array[0].T if opts.regressors: arg_predictor = opts.regressors[0] pred_x = np.genfromtxt(arg_predictor, delimiter=',') if opts.subset: masking_variable = np.isfinite( np.genfromtxt(str(opts.subset[0]), delimiter=',')) merge_y = image_array[0][:, masking_variable].T else: merge_y = image_array[0].T # cleanup image_array = None adjacency_array = None adjacency = None if opts.analysisname: outname = opts.analysisname[0] else: outname = opts.tmifile[0][:-4] # make output folder if not os.path.exists("output_%s" % (outname)): os.mkdir("output_%s" % (outname)) os.chdir("output_%s" % (outname)) if opts.randomise: randTime = int(time()) mapped_y = merge_y.astype(np.float32, order="C") # removed memory mapping merge_y = None if not outname.endswith('tmi'): outname += '.tmi' if opts.inputmediation: outname = 'med_stats_' + outname else: outname = 'stats_' + outname if not os.path.exists("output_%s" % (outname)): os.mkdir("output_%s" % (outname)) os.chdir("output_%s" % (outname)) for i in range(opts.randomise[0], (opts.randomise[1] + 1)): if opts.assigntfcesettings: 
calc_mixed_tfce(opts.assigntfcesettings, mapped_y, masking_array, position_array, vdensity, pred_x, calcTFCE, perm_number=i, randomise=True) elif opts.inputmediation: calculate_mediation_tfce(medtype, mapped_y, masking_array, pred_x, depend_y, calcTFCE[0], vdensity, position_array, fullmask, perm_number=i, randomise=True) else: calculate_tfce(mapped_y, masking_array, pred_x, calcTFCE[0], vdensity, position_array, fullmask, perm_number=i, randomise=True) print(("Total time took %.1f seconds" % (time() - currentTime))) print(("Randomization took %.1f seconds" % (time() - randTime))) else: # Run TFCE if opts.assigntfcesettings: tvals, tfce_tvals, neg_tfce_tvals = calc_mixed_tfce( opts.assigntfcesettings, merge_y, masking_array, position_array, vdensity, pred_x, calcTFCE) elif opts.inputmediation: SobelZ, tfce_SobelZ = calculate_mediation_tfce( medtype, merge_y, masking_array, pred_x, depend_y, calcTFCE[0], vdensity, position_array, fullmask) else: tvals, tfce_tvals, neg_tfce_tvals = calculate_tfce( merge_y, masking_array, pred_x, calcTFCE[0], vdensity, position_array, fullmask) if opts.outtype[0] == 'tmi': if not outname.endswith('tmi'): outname += '.tmi' if opts.inputmediation: outname = 'med_stats_' + outname else: outname = 'stats_' + outname if opts.inputmediation: contrast_names = [] contrast_names.append(("SobelZ")) contrast_names.append(("SobelZ_tfce")) outdata = np.column_stack((SobelZ.T, tfce_SobelZ.T)) else: if tvals.ndim == 1: num_contrasts = 1 else: num_contrasts = tvals.shape[0] contrast_names = [] for i in range(num_contrasts): contrast_names.append(("tstat_con%d" % (i + 1))) for j in range(num_contrasts): contrast_names.append(("tstat_tfce_con%d" % (j + 1))) for k in range(num_contrasts): contrast_names.append( ("negtstat_tfce_con%d" % (k + 1))) outdata = np.column_stack((tvals.T, tfce_tvals.T)) outdata = np.column_stack((outdata, neg_tfce_tvals.T)) # write tstat write_tm_filetype(outname, image_array=outdata, masking_array=masking_array, maskname=maskname, affine_array=affine_array, vertex_array=vertex_array, face_array=face_array, surfname=surfname, checkname=False, columnids=np.array(contrast_names), tmi_history=[]) else: print("not implemented yet")
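# The next run() is the low-RAM worker used by mmr-lr: it rebuilds the statistics (or the
# permutation distributions) one surface/mask at a time from the objects stored in
# tmi_temp/, rather than holding the whole tmi image array in memory at once.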
def run(opts):
	try:
		sopts = np.load("tmi_temp/opts.npy").tolist()
	except:
		print("Error: tmi_temp was not found. Run mmr-lr first, or change directory to where tmi_temp is located.") # this error should never happen.
	tfce_settings = []
	masking_array = np.load("tmi_temp/masking_array.npy")
	for surf_num in range(len(masking_array)):
		if sopts.assigntfcesettings:
			pointer = int(sopts.assigntfcesettings[surf_num] * 2)
			tfce_settings.append(([sopts.tfce[pointer], sopts.tfce[pointer + 1]]))
	if opts.outputstats:
		# load npy objects for building the tmi file
		maskname = np.load("tmi_temp/maskname.npy")
		affine_array = np.load("tmi_temp/affine_array.npy")
		vertex_array = np.load("tmi_temp/vertex_array.npy")
		face_array = np.load("tmi_temp/face_array.npy")
		surfname = np.load("tmi_temp/surfname.npy")
		for surf_num in range(len(masking_array)):
			print("Calculating stats for:\t %s" % maskname[surf_num])
			adjacency = np.load("tmi_temp/%d_adjacency_temp.npy" % surf_num)
			mask = np.load("tmi_temp/%d_mask_temp.npy" % surf_num)
			data = np.load("tmi_temp/%d_data_temp.npy" % surf_num)
			vdensity = np.load("tmi_temp/%d_vdensity_temp.npy" % surf_num)
			if not sopts.assigntfcesettings:
				calcTFCE = CreateAdjSet(float(sopts.tfce[0]), float(sopts.tfce[1]), adjacency)
			if sopts.input:
				for i, arg_pred in enumerate(sopts.input):
					if i == 0:
						pred_x = np.genfromtxt(arg_pred, delimiter=',')
					else:
						pred_x = np.column_stack([pred_x, np.genfromtxt(arg_pred, delimiter=',')])
				if sopts.assigntfcesettings:
					calcTFCE = CreateAdjSet(float(tfce_settings[surf_num][0]), float(tfce_settings[surf_num][1]), adjacency)
				temp_tvals, temp_tfce_tvals, temp_neg_tfce_tvals = low_ram_calculate_tfce(data, mask, pred_x, calcTFCE, vdensity, set_surf_count = surf_num, randomise = False, no_intercept = True)
				if surf_num == 0:
					tvals = temp_tvals
					tfce_tvals = temp_tfce_tvals
					neg_tfce_tvals = temp_neg_tfce_tvals
					if pred_x.ndim == 1: # get number of contrasts
						num_contrasts = 1
					else:
						num_contrasts = pred_x.shape[1]
				else:
					tvals = np.concatenate((tvals, temp_tvals), 1)
					tfce_tvals = np.concatenate((tfce_tvals, temp_tfce_tvals), 1)
					neg_tfce_tvals = np.concatenate((neg_tfce_tvals, temp_neg_tfce_tvals), 1)
			if sopts.inputmediation:
				medtype = sopts.inputmediation[0]
				pred_x = np.genfromtxt(sopts.inputmediation[1], delimiter=',')
				depend_y = np.genfromtxt(sopts.inputmediation[2], delimiter=',')
				if sopts.assigntfcesettings:
					calcTFCE = CreateAdjSet(float(tfce_settings[surf_num][0]), float(tfce_settings[surf_num][1]), adjacency)
				temp_zvals, temp_tfce_zvals = low_ram_calculate_mediation_tfce(medtype, data, mask, pred_x, depend_y, calcTFCE, vdensity, set_surf_count = surf_num, randomise = False, no_intercept = True)
				if surf_num == 0:
					zvals = temp_zvals
					tfce_zvals = temp_tfce_zvals
					if pred_x.ndim == 1: # get number of contrasts
						num_contrasts = 1
					else:
						num_contrasts = pred_x.shape[1]
				else:
					zvals = np.concatenate((zvals, temp_zvals))
					tfce_zvals = np.concatenate((tfce_zvals, temp_tfce_zvals))
		if sopts.input:
			data = np.column_stack((tvals.T, tfce_tvals.T))
			data = np.column_stack((data, neg_tfce_tvals.T))
			contrast_names = []
			for i in range(num_contrasts):
				contrast_names.append(("tstat_con%d" % (i + 1)))
			for j in range(num_contrasts):
				contrast_names.append(("tstat_tfce_con%d" % (j + 1)))
			for k in range(num_contrasts):
				contrast_names.append(("negtstat_tfce_con%d" % (k + 1)))
		if sopts.inputmediation:
			data = np.column_stack((zvals.T, tfce_zvals.T))
			contrast_names = []
			contrast_names.append(("SobelZ"))
			contrast_names.append(("SobelZ_tfce"))
		outname = os.path.basename(str(opts.path[0]))[7:]
		if not outname.endswith('tmi'):
			outname += '.tmi'
		write_tm_filetype("%s/%s" % (os.path.dirname(str(opts.path[0])), outname), image_array = data, masking_array = masking_array, maskname = maskname, affine_array = affine_array, vertex_array = vertex_array, face_array = face_array, surfname = surfname, checkname = True, columnids = np.array(contrast_names), tmi_history=[])
	else:
		currentTime = int(time())
		surf_num = int(opts.surfacenumber[0])
		p_range = np.array(opts.permutationrange)
		adjacency = np.load("tmi_temp/%d_adjacency_temp.npy" % surf_num)
		mask = np.load("tmi_temp/%d_mask_temp.npy" % surf_num)
		data = np.load("tmi_temp/%d_data_temp.npy" % surf_num)
		vdensity = np.load("tmi_temp/%d_vdensity_temp.npy" % surf_num)
		if sopts.assigntfcesettings:
			calcTFCE = CreateAdjSet(float(tfce_settings[surf_num][0]), float(tfce_settings[surf_num][1]), adjacency)
		else:
			calcTFCE = CreateAdjSet(float(sopts.tfce[0]), float(sopts.tfce[1]), adjacency)
		if sopts.input:
			for i, arg_pred in enumerate(sopts.input):
				if i == 0:
					pred_x = np.genfromtxt(arg_pred, delimiter=',')
				else:
					pred_x = np.column_stack([pred_x, np.genfromtxt(arg_pred, delimiter=',')])
			for perm_number in range(p_range[0], int(p_range[1] + 1)):
				low_ram_calculate_tfce(data, mask, pred_x, calcTFCE, vdensity, set_surf_count = surf_num, perm_number = perm_number, randomise = True, no_intercept = True, output_dir = str(opts.path[0]), perm_seed = int(opts.seed[0]))
		if sopts.inputmediation:
			medtype = sopts.inputmediation[0]
			pred_x = np.genfromtxt(sopts.inputmediation[1], delimiter=',')
			depend_y = np.genfromtxt(sopts.inputmediation[2], delimiter=',')
			for perm_number in range(p_range[0], int(p_range[1] + 1)):
				low_ram_calculate_mediation_tfce(medtype, data, mask, pred_x, depend_y, calcTFCE, vdensity, set_surf_count = surf_num, perm_number = perm_number, randomise = True, no_intercept = True, output_dir = str(opts.path[0]), perm_seed = int(opts.seed[0]))
		print("Mask %d, Iteration %d -> %d took %i seconds." % (surf_num, p_range[0], p_range[1], (int(time()) - currentTime)))
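# The next run() is the voxel-wise permutation script: it permutes the design matrix
# (with optional exchangeability blocks, a specified variable range, or an F-test/ANCOVA
# mode), recomputes t- or F-values, and writes maximum TFCE values per permutation.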
def run(opts):
	arg_perm_start = int(opts.range[0])
	arg_perm_stop = int(opts.range[1]) + 1
	if opts.exchangeblock:
		block_list = np.genfromtxt(opts.exchangeblock[0], dtype=str)
		indexer = np.array(range(len(block_list)))
	np.seterr(divide="ignore", invalid="ignore") #only necessary for ANTS skeleton

	#load variables
	num_voxel = np.load('python_temp/num_voxel.npy')
	n = np.load('python_temp/num_subjects.npy')
	ny = np.load('python_temp/raw_nonzero_corr.npy').T
	pred_x = np.load('python_temp/pred_x.npy')
	adjac = np.load('python_temp/adjac.npy')
	ancova = np.load('python_temp/ancova.npy')
	optstfce = np.load('python_temp/optstfce.npy')

	#load TFCE function
	calcTFCE = CreateAdjSet(float(optstfce[0]), float(optstfce[1]), adjac) # H=2, E=2, 26 neighbour connectivity

	#permute T values and write max TFCE values
	if not os.path.exists('output/perm_Tstat'):
		os.mkdir('output/perm_Tstat')
	os.chdir('output/perm_Tstat')
	X = np.column_stack([np.ones(n), pred_x])
	k = len(X.T)
	if ancova == 1:
		for iter_perm in range(arg_perm_start, int((arg_perm_stop - 1) * 2 + 1)):
			np.random.seed(int(iter_perm * 1000 + time()))
			print("Permutation number: %d" % (iter_perm))
			nx = X[np.random.permutation(list(range(n)))]
			perm_fvals = calcF(nx, ny, n, k)
			perm_fvals[perm_fvals < 0] = 0
			perm_fvals = np.sqrt(perm_fvals)
			print(perm_fvals.max())
			print(perm_fvals.min())
			write_perm_maxTFCE_voxel('fstat', perm_fvals, calcTFCE)
	else:
		for iter_perm in range(arg_perm_start, arg_perm_stop):
			np.random.seed(int(iter_perm * 1000 + time()))
			print("Iteration number : %d" % (iter_perm))
			if opts.specifyvars:
				start = opts.specifyvars[0]
				stop = opts.specifyvars[1] + 1
				nx = X
				nx[:, start:stop] = X[:, start:stop][np.random.permutation(list(range(n)))]
			elif opts.exchangeblock:
				randindex = []
				for block in np.random.permutation(list(np.unique(block_list))):
					randindex.append(np.random.permutation(indexer[block_list == block]))
				randindex = np.concatenate(np.array(randindex))
				nx = X[randindex]
			else:
				nx = X[np.random.permutation(list(range(n)))]
			invXX = np.linalg.inv(np.dot(nx.T, nx))
			perm_tvalues = tval_int(nx, invXX, ny, n, k, num_voxel)
			perm_tvalues[np.isnan(perm_tvalues)] = 0 #only necessary for ANTS skeleton
			if opts.specifyvars:
				for j in range(stop - start):
					tnum = j + 1
					write_perm_maxTFCE_voxel('tstat_con%d' % tnum, perm_tvalues[tnum], calcTFCE)
					write_perm_maxTFCE_voxel('tstat_con%d' % tnum, (perm_tvalues[tnum] * -1), calcTFCE)
			else:
				for j in range(k - 1):
					tnum = j + 1
					write_perm_maxTFCE_voxel('tstat_con%d' % tnum, perm_tvalues[tnum], calcTFCE)
					write_perm_maxTFCE_voxel('tstat_con%d' % tnum, (perm_tvalues[tnum] * -1), calcTFCE)
	print("Finished. Randomization took %.1f seconds" % (time() - start_time))
def run(opts):
	if not os.path.exists("python_temp"):
		print("python_temp missing!")

	#load variables
	raw_nonzero = np.load('python_temp/raw_nonzero.npy')
	affine_mask = np.load('python_temp/affine_mask.npy')
	data_mask = np.load('python_temp/data_mask.npy')
	data_index = data_mask > 0.99
	num_voxel = np.load('python_temp/num_voxel.npy')
	n = raw_nonzero.shape[1]
	imgext = '.nii.gz' #default save type is nii.gz
	#if not os.path.isfile('python_temp/imgext.npy'): # to maintain compatibility
	#	imgext = '.nii.gz'
	#else:
	#	imgext = np.load('python_temp/imgext.npy')

	#step1
	if opts.input:
		pred_x = np.genfromtxt(opts.input[0], delimiter=',')
		covars = np.genfromtxt(opts.input[1], delimiter=',')
		x_covars = np.column_stack([np.ones(n), covars])
		y = resid_covars(x_covars, raw_nonzero)
		np.save('python_temp/covars', covars)
	if opts.regressors:
		pred_x = np.genfromtxt(opts.regressors[0], delimiter=',')
		y = raw_nonzero.T
	if opts.onesample:
		pred_x = np.ones(n)
		pred_x[:int(n / 2)] = -1
		if opts.onesample[0] != 'none':
			covars = np.genfromtxt(opts.onesample[0], delimiter=',')
			x_covars = np.column_stack([np.ones(n), covars])
			y = resid_covars(x_covars, raw_nonzero)
			np.save('python_temp/covars', covars)
		else:
			y = raw_nonzero.T
	ancova = 0
	if opts.ftest:
		ancova = 1

	#TFCE
	adjac = create_adjac_voxel(data_index, data_mask, num_voxel, dirtype=opts.tfce[2])
	calcTFCE = CreateAdjSet(float(opts.tfce[0]), float(opts.tfce[1]), adjac) # H=2, E=2, 26 neighbour connectivity

	#save
	np.save('python_temp/adjac', adjac)
	np.save('python_temp/pred_x', pred_x)
	np.save('python_temp/ancova', ancova)
	np.save('python_temp/optstfce', opts.tfce)
	np.save('python_temp/raw_nonzero_corr', y.T.astype(np.float32, order="C"))

	if not os.path.exists('output'):
		os.mkdir('output')
	os.chdir('output')
	X = np.column_stack([np.ones(n), pred_x])
	k = len(X.T)
	if opts.onesample:
		if opts.onesample[0] == 'none':
			tvalues, _ = stats.ttest_1samp(raw_nonzero, 0, axis=1)
			write_voxelStat_img('tstat_intercept', tvalues, data_mask, data_index, affine_mask, calcTFCE, imgext)
			write_voxelStat_img('negtstat_intercept', (tvalues * -1), data_mask, data_index, affine_mask, calcTFCE, imgext)
		else:
			tvalues = tval_int(x_covars, np.linalg.inv(np.dot(x_covars.T, x_covars)), raw_nonzero.T, n, len(x_covars.T), num_voxel)
			tvalues = tvalues[0]
			write_voxelStat_img('tstat_intercept', tvalues, data_mask, data_index, affine_mask, calcTFCE, imgext)
			write_voxelStat_img('negtstat_intercept', (tvalues * -1), data_mask, data_index, affine_mask, calcTFCE, imgext)
		exit()
	if ancova == 0:
		if opts.voxelregressor:
			img_all_name = opts.voxelregressor[0]
			_, file_ext = os.path.splitext(img_all_name)
			if file_ext == '.gz':
				_, file_ext = os.path.splitext(img_all_name)
				if file_ext == '.mnc':
					imgext = '.mnc'
					image_x = nib.load('../%s' % img_all_name).get_data()[data_index].astype(np.float32)
				else:
					imgext = '.nii.gz'
					os.system("zcat ../%s > temp_4d.nii" % img_all_name)
					image_x = nib.load('temp_4d.nii').get_data()[data_index].astype(np.float32)
					os.system("rm temp_4d.nii")
			elif file_ext == '.nii':
				imgext = '.nii.gz' # default to zipped images
				image_x = nib.load('../%s' % img_all_name).get_data()[data_index].astype(np.float32)
			else:
				print('Error: filetype for %s is not supported' % img_all_name)
				quit()
			tvalues, timage = image_regression(raw_nonzero.astype(np.float32), image_x, pred_x, covars)
			write_voxelStat_img('tstat_imgcovar', timage, data_mask, data_index, affine_mask, calcTFCE, imgext)
			write_voxelStat_img('negtstat_imgcovar', -timage, data_mask, data_index, affine_mask, calcTFCE, imgext)
			tvalues = tvalues.T
			VIF = image_reg_VIF(image_x.T, np.column_stack((pred_x, covars)))
			write_voxelStat_img('VIF_imgcovar', VIF, data_mask, data_index, affine_mask, calcTFCE, imgext, TFCE=False)
		else:
			#multiple regression
			invXX = np.linalg.inv(np.dot(X.T, X))
			tvalues = tval_int(X, invXX, y, n, k, num_voxel)
			tvalues[np.isnan(tvalues)] = 0 #only necessary for ANTS skeleton
			#write TFCE images
			for j in range(k - 1):
				tnum = j + 1
				write_voxelStat_img('tstat_con%d' % tnum, tvalues[tnum], data_mask, data_index, affine_mask, calcTFCE, imgext)
				write_voxelStat_img('negtstat_con%d' % tnum, (tvalues[tnum] * -1), data_mask, data_index, affine_mask, calcTFCE, imgext)
	elif ancova == 1:
		#anova
		fvals = calcF(X, y, n, k)
		# sqrt to approximate the t-distribution
		fvals[fvals < 0] = 0
		write_voxelStat_img('fstat', np.sqrt(fvals), data_mask, data_index, affine_mask, calcTFCE, imgext)
	else:
		print("Error")
		exit()
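# The next run() appears to be step 1 of the vertex-wise (surface) mediation analysis: it
# loads the concatenated lh/rh surface data, builds masks and adjacency sets, computes
# Sobel Z statistics, and writes TFCE-enhanced images.
# Note (explanatory, not from the original source): calc_sobelz presumably implements the
# Sobel test of the indirect effect a*b, i.e. z = (a * b) / sqrt(b**2 * se_a**2 + a**2 * se_b**2),
# where a is the predictor-to-mediator path and b the mediator-to-outcome path.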
def run(opts):
	scriptwd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
	arg_predictor = opts.input[0]
	arg_depend = opts.input[1]
	surface = opts.surface[0]
	medtype = opts.medtype[0]
	FWHM = opts.fwhm[0]

	#load variables
	pred_x = np.genfromtxt(arg_predictor, delimiter=",")
	depend_y = np.genfromtxt(arg_depend, delimiter=",")

	#load data
	img_data_lh = nib.freesurfer.mghformat.load("lh.all.%s.%s.mgh" % (surface, FWHM))
	data_full_lh = img_data_lh.get_data()
	data_lh = np.squeeze(data_full_lh)
	affine_mask_lh = img_data_lh.get_affine()
	n = data_lh.shape[1]
	outdata_mask_lh = np.zeros_like(data_full_lh[:, :, :, 1])
	img_data_rh = nib.freesurfer.mghformat.load("rh.all.%s.%s.mgh" % (surface, FWHM))
	data_full_rh = img_data_rh.get_data()
	data_rh = np.squeeze(data_full_rh)
	affine_mask_rh = img_data_rh.get_affine()
	outdata_mask_rh = np.zeros_like(data_full_rh[:, :, :, 1])
	if not os.path.exists("lh.mean.%s.%s.mgh" % (surface, FWHM)):
		mean_lh = np.sum(data_lh, axis=1) / data_lh.shape[1]
		outmean_lh = np.zeros_like(data_full_lh[:, :, :, 1])
		outmean_lh[:, 0, 0] = mean_lh
		nib.save(nib.freesurfer.mghformat.MGHImage(outmean_lh, affine_mask_lh), "lh.mean.%s.%s.mgh" % (surface, FWHM))
		mean_rh = np.sum(data_rh, axis=1) / data_rh.shape[1]
		outmean_rh = np.zeros_like(data_full_rh[:, :, :, 1])
		outmean_rh[:, 0, 0] = mean_rh
		nib.save(nib.freesurfer.mghformat.MGHImage(outmean_rh, affine_mask_rh), "rh.mean.%s.%s.mgh" % (surface, FWHM))
	else:
		img_mean_lh = nib.freesurfer.mghformat.load("lh.mean.%s.%s.mgh" % (surface, FWHM))
		mean_full_lh = img_mean_lh.get_data()
		mean_lh = np.squeeze(mean_full_lh)
		img_mean_rh = nib.freesurfer.mghformat.load("rh.mean.%s.%s.mgh" % (surface, FWHM))
		mean_full_rh = img_mean_rh.get_data()
		mean_rh = np.squeeze(mean_full_rh)

	#create masks
	if opts.fmri:
		maskthresh = opts.fmri
		print("fMRI threshold mask = %2.2f" % maskthresh)
		bin_mask_lh = np.logical_or(mean_lh > maskthresh, mean_lh < (-1 * maskthresh))
		bin_mask_rh = np.logical_or(mean_rh > maskthresh, mean_rh < (-1 * maskthresh))
	elif opts.fsmask:
		label = opts.fsmask
		print("Loading fsaverage ?l.%s.label" % label)
		index_lh, _, _ = convert_fslabel("%s/fsaverage/label/lh.%s.label" % (os.environ["SUBJECTS_DIR"], label))
		index_rh, _, _ = convert_fslabel("%s/fsaverage/label/rh.%s.label" % (os.environ["SUBJECTS_DIR"], label))
		bin_mask_lh = np.zeros_like(mean_lh)
		bin_mask_lh[index_lh] = 1
		bin_mask_lh = bin_mask_lh.astype(bool)
		bin_mask_rh = np.zeros_like(mean_rh)
		bin_mask_rh[index_rh] = 1
		bin_mask_rh = bin_mask_rh.astype(bool)
	elif opts.label:
		label_lh = opts.label[0]
		label_rh = opts.label[1]
		index_lh, _, _ = convert_fslabel(label_lh)
		index_rh, _, _ = convert_fslabel(label_rh)
		bin_mask_lh = np.zeros_like(mean_lh)
		bin_mask_lh[index_lh] = 1
		bin_mask_lh = bin_mask_lh.astype(bool)
		bin_mask_rh = np.zeros_like(mean_rh)
		bin_mask_rh[index_rh] = 1
		bin_mask_rh = bin_mask_rh.astype(bool)
	elif opts.binmask:
		print("Loading masks")
		img_binmgh_lh = nib.freesurfer.mghformat.load(opts.binmask[0])
		binmgh_lh = img_binmgh_lh.get_data()
		binmgh_lh = np.squeeze(binmgh_lh)
		img_binmgh_rh = nib.freesurfer.mghformat.load(opts.binmask[1])
		binmgh_rh = img_binmgh_rh.get_data()
		binmgh_rh = np.squeeze(binmgh_rh)
		bin_mask_lh = binmgh_lh > .99
		bin_mask_rh = binmgh_rh > .99
	else:
		bin_mask_lh = mean_lh > 0
		bin_mask_rh = mean_rh > 0
	data_lh = data_lh[bin_mask_lh]
	num_vertex_lh = data_lh.shape[0]
	data_rh = data_rh[bin_mask_rh]
	num_vertex_rh = data_rh.shape[0]
	num_vertex = num_vertex_lh + num_vertex_rh
	all_vertex = data_full_lh.shape[0]

	#TFCE
	if opts.triangularmesh:
		print("Creating adjacency set")
		if opts.inputsurfs:
			# 3 Neighbour vertex connectivity
			v_lh, faces_lh = nib.freesurfer.read_geometry(opts.inputsurfs[0])
			v_rh, faces_rh = nib.freesurfer.read_geometry(opts.inputsurfs[1])
		else:
			v_lh, faces_lh = nib.freesurfer.read_geometry("%s/fsaverage/surf/lh.sphere" % os.environ["SUBJECTS_DIR"])
			v_rh, faces_rh = nib.freesurfer.read_geometry("%s/fsaverage/surf/rh.sphere" % os.environ["SUBJECTS_DIR"])
		adjac_lh = create_adjac_vertex(v_lh, faces_lh)
		adjac_rh = create_adjac_vertex(v_rh, faces_rh)
	elif opts.adjfiles:
		print("Loading prior adjacency set")
		arg_adjac_lh = opts.adjfiles[0]
		arg_adjac_rh = opts.adjfiles[1]
		adjac_lh = np.load(arg_adjac_lh)
		adjac_rh = np.load(arg_adjac_rh)
	elif opts.dist:
		print("Loading prior adjacency set for %s mm" % opts.dist[0])
		adjac_lh = np.load("%s/adjacency_sets/lh_adjacency_dist_%s.0_mm.npy" % (scriptwd, str(opts.dist[0])))
		adjac_rh = np.load("%s/adjacency_sets/rh_adjacency_dist_%s.0_mm.npy" % (scriptwd, str(opts.dist[0])))
	else:
		print("Error")
	if opts.noweight:
		vdensity_lh = 1
		vdensity_rh = 1
	else:
		# correction for vertex density
		# (weights are 1 at the mean neighbour count, <1 for densely connected vertices, >1 for sparsely connected ones)
		vdensity_lh = np.zeros((adjac_lh.shape[0]))
		vdensity_rh = np.zeros((adjac_rh.shape[0]))
		for i in range(adjac_lh.shape[0]):
			vdensity_lh[i] = len(adjac_lh[i])
		for j in range(adjac_rh.shape[0]):
			vdensity_rh[j] = len(adjac_rh[j])
		vdensity_lh = np.array((1 - (vdensity_lh / vdensity_lh.max()) + (vdensity_lh.mean() / vdensity_lh.max())), dtype=np.float32)
		vdensity_rh = np.array((1 - (vdensity_rh / vdensity_rh.max()) + (vdensity_rh.mean() / vdensity_rh.max())), dtype=np.float32)
	calcTFCE_lh = CreateAdjSet(float(opts.tfce[0]), float(opts.tfce[1]), adjac_lh)
	calcTFCE_rh = CreateAdjSet(float(opts.tfce[0]), float(opts.tfce[1]), adjac_rh)

	#save variables
	if not os.path.exists("python_temp_med_%s" % surface):
		os.mkdir("python_temp_med_%s" % surface)
	np.save("python_temp_med_%s/pred_x" % surface, pred_x)
	np.save("python_temp_med_%s/depend_y" % surface, depend_y)
	np.save("python_temp_med_%s/num_subjects" % surface, n)
	np.save("python_temp_med_%s/num_vertex" % surface, num_vertex)
	np.save("python_temp_med_%s/num_vertex_lh" % (surface), num_vertex_lh)
	np.save("python_temp_med_%s/num_vertex_rh" % (surface), num_vertex_rh)
	np.save("python_temp_med_%s/all_vertex" % (surface), all_vertex)
	np.save("python_temp_med_%s/bin_mask_lh" % (surface), bin_mask_lh)
	np.save("python_temp_med_%s/bin_mask_rh" % (surface), bin_mask_rh)
	np.save("python_temp_med_%s/adjac_lh" % (surface), adjac_lh)
	np.save("python_temp_med_%s/adjac_rh" % (surface), adjac_rh)
	np.save("python_temp_med_%s/optstfce" % (surface), opts.tfce)
	np.save('python_temp_med_%s/vdensity_lh' % (surface), vdensity_lh)
	np.save('python_temp_med_%s/vdensity_rh' % (surface), vdensity_rh)

	#step1
	if opts.covariates:
		arg_covars = opts.covariates[0]
		covars = np.genfromtxt(arg_covars, delimiter=",")
		x_covars = np.column_stack([np.ones(n), covars])
		y_lh = resid_covars(x_covars, data_lh)
		y_rh = resid_covars(x_covars, data_rh)
		merge_y = np.hstack((y_lh, y_rh))
		del y_lh
		del y_rh
	else:
		#no covariates
		merge_y = np.hstack((data_lh.T, data_rh.T))
		del data_lh
		del data_rh
	np.save("python_temp_med_%s/merge_y" % (surface), merge_y.astype(np.float32, order="C"))

	#step2 mediation
	SobelZ = calc_sobelz(medtype, pred_x, depend_y, merge_y, n, num_vertex)

	#write TFCE images
	if not os.path.exists("output_med_%s" % surface):
		os.mkdir("output_med_%s" % surface)
	os.chdir("output_med_%s" % surface)
	write_vertStat_img('SobelZ_%s' % (medtype), SobelZ[:num_vertex_lh], outdata_mask_lh, affine_mask_lh, surface, 'lh', bin_mask_lh, calcTFCE_lh, bin_mask_lh.shape[0], vdensity_lh)
	write_vertStat_img('SobelZ_%s' % (medtype), SobelZ[num_vertex_lh:], outdata_mask_rh, affine_mask_rh, surface, 'rh', bin_mask_rh, calcTFCE_rh, bin_mask_rh.shape[0], vdensity_rh)
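# The next run() appears to be an earlier variant of the multi-surface analysis script
# above: with --multisurfacefwecorrection it applies apply_mfwer across surfaces and
# writes tmi/mgh/nii.gz/ply outputs; otherwise it assembles the TFCE objects, masks,
# vertex-density weights, and regressors for the statistical analysis.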
def run(opts): currentTime = int(time()) if opts.multisurfacefwecorrection: _, image_array, masking_array, maskname, affine_array, vertex_array, face_array, surfname, adjacency_array, tmi_history, subjectids = read_tm_filetype( '%s' % opts.tmifile[0], verbose=False) # check file dimensions if not image_array[0].shape[1] % 3 == 0: print 'Print file format is not understood. Please make sure %s is statistics file.' % opts.tmifile[ 0] quit() else: num_contrasts = int(image_array[0].shape[1] / 3) # get surface coordinates in data array pointer = 0 position_array = [0] for i in range(len(masking_array)): pointer += len(masking_array[i][masking_array[i] == True]) position_array.append(pointer) del pointer if num_contrasts == 1: # get lists for positive and negative contrasts pos_range = [1] neg_range = [2] else: # get lists for positive and negative contrasts pos_range = range(num_contrasts, num_contrasts + num_contrasts) neg_range = range(num_contrasts * 2, num_contrasts * 2 + num_contrasts) # check that randomisation has been run if not os.path.exists( "%s/output_%s/perm_maxTFCE_surf0_tcon1.csv" % (os.getcwd(), opts.tmifile[0])): # make this safer print 'Permutation folder not found. Please run --randomise first.' quit() #check permutation file lengths num_surf = len(masking_array) surface_range = range(num_surf) num_perm = lowest_length(num_contrasts, surface_range, opts.tmifile[0]) if opts.setsurfacerange: surface_range = range(opts.setsurfacerange[0], opts.setsurfacerange[1] + 1) elif opts.setsurface: surface_range = opts.setsurface if np.array(surface_range).max() > len(masking_array): print "Error: range does note fit the surfaces contained in the tmi file. %s contains the following surfaces" % opts.tmifile[ 0] for i in range(len(surfname)): print("Surface %d : %s, %s" % (i, surfname[i], maskname[i])) quit() print "Reading %d contrast(s) from %d of %d surface(s)" % ( (num_contrasts), len(surface_range), num_surf) print "Reading %s permutations with an accuracy of p=0.05+/-%.4f" % ( num_perm, (2 * (np.sqrt(0.05 * 0.95 / num_perm)))) # calculate the P(FWER) images from all surfaces positive_data, negative_data = apply_mfwer(image_array, num_contrasts, surface_range, num_perm, num_surf, opts.tmifile[0], position_array, pos_range, neg_range, weight='logmasksize') # write out files if opts.concatestats: write_tm_filetype(opts.tmifile[0], image_array=positive_data, masking_array=masking_array, maskname=maskname, affine_array=affine_array, vertex_array=vertex_array, face_array=face_array, surfname=surfname, adjacency_array=adjacency_array, checkname=False, tmi_history=tmi_history) _, image_array, masking_array, maskname, affine_array, vertex_array, face_array, surfname, adjacency_array, tmi_history, subjectids = read_tm_filetype( opts.tmifile[0], verbose=False) write_tm_filetype(opts.tmifile[0], image_array=np.column_stack( (image_array[0], negative_data)), masking_array=masking_array, maskname=maskname, affine_array=affine_array, vertex_array=vertex_array, face_array=face_array, surfname=surfname, adjacency_array=adjacency_array, checkname=False, tmi_history=tmi_history) else: for i in range(len(opts.outtype)): if opts.outtype[i] == 'tmi': write_tm_filetype("tstats_pFWER_%s" % opts.tmifile[0], image_array=positive_data, masking_array=masking_array, maskname=maskname, affine_array=affine_array, vertex_array=vertex_array, face_array=face_array, surfname=surfname, checkname=False, tmi_history=tmi_history) write_tm_filetype("negtstats_pFWER_%s" % opts.tmifile[0], image_array=negative_data, 
                        masking_array=masking_array, maskname=maskname, affine_array=affine_array,
                        vertex_array=vertex_array, face_array=face_array, surfname=surfname,
                        checkname=False, tmi_history=tmi_history)
                    if opts.neglog:
                        write_tm_filetype("tstats_negLog_pFWER_%s" % opts.tmifile[0], image_array=-np.log10(1 - positive_data),
                            masking_array=masking_array, maskname=maskname, affine_array=affine_array,
                            vertex_array=vertex_array, face_array=face_array, surfname=surfname,
                            checkname=False, tmi_history=tmi_history)
                        write_tm_filetype("negtstats_negLog_pFWER_%s" % opts.tmifile[0], image_array=-np.log10(1 - negative_data),
                            masking_array=masking_array, maskname=maskname, affine_array=affine_array,
                            vertex_array=vertex_array, face_array=face_array, surfname=surfname,
                            checkname=False, tmi_history=tmi_history)
                else:
                    if opts.outtype[i] == 'mgh':
                        savefunc = savemgh_v2
                    if opts.outtype[i] == 'nii.gz':
                        savefunc = savenifti_v2
                    if opts.outtype[i] == 'auto':
                        savefunc = saveauto
                    for surf_count in surface_range:
                        start = position_array[surf_count]
                        end = position_array[surf_count + 1]
                        basename = strip_basename(maskname[surf_count])
                        if not os.path.exists("output_stats"):
                            os.mkdir("output_stats")
                        out_image = positive_data[start:end]
                        temp_image = negative_data[start:end]
                        for contrast in range(num_contrasts):
                            out_image[temp_image[:, contrast] != 0, contrast] = temp_image[temp_image[:, contrast] != 0, contrast] * -1
                        if affine_array == []:
                            savefunc(out_image, masking_array[surf_count], "output_stats/%d_%s_pFWER" % (surf_count, basename))
                        else:
                            savefunc(out_image, masking_array[surf_count], "output_stats/%d_%s_pFWER" % (surf_count, basename), affine_array[surf_count])
                        if opts.neglog:
                            out_image = -np.log10(1 - positive_data[start:end])
                            temp_image = np.log10(1 - negative_data[start:end])
                            for contrast in range(num_contrasts):
                                out_image[temp_image[:, contrast] != 0, contrast] = temp_image[temp_image[:, contrast] != 0, contrast]
                            if affine_array == []:
                                savefunc(out_image, masking_array[surf_count], "output_stats/%d_%s_negLog_pFWER" % (surf_count, basename))
                            else:
                                savefunc(out_image, masking_array[surf_count], "output_stats/%d_%s_negLog_pFWER" % (surf_count, basename), affine_array[surf_count])
        if opts.outputply:
            if not os.path.exists("output_ply"):
                os.mkdir("output_ply")
            for contrast in range(num_contrasts):
                for surf_count in surface_range:
                    start = position_array[surf_count]
                    end = position_array[surf_count + 1]
                    basename = strip_basename(maskname[surf_count])
                    if masking_array[surf_count].shape[2] > 1:  # voxel-based object
                        img_data = np.zeros((masking_array[surf_count].shape))
                        combined_data = positive_data[start:end, contrast]
                        combined_data[combined_data <= 0] = negative_data[start:end, contrast][combined_data <= 0] * -1
                        combined_data[np.abs(combined_data) < float(opts.outputply[0])] = 0
                        img_data[masking_array[surf_count]] = combined_data
                        v, f, values = convert_voxel(img_data, affine=affine_array[surf_count], absthreshold=float(opts.outputply[0]))
                        out_color_array = paint_surface(opts.outputply[0], opts.outputply[1], opts.outputply[2], values)
                        negvalues = values * -1
                        index = negvalues > float(opts.outputply[0])
                        out_color_array2 = paint_surface(opts.outputply[0], opts.outputply[1], opts.outputply[3], negvalues)
                        out_color_array[index, :] = out_color_array2[index, :]
                        save_ply(v, f, "output_ply/%d_%s_pFWE_tcon%d.ply" % (surf_count, basename, contrast + 1), out_color_array)
                    else:
                        img_data = np.zeros((masking_array[surf_count].shape[0]))
                        img_data[masking_array[surf_count][:, 0, 0] == True] = positive_data[start:end, contrast]
                        out_color_array = paint_surface(opts.outputply[0], opts.outputply[1], opts.outputply[2], img_data)
                        img_data[masking_array[surf_count][:, 0, 0] == True] = negative_data[start:end, contrast]
                        index = img_data > float(opts.outputply[0])
                        out_color_array2 = paint_surface(opts.outputply[0], opts.outputply[1], opts.outputply[3], img_data)
                        out_color_array[index, :] = out_color_array2[index, :]
                        save_ply(vertex_array[surf_count], face_array[surf_count], "output_ply/%d_%s_pFWE_tcon%d.ply" % (surf_count, basename, contrast + 1), out_color_array)
    else:
        # read tmi file
        if opts.randomise:
            _, image_array, masking_array, _, _, _, _, _, adjacency_array, _, _ = read_tm_filetype(opts.tmifile[0])
            _ = None
        else:
            element, image_array, masking_array, maskname, affine_array, vertex_array, face_array, surfname, adjacency_array, tmi_history, _ = read_tm_filetype(opts.tmifile[0])

        # get surface coordinates in data array
        pointer = 0
        position_array = [0]
        for i in range(len(masking_array)):
            pointer += len(masking_array[i][masking_array[i] == True])
            position_array.append(pointer)
        del pointer

        if opts.setadjacencyobjs:
            if len(opts.setadjacencyobjs) == len(masking_array):
                adjacent_range = opts.setadjacencyobjs
            else:
                print("Error: the number of masking arrays (%d) and the list of matching adjacency objects (%d) must be equal." % (len(masking_array), len(opts.setadjacencyobjs)))
                quit()
        else:
            adjacent_range = range(len(adjacency_array))

        calcTFCE = []
        if opts.setfcesettings:
            if len(opts.setfcesettings) != len(masking_array):
                print("Error: the number of masking arrays (%d) and the list of matching TFCE settings (%d) must be equal." % (len(masking_array), len(opts.setfcesettings)))
                quit()
            if len(opts.tfce) % 2 != 0:
                print("Error: there must be an even number of inputs for --tfce.")
                quit()
            tfce_settings_mask = []
            for i in range(len(opts.tfce) // 2):
                tfce_settings_mask.append((opts.setfcesettings == int(i)))
                pointer = int(i * 2)
                adjacency = merge_adjacency_array(adjacent_range[tfce_settings_mask[i]], adjacency_array[tfce_settings_mask[i]])
                calcTFCE.append((CreateAdjSet(float(opts.tfce[pointer]), float(opts.tfce[pointer + 1]), adjacency)))
                del adjacency
        else:
            adjacency = merge_adjacency_array(adjacent_range, adjacency_array)
            calcTFCE.append((CreateAdjSet(float(opts.tfce[0]), float(opts.tfce[1]), adjacency)))

        # make mega mask
        fullmask = []
        for i in range(len(masking_array)):
            if masking_array[i].shape[2] == 1:  # check if vertex or voxel image
                fullmask = np.hstack((fullmask, masking_array[i][:, 0, 0]))
            else:
                fullmask = np.hstack((fullmask, masking_array[i][masking_array[i] == True]))

        if not opts.noweight:
            # correction for vertex density
            vdensity = []  # np.ones_like(masking_array)
            for i in range(len(masking_array)):
                temp_vdensity = np.zeros((adjacency_array[adjacent_range[i]].shape[0]))
                for j in range(adjacency_array[adjacent_range[i]].shape[0]):
                    temp_vdensity[j] = len(adjacency_array[adjacent_range[i]][j])
                if masking_array[i].shape[2] == 1:
                    temp_vdensity = temp_vdensity[masking_array[i][:, 0, 0] == True]
                vdensity = np.hstack((vdensity, np.array((1 - (temp_vdensity / temp_vdensity.max()) + (temp_vdensity.mean() / temp_vdensity.max())), dtype=np.float32)))
                del temp_vdensity
        else:
            vdensity = 1

        # load regressors
        if opts.input:
            arg_predictor = opts.input[0]
            arg_covars = opts.input[1]
            pred_x = np.genfromtxt(arg_predictor, delimiter=',')
            covars = np.genfromtxt(arg_covars, delimiter=',')
            x_covars = np.column_stack([np.ones(len(covars)), covars])
            merge_y = resid_covars(x_covars, image_array[0])
        if opts.regressors:
            arg_predictor = opts.regressors[0]
            pred_x = np.genfromtxt(arg_predictor, delimiter=',')
            merge_y = image_array[0].T

        # cleanup
        image_array = None
        adjacency_array = None
        adjacency = None

        if opts.analysisname:
            outname = opts.analysisname[0]
        else:
            outname = opts.tmifile[0][:-4]

        # make output folder
        if not os.path.exists("output_%s" % (outname)):
            os.mkdir("output_%s" % (outname))
        os.chdir("output_%s" % (outname))

        if opts.randomise:
            randTime = int(time())
            mapped_y = merge_y.astype(np.float32, order="C")  # removed memory mapping
            merge_y = None
            if not outname.endswith('tmi'):
                outname += '.tmi'
            outname = 'stats_' + outname
            if not os.path.exists("output_%s" % (outname)):
                os.mkdir("output_%s" % (outname))
            os.chdir("output_%s" % (outname))
            for i in range(opts.randomise[0], (opts.randomise[1] + 1)):
                calculate_tfce(mapped_y, masking_array, pred_x, calcTFCE[0], vdensity, position_array, fullmask, perm_number=i, randomise=True)
            print("Total time taken: %.1f seconds" % (time() - currentTime))
            print("Randomization took %.1f seconds" % (time() - randTime))
        else:
            # Run TFCE
            tvals, tfce_tvals, neg_tfce_tvals = calculate_tfce(merge_y, masking_array, pred_x, calcTFCE[0], vdensity, position_array, fullmask)
            if opts.outtype[0] == 'tmi':
                if not outname.endswith('tmi'):
                    outname += '.tmi'
                outname = 'stats_' + outname
                # write tstat
                write_tm_filetype(outname, image_array=tvals.T, masking_array=masking_array, maskname=maskname,
                    affine_array=affine_array, vertex_array=vertex_array, face_array=face_array, surfname=surfname,
                    checkname=False, tmi_history=[])
                # read the tmi back in and append the TFCE-transformed t-statistics
                _, image_array, masking_array, maskname, affine_array, vertex_array, face_array, surfname, _, tmi_history, subjectids = read_tm_filetype(outname, verbose=False)
                write_tm_filetype(outname, image_array=np.column_stack((image_array[0], tfce_tvals.T)),
                    masking_array=masking_array, maskname=maskname, affine_array=affine_array, vertex_array=vertex_array,
                    face_array=face_array, surfname=surfname, checkname=False, tmi_history=tmi_history)
                _, image_array, masking_array, maskname, affine_array, vertex_array, face_array, surfname, adjacency_array, tmi_history, subjectids = read_tm_filetype(outname, verbose=False)
                write_tm_filetype(outname, image_array=np.column_stack((image_array[0], neg_tfce_tvals.T)),
                    masking_array=masking_array, maskname=maskname, affine_array=affine_array, vertex_array=vertex_array,
                    face_array=face_array, surfname=surfname, checkname=False, tmi_history=tmi_history)
            else:
                print("Not implemented yet.")
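# --- Illustrative sketch (not part of the original module) -------------------
# The vertex-density correction above weights every vertex by
#     w = 1 - d/d.max() + d.mean()/d.max()
# where d is the number of adjacent vertices, so densely connected vertices are
# down-weighted and the weights average to one. A minimal, self-contained
# version of that calculation is sketched below; the helper name is an
# assumption, and the adjacency argument is any sequence of per-vertex
# neighbour sets as used above.
def _sketch_vertex_density_weights(adjacency):
    import numpy as np
    d = np.array([len(neighbours) for neighbours in adjacency], dtype=np.float32)
    return 1.0 - (d / d.max()) + (d.mean() / d.max())
# e.g. d = [6, 6, 12] gives w ~= [1.167, 1.167, 0.667], and w.mean() == 1.0
# ------------------------------------------------------------------------------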