def fisher_z2r(Z):
    X = ne.evaluate('exp(2*Z)')
    return ne.evaluate('(X - 1) / (X + 1)')

# get averaged (upper triangular) 1D correlation (Fisher r2z transformed)
ave_filena = '/data/pt_mar006/subjects_group/corrFisherR2Z_upper.h5'
ave_array = np.array(h5py.File(ave_filena, 'r')['data'])

###### Step #5: Fisher's z2r transform ###############################
print "Fisher z2r transform..."
ave_array = fisher_z2r(ave_array)

# get the full matrix
N_orig = hcp_corr.N_original(ave_array)
ave_array.resize([N_orig, N_orig])
corr = hcp_corr.upper_to_down(ave_array)

###### Step #6: threshold each row of corr matrix at 90th percentile ##
print "thresholding each row at its 90th percentile..."
perc = np.array([np.percentile(x, 90) for x in corr])
for i in range(corr.shape[0]):
    #print "Row %d" % i
    corr[i, corr[i, :] < perc[i]] = 0

# Check for minimum & maximum value
print "Minimum value is %f" % corr.min()
print "Maximum value is %f" % corr.max()

## Count negative values per row
#neg_values = np.array([sum(corr[i,:] < 0) for i in range(N_orig)])
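# NOTE (added sketch): mathematically, the z-to-r transform above is the hyperbolic
# tangent, r = (exp(2z) - 1) / (exp(2z) + 1) = tanh(z), and the row-wise 90th-percentile
# thresholding can be reproduced without an explicit loop. The names below (zs,
# corr_demo, row_perc) are illustrative only and not part of the original script.
import numpy as np

zs = np.linspace(-3, 3, 7)
assert np.allclose(fisher_z2r(zs), np.tanh(zs))   # sanity check of the transform

# vectorized alternative to the per-row 90th-percentile loop:
corr_demo = np.random.rand(5, 5)
row_perc = np.percentile(corr_demo, 90, axis=1, keepdims=True)
corr_demo[corr_demo < row_perc] = 0               # keep only each row's top decile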
    for i in range(0, matrix.shape[0]):
        if np.count_nonzero(matrix[i, :]) == 0:
            cnt_zeros += 1
    return cnt_zeros, matrix

#### Step 1, get all connectivity matrices of given subject #########
corr_All = []
for image_rest in glob.glob(data_dir + '/' + subject_id + '*' +
                            '/preprocessed/func/' +
                            'rest_preprocessed2mni_sm.nii.gz'):
    [voxel_zeros, t_series] = mask_check(image_rest, image_mask)
    corr_upper = corrcoef_upper(t_series)
    N_orig = N_original(corr_upper)
    corr_upper.resize([N_orig, N_orig])
    corr = upper_to_down(corr_upper)
    print image_rest, corr.shape
    corr_All.append(corr)

corr_All = np.array(corr_All)
corr_All = corr_All.T
print 'input data size...', corr_All.shape

##### Step 2, get concordance value per voxel ########################
W_voxels = []
p_voxels = []
Fdist_voxels = []
ccc_voxels = []
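# NOTE (added sketch): the lists above (W_voxels, p_voxels, Fdist_voxels, ccc_voxels)
# suggest a per-voxel concordance measure such as Kendall's W computed across the
# scans stacked in corr_All (after .T the last axis indexes scans). The helper below
# is an illustrative, tie-uncorrected Kendall's W from the textbook formula
# W = 12*S / (m**2 * (n**3 - n)); it is NOT the original implementation.
import numpy as np
from scipy.stats import rankdata

def kendall_w_sketch(data):
    """data: (m scans/raters, n items) -> Kendall's W without tie correction."""
    m, n = data.shape
    ranks = np.vstack([rankdata(row) for row in data])   # rank items within each scan
    R = ranks.sum(axis=0)                                # rank sum per item
    S = ((R - R.mean()) ** 2).sum()                      # squared deviations of rank sums
    return 12.0 * S / (m ** 2 * (n ** 3 - n))

# e.g. for voxel i: corr_All[i] is (targets, scans), so pass the transpose:
# W_voxels.append(kendall_w_sketch(corr_All[i].T))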
# get mean correlation
SUM = ne.evaluate("SUM / N")

# Fisher z to r transform on the (averaged) upper triangular
print "do Fisher z2r..."

# Fisher z to r transform on average, now this is back to correlation array
SUM = fisher_z2r(SUM)

# transform correlation array into similarity array
SUM += 1.0
SUM /= 2.0

# get full similarity matrix
N = hcp_corr.N_original(SUM)
SUM.resize([N, N])
hcp_corr.upper_to_down(SUM)
print "SUM.shape", SUM.shape

# output prefix
out_prfx = args.outprfx

print "writing-out data in HDF5 format"
h = h5py.File(out_prfx, "w")
h.create_dataset("sum", data=SUM)
h.close()

# set NaN entries to 0
# SUM[np.where(np.isnan(SUM) == True)] = 0

# ignore zero entries?
# ind = np.where(np.sum(SUM,axis=1) != 1)
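# NOTE (added usage sketch): the similarity matrix written above can be read back
# with h5py; the dataset name "sum" and the path in out_prfx come from this script,
# the variable SUM_loaded is illustrative only.
import h5py

with h5py.File(out_prfx, "r") as f:
    SUM_loaded = f["sum"][:]     # load the full dataset into memory as a numpy array
print "loaded matrix shape:", SUM_loaded.shape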