def get_train_data(num):
    """Sample random training patches from every vessel-layer slice.

    For each volume listed in the module-level ``volume`` list, load the
    original (x) and self-fused (y) NIfTI volumes, crop the vessel-layer
    slice range, and draw ``num`` random ``msk``-sized patches per en-face
    slice.  Each patch is augmented with its left-right and up-down flips.

    Parameters
    ----------
    num : int
        Number of random patch locations drawn per en-face slice.

    Returns
    -------
    tuple
        Tuple of ``(im_x, im_y)`` ndarray pairs (x rescaled to [0, 255],
        y rescaled to [0, 1]).

    Notes
    -----
    Reads the module globals ``dataroot``, ``volume``, ``slc_range`` and
    ``msk``.  The original ``global msk`` statement was a no-op (``msk`` is
    only read, never assigned) and has been dropped.
    """
    # Accumulate in a list: repeated tuple concatenation is O(n^2).
    pairs = []
    for i in range(len(volume)):
        vol_x = util.nii_loader(dataroot + volume[i] + '.nii.gz')
        vol_y = util.nii_loader(dataroot + 'sf(' + volume[i] + ').nii.gz')
        # extract vessel layers
        vol_x = vol_x[:, slc_range[i][0]:slc_range[i][1], :]
        vol_y = vol_y[:, slc_range[i][0]:slc_range[i][1], :]
        # size of en-face slices
        H, slc, W = vol_x.shape
        # iterate over the vessel layers
        for j in range(slc):
            x = util.ImageRescale(vol_x[:, j, :], [0, 255])
            y = util.ImageRescale(vol_y[:, j, :], [0, 1])
            # samples from single image
            for k in range(num):
                # top-left corner of a random msk-sized patch
                pseed = [random.randint(0, H - msk[0]),
                         random.randint(0, W - msk[1])]
                im_x = x[pseed[0]:pseed[0] + msk[0],
                         pseed[1]:pseed[1] + msk[1]]
                im_y = y[pseed[0]:pseed[0] + msk[0],
                         pseed[1]:pseed[1] + msk[1]]
                # flip augmentation: original + horizontal + vertical
                pairs.append((im_x, im_y))
                pairs.append((np.fliplr(im_x), np.fliplr(im_y)))
                pairs.append((np.flipud(im_x), np.flipud(im_y)))
    # Keep the tuple return type the original callers expect.
    return tuple(pairs)
def greedy(stack, temp):
    """Deformably register every slice of *stack* onto its center slice.

    Uses the external ``greedy.sh`` registration tool, exchanging images
    through NIfTI files in the *temp* directory.

    Parameters
    ----------
    stack : ndarray, shape (h, slc, w)
        Slice stack; ``slc`` must be odd so a unique center slice exists.
    temp : str
        Scratch directory path (with trailing separator); its contents are
        deleted before use.

    Returns
    -------
    ndarray (float32), same shape as *stack*
        Stack with every slice warped onto the center slice.

    Raises
    ------
    ValueError
        If the number of slices is even.
    """
    # clean the temp directory so stale results cannot be read back
    for file in os.listdir(temp):
        os.remove(temp + file)

    h, slc, w = stack.shape
    if slc % 2 == 1:
        r = (slc - 1) // 2  # index of the center (fixed) slice
    else:
        # fixed error-message typo: "as to be" -> "has to be"
        raise ValueError("Slice number has to be odd.")

    stack_opt = np.zeros(stack.shape, dtype=np.float32)

    # non-stepwise deformable registration: every slice is warped directly
    # onto the center slice (not chained through its neighbors)
    im_fix = stack[:, r, :]
    util.nii_saver(im_fix, temp, 'im_fix.nii')
    for i in range(slc):
        im_mov = stack[:, i, :]
        util.nii_saver(im_mov, temp, 'im_mov.nii')
        # the shell script reads im_fix.nii/im_mov.nii and writes warped.nii
        subprocess.call("/home/dewei/tool/greedy.sh")
        stack_opt[:, i, :] = util.nii_loader(temp + 'warped.nii')
    return stack_opt
def __init__(self, root):
    """Load the volume at *root* and build motion-corrected 3-frame inputs.

    Each entry of ``self.data`` is a (3, 512, 512) float32 array holding
    the previous neighbor (channel 0), the current frame (channel 1) and
    the next neighbor (channel 2); both neighbors are registered onto the
    current frame with ``MC.MotionCorrect``.
    """
    self.vx = util.nii_loader(root)
    num_frames, _, width = self.vx.shape
    self.data = []
    # skip `radius` frames at each end so i-1 and i+1 always exist
    # (assumes radius >= 1 -- TODO confirm intended margin)
    for idx in range(radius, num_frames - radius):
        fixed = np.ascontiguousarray(np.float32(self.vx[idx, :, :]))
        prev_mov = np.ascontiguousarray(np.float32(self.vx[idx - 1, :, :]))
        next_mov = np.ascontiguousarray(np.float32(self.vx[idx + 1, :, :]))
        frame = np.zeros([3, 512, 512], dtype=np.float32)
        # zero-pad the width up to 512 columns
        frame[0, :, :width] = MC.MotionCorrect(fixed, prev_mov)
        frame[2, :, :width] = MC.MotionCorrect(fixed, next_mov)
        frame[1, :, :width] = fixed
        self.data.append(frame)
    # --- tail of a slice self-fusion routine; its `def` line lies outside
    # this chunk (presumably `sf(stack, temp)` given the call below) ---
    # save the center slice r as the fixed image for self-fusion
    im_fix = Image.fromarray(np.float32(stack[:, r, :]))
    im_fix.save(temp + 'fix_img.tif')
    for i in range(slc):
        # every slice (including the center) becomes an atlas image
        im_mov = Image.fromarray(np.float32(stack[:, i, :]))
        im_mov.save(temp + 'atlas{}.tif'.format(i))
    # external self-fusion tool reads fix_img.tif / atlas*.tif from temp
    # and writes synthResult.tif
    subprocess.call("/home/dewei/tool/self_fusion.sh")
    im_sf = io.imread(temp + 'synthResult.tif')
    return im_sf


if __name__ == "__main__":
    dataroot = '/home/dewei/Desktop/octa/data/'
    temp = '/home/dewei/Desktop/octa/temp/'

    vol = util.nii_loader(dataroot + 'orig_fovea.nii.gz')
    # 9 vessel-layer slices around the fovea
    stack = vol[:, 90:99, :]

    # register the stack, then self-fuse the registered stack
    stack_opt = greedy(stack, temp)
    im_sf = sf(stack_opt, temp)

    plt.figure(figsize=(12, 12))
    plt.axis('off')
    plt.title('sf vs. local-proj')
    # side-by-side: self-fusion result vs. plain mean projection
    plt.imshow(np.concatenate((im_sf, np.mean(stack, axis=1)), axis=1),
               cmap='gray')
    plt.show()
    print('Execution finished.')
# Build test volumes from the fish CSV manifest.
# Fix: np.str and np.int were deprecated in NumPy 1.20 and removed in 1.24;
# they were plain aliases of the builtins, so str/int are drop-in replacements.
file = 'fish_test.csv'
df = pd.read_csv(root + file)
row, col = df.shape
opt = ()
for i in range(row):
    # index 0 in col indicate ID of the fish
    fish_root = dataroot + str(df.loc[i][0]) + '\\'
    print('fish: {}, samples: {}'.format(df.loc[i][0], len(opt)))
    # get session number
    for session in os.listdir(fish_root):
        session_root = fish_root + session + '\\'
        # session index is the last character of the directory name
        j = int(session[-1])
        # load the left/right eye volumes of this session
        for item in os.listdir(session_root):
            if item.endswith('L.nii.gz'):
                vol_L = util.nii_loader(session_root + item)
            elif item.endswith('R.nii.gz'):
                vol_R = util.nii_loader(session_root + item)
        # a string cell in the CSV encodes the crop ranges for this session
        # (isinstance replaces the non-idiomatic `type(...) == str` check)
        if isinstance(df.loc[i][j], str):
            lib = df.loc[i][j]
            rng_left, rng_right = string2idx(lib)
            vol = np.float32(vol_L[10:-10, rng_left[0]:rng_left[1], 210:690])
            vol = util.ImageRescale(vol, [0, 255])
            util.nii_saver(vol, dataroot + 'test_data\\',
                           'v30s{}.nii.gz'.format(j))
# Encoder channel configurations for the VAE models and sequence length.
seg_enc = (8, 16, 32, 64, 64)
syn_enc = (8, 16, 32)
t = 3
num = 6

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# denoise: orig -> LF{orig} = dn{orig}
# map_location lets CUDA-saved checkpoints load on CPU-only machines;
# without it torch.load raises when the saving device is unavailable.
model_dn = arch.res_UNet(dnoi_enc).to(device)
model_dn.load_state_dict(torch.load(modelroot + 'dn_orig_proj(orig).pt',
                                    map_location=device))

# contrast enhance: dn{orig} -> CE-LF{orig} = ce{orig}
model_refine = arch.VAE(seg_enc, syn_enc, t).to(device)
model_refine.load_state_dict(torch.load(modelroot + 'vae_refine.pt',
                                        map_location=device))

# segmentation: seg{orig} -> LF{orig}
model_vae = arch.VAE(seg_enc, syn_enc, t).to(device)
model_vae.load_state_dict(torch.load(modelroot + 'vae.pt',
                                     map_location=device))

#%% input volume
vol = util.nii_loader(dataroot + 'fovea5.nii.gz')
vol_AR = util.nii_loader(dataroot + 'AR_fovea5.nii.gz')

# crop the depth range and keep 10 vessel-layer slices
vol = vol[30:433, 30:40, :]
vol_AR = vol_AR[30:433, 30:40, :]

# run the denoise -> refine -> segment pipeline on the AR volume
vol_dn, vol_seg, vol_syn = SegVAE(vol_AR, num, model_dn, model_refine,
                                  model_vae)

util.nii_saver(vol_AR, saveroot, 'orig5.nii.gz')
util.nii_saver(vol_dn, saveroot, 'vol_dn5.nii.gz')
util.nii_saver(vol_seg, saveroot, 'vol_seg5.nii.gz')
util.nii_saver(vol_syn, saveroot, 'vol_syn5.nii.gz')
        # --- inside the per-slice loop of `binarize`; the def and loop
        # headers lie outside this chunk ---
        im_seg = Int8(vol_seg[:, i, :])
        # edge-preserving smoothing of the segmentation slice
        diffuse_seg = anisotropic_diffusion(im_seg, niter=5,
                                            option=2).astype(np.float32)
        im_enhance = ContrastEnhance(diffuse_seg)
        # Otsu threshold on the enhanced slice -> binary vessel map (0/255)
        otsu_th_opt = threshold_otsu(im_enhance)
        vol_opt[:, i, :] = np.uint8(im_enhance > otsu_th_opt) * 255

        # show one side-by-side comparison figure at slice `idx`
        if verbose == True and i == idx:
            plt.figure(figsize=(18, 8))
            plt.axis('off')
            plt.title('base1 -- base2 -- proposed', fontsize=15)
            plt.imshow(np.concatenate(
                (vol_base_1[:, i, :], vol_base_2[:, i, :], vol_opt[:, i, :]),
                axis=1), cmap='gray')
            plt.show()
    return vol_base_1, vol_base_2, vol_opt


if __name__ == "__main__":
    dataroot = "E:\\OCTA\\result\\"
    vol_seg = util.nii_loader(dataroot + "vol_seg5.nii.gz")
    vol = util.nii_loader(dataroot + "orig5.nii.gz")
    # compare the two baselines against the proposed binarization
    vol_base_1, vol_base_2, vol_opt = binarize(vol, vol_seg, True)
    util.nii_saver(vol_opt, dataroot, 'binary5.nii.gz')
    util.nii_saver(vol_base_1, dataroot, 'binary5_base_1.nii.gz')
    util.nii_saver(vol_base_2, dataroot, 'binary5_base_2.nii.gz')
import matplotlib.pyplot as plt

# `global` at module level is a no-op; kept as written
global nFrame, radius
nFrame = 5
radius = 3

dataroot = 'E:\\HumanData\\'
fovea_list = []
vm_train = ()

# collect the high-noise fovea volumes (first repetition only)
for file in os.listdir(dataroot):
    if file.startswith('HN_Fovea') and file.endswith('1.nii.gz'):
        fovea_list.append(file)

for vol in range(len(fovea_list)):
    print('volume :{}'.format(fovea_list[vol]))
    v = util.nii_loader(dataroot+fovea_list[vol])
    # drop the leading singleton/channel axis -- TODO confirm
    v = v[0,:,:,:]
    slc, H, W = v.shape
    # randomly chosen slice index to visualize
    show = random.randint(0,slc-1)
    for i in range(radius,slc-radius):
        # 2*radius+1 neighboring frames centered at i
        stack = v[i-radius:i+radius+1,:,:]
        opt = np.zeros([2*radius+1,H,H],dtype=np.float32)
        im_fix = np.ascontiguousarray(v[i,:,:])
        for j in range(2*radius+1):
            im_mov = np.ascontiguousarray(stack[j,:,:])
            # register each neighbor onto the center frame; width is
            # zero-padded up to H columns
            opt[j,:,:W] = MC.MotionCorrect(im_fix,im_mov)
        vm_train = vm_train+(opt,)
        # (visualization branch continues outside this chunk)
        if i == show:
        # --- tail of a contrast-normalization helper; its def line and the
        # opening of this cv2.normalize(...) call lie outside this chunk ---
        dst=0, alpha=0, beta=255,
        norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    return im_opt


dataroot = 'E:\\OCTA\\data\\R=3\\'
saveroot = 'E:\\OCTA\\data\\pre_processed\\'
# radii for the variance and projection windows
r_var = 3
r_proj = 7

for file in os.listdir(dataroot):
    if file.startswith('AR') and file.endswith('.nii.gz'):
        vol = util.nii_loader(dataroot + file)
        h, slc, w = vol.shape

        # local_proj{orig}
        orig_proj = loc_Proc(vol, 3, 3, slc - 3, 'mean')
        # var
        var = loc_Proc(vol, r_var, r_var, slc - r_var, 'var')
        # local_proj{var}
        _, slc, _ = var.shape
        var_proj = loc_Proc(var, r_proj, r_proj, slc - r_proj, 'mean')

        # crop all derived volumes to a common extent
        vol_vp, vol_v = cutter(var_proj, var)
        _, vol_op = cutter(var_proj, orig_proj)
        _, vol_orig = cutter(var_proj, vol)

        # per-column contrast enhancement of the original projection
        for i in range(w):
            vol_op[:, :, i] = ContrastEnhance(vol_op[:, :, i])
def __init__(self, root):
    """Load the volume at *root* and expose its slices as a list.

    ``self.data`` holds one 2-D image per position along the middle axis
    of the loaded volume.
    """
    self.vol = util.nii_loader(root)
    _, self.slc, _ = self.vol.shape
    # one slice per entry, taken along the middle axis
    self.data = [self.vol[:, k, :] for k in range(self.slc)]
import sys
sys.path.insert(0, 'E:\\tools\\')
import util
import numpy as np


def cca(vol_cca, size_th=30):
    """Build a binary mask keeping only small connected components.

    Parameters
    ----------
    vol_cca : ndarray, shape (h, d, w)
        Connected-component label volume; 0 is background.  The threshold
        logic presumes labels are ordered by decreasing component size --
        TODO confirm against the producer of vol_cca.
    size_th : int
        Component-size threshold in voxels.

    Returns
    -------
    ndarray of int, same shape as *vol_cca*
        1 where the voxel belongs to a kept (small) component, else 0.
    """
    num_component = int(vol_cca.max())
    # voxel count per label bin
    hist = np.histogram(vol_cca, num_component)
    size_vec = hist[0]
    # highest label index whose component size exceeds the threshold
    cca_th = np.sum(np.uint8(size_vec > size_th)) - 1
    # Vectorized replacement of the original O(h*d*w) Python triple loop;
    # identical condition: 0 < label <= cca_th.  The original dtype was
    # np.int, which was removed in NumPy 1.24 -- the builtin `int` is the
    # exact replacement (np.int was just an alias for it).
    vol_opt = ((vol_cca > 0) & (vol_cca <= cca_th)).astype(int)
    return vol_opt


if __name__ == "__main__":
    dataroot = 'E:\\OCTA\\binarize\\'
    vth = 15
    vol_cca = util.nii_loader(dataroot + 'vol_cca.nii.gz')
    vol_opt = cca(vol_cca, vth)
    util.nii_saver(vol_opt, dataroot, 'mask_TH={}.nii.gz'.format(vth))
if __name__ == '__main__':
    dataroot = 'E:\\HumanData\\'
    # `global` here is a no-op at top level; n_seq is presumably read by
    # GetPair -- TODO confirm
    global n_seq
    n_seq = 3

    fovea_x = []
    fovea_y = []
    pair = ()

    # high-noise (HN) inputs and low-noise (LN) targets
    for file in os.listdir(dataroot):
        if file.startswith('HN_Fovea') and file.endswith('.nii.gz'):
            fovea_x.append(file)
        elif file.startswith('LN_Fovea') and file.endswith('.nii.gz'):
            fovea_y.append(file)
    # sort so HN/LN volumes pair up index-by-index via filenames
    fovea_x.sort()
    fovea_y.sort()

    t1 = time.time()
    for i in range(len(fovea_x)):
        print('volume: {} pairing...'.format(fovea_x[i]))
        Vx = util.nii_loader(dataroot + fovea_x[i])
        Vy = util.nii_loader(dataroot + fovea_y[i])
        # HN volumes carry a leading singleton/channel axis -- TODO confirm
        Vx = Vx[0, :, :, :]
        pair = GetPair(Vx, Vy, pair, True)
    t2 = time.time()
    print('train data paired. time: {} min'.format((t2 - t1) / 60))

    # persist the paired training data
    with open(dataroot + 'UNet+LSTM_train.pickle', 'wb') as handle:
        pickle.dump(pair, handle)
""" Created on Sun Nov 29 00:32:42 2020 @author: hudew """ import sys sys.path.insert(0, 'E:\\tools\\') import util import numpy as np import matplotlib.pyplot as plt dataroot = 'E:\\OCTA\\result\\' saveroot = 'E:\\OCTA\\eval\\' vol = util.nii_loader(dataroot + 'orig5.nii.gz') vol_seg = util.nii_loader(dataroot + 'vol_seg5.nii.gz') vol_base = util.nii_loader(dataroot + 'binary5_base_2.nii.gz') vol_binary = util.nii_loader('E:\\OCTA\\result5_TH=30.nii.gz') vol_mseg = util.nii_loader('E:\\OCTA\\manualseg_fovea5.nii.gz') #%% h, slc, w = vol.shape crop = np.zeros([100, slc, 100], dtype=np.float32) crop_seg = np.zeros([100, slc, 100], dtype=np.float32) crop_mseg = np.zeros([100, slc, 100], dtype=np.float32) crop_base = np.zeros([100, slc, 100], dtype=np.float32) crop_binary = np.zeros([100, slc, 100], dtype=np.float32) for i in range(slc): crop[:, i, :] = vol[220:320, i, 180:280]
plt.imshow(top, cmap='gray') # plt.savefig("E:\\OCTA\\result\\vis.jpg") plt.show() plt.figure(figsize=(15, 5)) plt.axis('off') plt.imshow(bot, cmap='gray') # plt.savefig("E:\\OCTA\\result\\vis.jpg") plt.show() return vol_base_1, vol_base_2, vol_opt #%% import warnings warnings.filterwarnings("ignore") result_root = 'E:\\Fish\\test_result\\' data_root = 'E:\\Fish\\test_data\\' for i in range(1, 7): vol_latent = util.nii_loader(result_root + 'v30s{}_latent.nii'.format(i)) vol_x = util.nii_loader(data_root + 'v30s{}.nii.gz'.format(i)) vol_base_1, vol_base_2, vol_opt = binarize(vol_x, vol_latent, True) util.nii_saver(vol_base_1, result_root, 'v30s{}_bin_kmean.nii.gz'.format(i)) util.nii_saver(vol_base_2, result_root, 'v30s{}_bin_otsu.nii.gz'.format(i)) util.nii_saver(vol_opt, result_root, 'v30s{}_bin_LIFE.nii.gz'.format(i))
Created on Tue Nov 17 01:00:17 2020

@author: hudew
"""
import sys
sys.path.insert(0, 'E:\\tools\\')
import util
import pickle, random, os, time
import numpy as np
import matplotlib.pyplot as plt
import pyelastix

dataroot = 'E:\\OCTA\\data\\'
vol = util.nii_loader(dataroot + 'fovea_svd.nii')
# reorder axes -- presumably to put slices on the middle axis; TODO confirm
vol = np.transpose(vol, (2, 1, 0))
r = 3


def deform(im_fix, im_mov):
    # pyelastix requires C-contiguous arrays
    im_fix = np.ascontiguousarray(im_fix)
    im_mov = np.ascontiguousarray(im_mov)
    # Get params and change a few values
    params = pyelastix.get_default_params(type='BSPLINE')
    params.NumberOfResolutions = 4
    params.MaximumNumberOfIterations = 500
    params.FinalGridSpacingInVoxels = 10
    # (function body continues outside this chunk)
# sort the high-noise / low-noise / self-fused ONH volumes into matched lists
for file in os.listdir(dataroot):
    if file.startswith('HN_ONH'):
        hn_list.append(file)
    elif file.startswith('LN_ONH'):
        ln_list.append(file)
    elif file.startswith('SF_ONH'):
        sf_list.append(file)
# sort so the three lists align index-by-index via filenames
hn_list.sort()
ln_list.sort()
sf_list.sort()

pair_data = ()
for i in range(len(hn_list)):
    ln = util.nii_loader(dataroot + ln_list[i])
    # trim `radius` frames at each end -- presumably the frames self-fusion
    # could not produce; TODO confirm
    ln = ln[radius:-radius, :, :]
    sf_ln = util.nii_loader(dataroot + sf_list[i])
    Nsf_test_loader = Data.DataLoader(
        dataset=Nsf_test_dataset(dataroot + hn_list[i]))
    print('dataloader {} created'.format(hn_list[i]))
    # run the Nsf network on every high-noise B-scan of this volume
    for step, x in enumerate(Nsf_test_loader):
        with torch.no_grad():
            x = Variable(x).to(device)
            pred = Nsf(x).detach().cpu().numpy()
            # high noise bscan -- Nsf(x) -- sobel
            x_stack = np.zeros([3, 512, 512], dtype=np.float32)
            # (loop body continues outside this chunk)
import the whole volume, radius
output the self-fused volume
'''
import util, tool
import os
import numpy as np
import matplotlib.pyplot as plt

dataroot = '/home/dewei/Desktop/octa/data/'
temp = '/home/dewei/Desktop/octa/temp/'
volume = ["vol_octa"]
radius = 3

for i in range(len(volume)):
    vol = util.nii_loader(dataroot + volume[i] + '.nii.gz')
    # put the slice axis in the middle: (h, slc, w)
    vol = np.transpose(vol, [1, 0, 2])
    h, slc, w = vol.shape
    # frames lost at each end of the sliding window
    n_slc = slc - 2 * radius

    # define the output volume
    vol_reg = np.zeros([h, n_slc, w], dtype=np.float32)
    vol_non_reg = np.zeros([h, n_slc, w], dtype=np.float32)

    for j in range(radius, slc - radius):
        # 2*radius+1 neighboring slices centered at j
        stack = vol[:, j - radius:j + radius + 1, :]
        stack_rg = tool.greedy(stack, temp)
        # self-fusion of the UNregistered stack (registered path presumably
        # follows outside this chunk)
        vol_non_reg[:, j - radius, :] = util.ImageRescale(
            tool.sf(stack, temp), [0, 255])
        # (loop body continues outside this chunk)
#%% dataroot = 'E:\\HumanData\\' HN_list = [] SF_list = [] for file in os.listdir(dataroot): if file.startswith('HN'): HN_list.append(file) elif file.startswith('SF'): SF_list.append(file) HN_list.sort() SF_list.sort() # use only 2 adjacent neighbors global nch nch = 3 train_data = [] for i in range(len(HN_list)): HN = util.nii_loader(dataroot + HN_list[i]) HN = HN[3:-3, :, :] # hard coded, radius of self-fusion used SF = util.nii_loader(dataroot + SF_list[i]) print('volume {} pairing...'.format(i + 1)) train_data = GetPair(HN, SF, train_data) with open(dataroot + 'PM_traindata.pickle', 'wb') as handle: pickle.dump(train_data, handle) print('done.')