def run_expmt():
    for file_id in file_id_list:

        # load k-space and generate undersampling mask
        f, ksp_orig = load_h5(file_id)
        ksp_orig = torch.from_numpy(ksp_orig)
        mask = get_mask(ksp_orig)

        # initialize network; ksp_orig_ is the rescaled k-space
        net, net_input, ksp_orig_, _ = init_convdecoder(ksp_orig, mask)

        # apply mask after rescaling k-space
        ksp_masked = SCALE_FAC * ksp_orig_ * mask
        img_masked = ifft_2d(ksp_masked)

        # fit network, get net output
        net, mse_wrt_ksp, mse_wrt_img = fit(
            ksp_masked=ksp_masked, img_masked=img_masked, net=net,
            net_input=net_input, mask2d=mask, num_iter=NUM_ITER)
        img_out, _ = net(net_input.type(dtype))
        img_out = reshape_adj_channels_to_complex_vals(img_out[0])

        # perform dc step
        ksp_est = fft_2d(img_out)
        ksp_dc = torch.where(mask, ksp_masked, ksp_est)

        # create estimated, data-consistent, and ground-truth images from k-space
        img_est = crop_center(root_sum_squares(ifft_2d(ksp_est)).detach(), dim, dim)
        img_dc = crop_center(root_sum_squares(ifft_2d(ksp_dc)).detach(), dim, dim)
        img_gt = crop_center(root_sum_squares(ifft_2d(ksp_orig)), dim, dim)

        # save results
        np.save('{}{}_est.npy'.format(path_out, file_id), img_est)
        np.save('{}{}_dc.npy'.format(path_out, file_id), img_dc)
        np.save('{}{}_gt.npy'.format(path_out, file_id), img_gt)
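# A minimal sketch of what the fft_2d / ifft_2d helpers used above are assumed to do:
# centered, orthonormal 2D FFTs over the last two (spatial) dimensions of a complex
# tensor. This is an illustrative assumption, not the repo's actual implementation.
import torch

def fft_2d_sketch(x: torch.Tensor) -> torch.Tensor:
    ''' image -> k-space, centered and orthonormal over the last two dims '''
    return torch.fft.fftshift(
        torch.fft.fftn(torch.fft.ifftshift(x, dim=(-2, -1)),
                       dim=(-2, -1), norm='ortho'),
        dim=(-2, -1))

def ifft_2d_sketch(k: torch.Tensor) -> torch.Tensor:
    ''' k-space -> image, inverse of fft_2d_sketch '''
    return torch.fft.fftshift(
        torch.fft.ifftn(torch.fft.ifftshift(k, dim=(-2, -1)),
                        dim=(-2, -1), norm='ortho'),
        dim=(-2, -1))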
def run_expmt(file_id_list):
    for file_id in file_id_list:

        if os.path.exists('{}{}_dc.npy'.format(path_out, file_id)):
            continue

        f, ksp_orig = load_h5_fastmri(file_id)
        ksp_orig = torch.from_numpy(ksp_orig)
        mask = get_mask(ksp_orig)

        net, net_input, ksp_orig_ = init_convdecoder(ksp_orig, mask)

        ksp_masked = 0.1 * ksp_orig_ * mask
        net = fit(ksp_masked, net, net_input, mask)

        img_out = net(net_input.type(dtype))
        img_out = reshape_adj_channels_to_complex_vals(img_out[0])

        ksp_est = fft_2d(img_out)
        ksp_dc = torch.where(mask, ksp_masked, ksp_est)

        #img_est = crop_center(root_sum_squares(ifft_2d(ksp_est)).detach(), dim, dim)
        img_dc = crop_center(root_sum_squares(ifft_2d(ksp_dc)).detach(), dim, dim)
        # note: use unscaled ksp_orig to make gt -- different from original processing
        img_gt = crop_center(root_sum_squares(ifft_2d(ksp_orig)), dim, dim)

        np.save('{}{}_dc.npy'.format(path_out, file_id), img_dc)
        np.save('{}{}_gt.npy'.format(path_out, file_id), img_gt)
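# The coil-combination and cropping helpers used throughout are assumed to behave
# roughly as below: root-sum-of-squares across the coil dimension, then a centered
# crop to (dim, dim). Hypothetical signatures, for illustration only.
import torch

def root_sum_squares_sketch(imgs: torch.Tensor) -> torch.Tensor:
    ''' combine complex coil images of dim (nc, x, y) into one magnitude image (x, y) '''
    return torch.sqrt(torch.sum(torch.abs(imgs) ** 2, dim=0))

def crop_center_sketch(img: torch.Tensor, crop_x: int, crop_y: int) -> torch.Tensor:
    ''' centered crop of a 2D image to (crop_x, crop_y) '''
    x, y = img.shape[-2:]
    x0, y0 = (x - crop_x) // 2, (y - crop_y) // 2
    return img[..., x0:x0 + crop_x, y0:y0 + crop_y]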
def run_demo():
    # load demo k-space and generate undersampling mask
    ksp_orig = load_h5_fastmri(file_id=None, demo=True)
    mask = get_mask(ksp_orig)

    # initialize network; ksp_orig_ is the rescaled k-space
    net, net_input, ksp_orig_ = init_convdecoder(ksp_orig)

    # apply mask after rescaling k-space, then fit network
    ksp_masked = 0.1 * ksp_orig_ * mask
    net = fit(ksp_masked, net, net_input, mask)

    img_out = net(net_input.type(dtype))
    img_out = reshape_adj_channels_to_complex_vals(img_out[0])

    # perform dc step
    ksp_est = fft_2d(img_out)
    ksp_dc = torch.where(mask, ksp_masked, ksp_est)

    # create data-consistent, ground-truth, and zero-filled images from k-space
    img_dc = crop_center(root_sum_squares(ifft_2d(ksp_dc)).detach(), dim, dim)
    img_gt = crop_center(root_sum_squares(ifft_2d(ksp_orig)), dim, dim)
    img_zf = crop_center(root_sum_squares(ifft_2d(ksp_masked)), dim, dim)

    np.save('data/out.npy', img_dc)
    np.save('data/gt.npy', img_gt)
    np.save('data/zf.npy', img_zf)
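# Sketch of the assumed behavior of reshape_adj_channels_to_complex_vals: the network
# emits a real tensor of dim (2*nc, x, y) which is reinterpreted as nc complex coil
# images. Whether the real/imag parts occupy the first/second halves of the channel
# axis (assumed here) or are interleaved depends on the repo's convention.
import torch

def reshape_adj_channels_to_complex_vals_sketch(x: torch.Tensor) -> torch.Tensor:
    ''' (2*nc, x, y) real -> (nc, x, y) complex, assuming [real | imag] channel halves '''
    nc = x.shape[0] // 2
    return torch.complex(x[:nc], x[nc:])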
def run_expmt(args):
    for file_id in args.file_id_list:

        ksp_orig = load_qdess(file_id, idx_kx=None)  # default central slice in kx (axial)

        for accel in args.accel_list:

            # manage paths for input/output
            path_base = '/bmrNAS/people/dvv/out_qdess/accel_{}x/'.format(accel)
            path_out = '{}{}/'.format(path_base, args.dir_out)
            args.path_gt = path_base + 'gt/'
            if os.path.exists('{}MTR_{}_e1.npy'.format(path_out, file_id)):
                continue
            if not os.path.exists(path_out):
                os.makedirs(path_out)
            if not os.path.exists(args.path_gt):
                os.makedirs(args.path_gt)

            # initialize network
            net1, net_input1, ksp_orig_ = init_convdecoder(ksp_orig, fix_random_seed=False)
            net2, net_input2, _ = init_convdecoder(ksp_orig, fix_random_seed=False)

            # apply mask after rescaling k-space. want complex tensors dim (nc, ky, kz)
            ksp_masked, mask = apply_mask(ksp_orig_, accel, calib=args.calib, expmt=True)

            # fit network, get net output - default 10k iterations, lam_tv=1e-8
            net1, net2 = fit(ksp_masked=ksp_masked, net1=net1, net_input1=net_input1,
                             net2=net2, net_input2=net_input2, mask=mask)

            im_out1 = net1(net_input1.type(dtype))  # real tensor dim (2*nc, kx, ky)
            im_out2 = net2(net_input2.type(dtype))  # real tensor dim (2*nc, kx, ky)
            im_out = torch.mean(torch.stack([im_out1, im_out2]), dim=0)
            im_out = reshape_adj_channels_to_complex_vals(im_out[0])  # complex tensor dim (nc, kx, ky)

            # perform dc step
            ksp_est = fft_2d(im_out)
            ksp_dc = torch.where(mask, ksp_masked, ksp_est)
            np.save('{}/MTR_{}_ksp_dc.npy'.format(path_out, file_id),
                    ksp_dc.detach().numpy())

            # create data-consistent, ground-truth images from k-space
            im_1_dc = root_sum_squares(ifft_2d(ksp_dc[:8])).detach()
            im_2_dc = root_sum_squares(ifft_2d(ksp_dc[8:])).detach()
            np.save('{}MTR_{}_e1.npy'.format(path_out, file_id), im_1_dc)
            np.save('{}MTR_{}_e2.npy'.format(path_out, file_id), im_2_dc)

            # save gt w proper array scaling if dne
            if not os.path.exists('{}MTR_{}_e1_gt.npy'.format(args.path_gt, file_id)):
                im_1_gt = root_sum_squares(ifft_2d(ksp_orig[:8]))
                im_2_gt = root_sum_squares(ifft_2d(ksp_orig[8:]))
                np.save('{}MTR_{}_e1_gt.npy'.format(args.path_gt, file_id), im_1_gt)
                np.save('{}MTR_{}_e2_gt.npy'.format(args.path_gt, file_id), im_2_gt)

            print('recon {}'.format(file_id))

    return
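# Hypothetical driver showing the fields run_expmt(args) reads (file_id_list,
# accel_list, calib, dir_out). The flag names and defaults below are illustrative
# assumptions, not the repo's actual CLI.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--file_id_list', nargs='+', required=True)
    parser.add_argument('--accel_list', nargs='+', type=int, default=[8])
    parser.add_argument('--calib', type=int, default=32)  # width of fully sampled calibration region (assumed)
    parser.add_argument('--dir_out', type=str, required=True)
    run_expmt(parser.parse_args())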
def run_expmt():
    path_in = '/bmrNAS/people/arjun/data/qdess_knee_2020/files_recon_calib-16/'
    #files = [f for f in listdir(path_in) if isfile(join(path_in, f))]
    #files.sort()
    #NUM_SAMPS = 10 # number of samples to recon
    NUM_ITER = 10000
    ACCEL_LIST = [8]  # 4, 6, 8]

    for fn in test_set:  #files[:NUM_SAMPS]:

        # load data
        f = h5py.File(path_in + fn, 'r')
        try:
            ksp = torch.from_numpy(f['kspace'][()])
        except KeyError:
            print('No kspace in file {} w keys {}'.format(fn, f.keys()))
            f.close()
            continue
        f.close()

        # NOTE: if change to echo2, must manually change path nomenclature
        ksp_vol = ksp[:, :, :, 0, :].permute(3, 0, 1, 2)  # get echo1, reshape to be (nc, kx, ky, kz)

        # get central slice in kx, i.e. axial plane b/c we undersample in (ky, kz)
        idx_kx = ksp_vol.shape[1] // 2
        ksp_orig = ksp_vol[:, idx_kx, :, :]

        for ACCEL in ACCEL_LIST:

            path_out = '/bmrNAS/people/dvv/out_qdess/accel_{}x/echo1/'.format(ACCEL)

            # original masks created w central region 32x32 forced to 1's
            mask = torch.from_numpy(np.load('ipynb/masks/mask_poisson_disc_{}x.npy'.format(ACCEL)))

            # initialize network
            net, net_input, ksp_orig_, _ = init_convdecoder(ksp_orig, mask)

            # apply mask after rescaling k-space. want complex tensors dim (nc, ky, kz)
            ksp_masked = ksp_orig_ * mask
            img_masked = ifft_2d(ksp_masked)

            # fit network, get net output
            net, mse_wrt_ksp, mse_wrt_img = fit(
                ksp_masked=ksp_masked, img_masked=img_masked, net=net,
                net_input=net_input, mask2d=mask, num_iter=NUM_ITER)
            img_out, _ = net(net_input.type(dtype))  # real tensor dim (2*nc, kx, ky)
            img_out = reshape_adj_channels_to_complex_vals(img_out[0])  # complex tensor dim (nc, kx, ky)

            # perform dc step
            ksp_est = fft_2d(img_out)
            ksp_dc = torch.where(mask, ksp_masked, ksp_est)

            # create data-consistent, ground-truth images from k-space
            img_dc = root_sum_squares(ifft_2d(ksp_dc)).detach()
            img_gt = root_sum_squares(ifft_2d(ksp_orig))

            # save results
            samp = fn.split('.h5')[0]  #+ '_echo2'
            np.save('{}{}_dc.npy'.format(path_out, samp), img_dc)
            np.save('{}{}_gt.npy'.format(path_out, samp), img_gt)
            print('recon {}'.format(samp))

    return
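# Sketch of what a mask-application helper like apply_mask() (used in the args-driven
# variant above) is assumed to do: load a precomputed Poisson-disc mask for the given
# acceleration, force a fully sampled central calibration region, and multiply it into
# the rescaled k-space. The mask path and calibration handling are illustrative
# assumptions based on the comments in the scripts above.
import numpy as np
import torch

def apply_mask_sketch(ksp: torch.Tensor, accel: int, calib: int = 32):
    ''' ksp: complex tensor dim (nc, ky, kz) -> (masked k-space, bool mask) '''
    mask = torch.from_numpy(
        np.load('ipynb/masks/mask_poisson_disc_{}x.npy'.format(accel))).bool()
    ny, nz = mask.shape[-2:]
    y0, z0 = (ny - calib) // 2, (nz - calib) // 2
    mask[..., y0:y0 + calib, z0:z0 + calib] = True  # fully sample calibration region
    return ksp * mask, mask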