## NOTE: the functions below assume module-level imports of numpy as np and the
## project's own `io` (image I/O) and `utils` (FBP helpers) modules, plus the
## `myfloat` dtype defined elsewhere in the package.

def reconstr_nnfbp( target_path , output_path , filein , angles , ctr , weights ,
                    offsets , minIn , maxIn , NHidden , filters ):
    ## Read low-quality sinogram
    sino = io.readImage( target_path + filein ).astype( myfloat )
    nang , npix = sino.shape

    ## Allocate array for reconstruction
    reco = np.zeros( ( npix , npix ) , dtype=myfloat )

    ## Do the required multiple reconstructions, one per hidden node
    for i in range( NHidden ):
        filt   = filters[i,:]
        hidRec = utils.fbp( sino , angles , [ctr,0.0] , filt )
        reco  += weights[i] * sigmoid( hidRec - offsets[i] )

    ## Apply last sigmoid
    reco = sigmoid( reco - weights[-1] )

    ## Adjust image range
    reco = 2 * ( reco - 0.25 ) * ( maxIn - minIn ) + minIn

    ## Save reconstruction
    ext     = filein[-4:]
    fileout = output_path + filein[:-4] + '_nnreco' + ext
    io.writeImage( fileout , reco )
    print( '\nSaving NN-FBP reconstruction in:\n' , fileout )

    return

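# `sigmoid` is used by reconstr_nnfbp() but is not defined in this section; a
# minimal sketch, assuming the standard logistic activation used by NN-FBP:
def sigmoid( x ):
    ## Element-wise logistic function on a NumPy array
    return 1.0 / ( 1.0 + np.exp( -x ) )
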
def main(args):
    if args.use_cuda:
        print("Using GPU")
    else:
        print("Using CPU")

    geo_full = build_geo(args.full_view)
    geo_sparse = build_geo(args.sparse_view)

    datasets = BuildDataSet(args.data_root_path, args.test_folder, geo_full, geo_sparse, None, "test")
    data_length = len(datasets)
    if data_length != args.data_length:
        print("Args.data_length is wrong!")
        sys.exit(0)

    # s_sparse = 0
    # m_sparse = 0
    # p_sparse = 0
    s_inter = 0
    m_inter = 0
    p_inter = 0
    s_new = 0
    m_new = 0
    p_new = 0

    for i in range(224):
        print("Testing sample {}/224".format(i))
        sample = datasets[i]
        image_true = Any2One(sample["image_true"])
        image_full = Any2One(sample["image_full"])
        image_sparse = Any2One(sample["image_sparse"])
        image_inter = Any2One(sample["image_inter"])
        sinogram_full = sample["sinogram_full"]
        sinogram_sparse = sample["sinogram_sparse"]
        sinogram_inter = sample["sinogram_inter"]

        # sinogram_new = SinoInter(sinogram_inter, geo_full, -2, "sinogram_LineInter", "z")
        sinogram_new = newinter(sinogram_sparse)
        image_new = Any2One(fbp(sinogram_new, geo_full))

        # ssim, mse, psnr = ssim_mse_psnr(image_full, image_sparse)
        # s_sparse = s_sparse + ssim
        # m_sparse = m_sparse + mse
        # p_sparse = p_sparse + psnr

        ssim, mse, psnr = ssim_mse_psnr(image_full, image_inter)
        s_inter = s_inter + ssim
        m_inter = m_inter + mse
        p_inter = p_inter + psnr

        ssim, mse, psnr = ssim_mse_psnr(image_full, image_new)
        s_new = s_new + ssim
        m_new = m_new + mse
        p_new = p_new + psnr

    # print("Sparse:", s_sparse/224, m_sparse/224, p_sparse/224)
    print("Inter:", s_inter / 224, m_inter / 224, p_inter / 224)
    print("New:", s_new / 224, m_new / 224, p_new / 224)

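# Hedged sketch of the argparse setup this main() expects: the argument names
# match the attributes read above, but the defaults are illustrative placeholders,
# not settings taken from the original repository.
def build_args():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--use_cuda", action="store_true")           # run on GPU if set
    parser.add_argument("--full_view", type=int, default=1024)       # placeholder view count
    parser.add_argument("--sparse_view", type=int, default=64)       # placeholder view count
    parser.add_argument("--data_root_path", type=str, default="./data")
    parser.add_argument("--test_folder", type=str, default="test")
    parser.add_argument("--data_length", type=int, default=224)      # expected dataset size
    return parser.parse_args()
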
def reconstr_filter_custom(sino, angles, ctr, filt_custom, picked, nhh, l):
    ## Prepare filter: place the first half of the custom coefficients on the even indices
    n = len(filt_custom)
    nh = int(0.5 * n)
    filt = np.zeros(len(filt_custom), dtype=myfloat)
    filt[::2] = filt_custom[:nh]

    ## Reconstruction
    reco = utils.fbp(sino, angles, [ctr, 0.0], filt)

    # import myImageDisplay as dis
    # dis.plot( reco , 'Reconstr j='+str(ind) )

    ## Pick up only the pixels of the central ROI
    # reco = reco[ picked ]
    reco = reco[nhh - l:nhh + l, nhh - l:nhh + l].reshape(-1)

    return reco

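# Standalone sketch (not called by the pipeline) illustrating the even-index
# packing used above: the first half of the basis coefficients is interleaved
# with zeros before being handed to utils.fbp. All names here are illustrative.
def _demo_filter_packing():
    import numpy as np
    filt_custom = np.array([1., 2., 3., 4., 5., 6., 7., 8.])
    filt = np.zeros(8)
    filt[::2] = filt_custom[:4]
    return filt          # -> array([1., 0., 2., 0., 3., 0., 4., 0.])
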
def main(args):
    if args.use_cuda:
        print("Using GPU")
    else:
        print("Using CPU")

    geo_full = build_geo(args.full_view)
    geo_sparse = build_geo(args.sparse_view)

    result_path_1 = args.root_path + "/IterDa/results/Iter_1/v3/model/IterDa_E199_val_Best.pth"
    model_i1 = torch.load(result_path_1, map_location=torch.device('cpu'))

    datasets = BuildDataSet(args.data_root_path, args.test_folder, geo_full, geo_sparse, None, "test")
    sample = datasets[0]
    image_true = sample["image_true"]
    image_full = sample["image_full"]
    image_sparse = sample["image_sparse"]
    sinogram_sparse = sample["sinogram_sparse"]
    sinogram_full = sample["sinogram_full"]

    image_pred = pred_sample(image_sparse, model_i1)
    sinogram_full_pred = project(image_pred, geo_full)
    sinogram_sparse_pred = sparse_view_f(sinogram_full_pred, geo_full["sino_views"], geo_sparse["sino_views"])
    sinogram_updata = updata_sinogram(sinogram_sparse, sinogram_full_pred)
    image_updata = fbp(sinogram_updata, geo_full)

    # plt.figure()
    # plt.subplot(131), plt.xticks([]), plt.yticks([]), plt.imshow(sinogram_sparse, cmap="gray"), plt.title("(a)", y=-2)
    # plt.subplot(132), plt.xticks([]), plt.yticks([]), plt.imshow(sinogram_sparse_pred, cmap="gray"), plt.title("(b)", y=-2)
    # plt.subplot(133), plt.xticks([]), plt.yticks([]), plt.imshow(sinogram_sparse - sinogram_sparse_pred, cmap="gray"), plt.title("(c)", y=-2)
    # plt.savefig("V:/users/gy/MyProject/Resul/Tred/Image/image4-3.png")
    # plt.show()

    plt.figure()
    plt.subplot(131), plt.xticks([]), plt.yticks([]), plt.imshow(image_full, cmap="gray"), plt.title("(a)", y=-2)
    plt.subplot(132), plt.xticks([]), plt.yticks([]), plt.imshow(image_pred, cmap="gray"), plt.title("(b)", y=-2)
    plt.subplot(133), plt.xticks([]), plt.yticks([]), plt.imshow(image_updata, cmap="gray"), plt.title("(c)", y=-2)
    # plt.savefig("V:/users/gy/MyProject/Resul/Tred/Image/image4-3.png")
    plt.show()

    ssim, mse, psnr = ssim_mse_psnr(Any2One(image_full), Any2One(image_pred))
    print("Pred:", ssim, mse, psnr)
    ssim, mse, psnr = ssim_mse_psnr(Any2One(image_full), Any2One(image_updata))
    print("Updata:", ssim, mse, psnr)
    print("Run Done")

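# A minimal sketch of what updata_sinogram() is assumed to do above: keep the
# measured projections at the (evenly spaced) sparse-view angles and fill the
# remaining rows from the network-predicted full-view sinogram. The project's
# real helper may differ; this assumes the full view count is an exact multiple
# of the sparse view count.
def updata_sinogram_sketch(sinogram_sparse, sinogram_full_pred):
    import numpy as np
    sino = np.copy(sinogram_full_pred)
    step = sinogram_full_pred.shape[0] // sinogram_sparse.shape[0]
    sino[::step, :] = sinogram_sparse        # overwrite predicted rows with measured views
    return sino
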
def reconstr_filter_custom( sino , angles , ctr , filt_custom , picked , debug ,
                            train_path , filein , ind ):
    ## Prepare filter: place the first half of the custom coefficients on the even indices
    n    = len( filt_custom )
    nh   = int( 0.5 * n )
    filt = np.zeros( len( filt_custom ) , dtype=myfloat )
    filt[::2] = filt_custom[:nh]

    ## Reconstruction
    reco = utils.fbp( sino , angles , [ctr,0.0] , filt )

    ## Debugging: save the low-quality reconstruction
    if debug is True:
        filedbg = filein
        ext     = filedbg[-4:]
        filedbg += '_lq0' + str( ind ) + ext
        io.writeImage( train_path + filedbg , reco )

    ## Pick up only selected pixels
    reco = reco[ picked ]

    return reco

def create_training_file( input_path , train_path , filein , angles , npix_train_slice ,
                          idx , nang_lq , ctr_hq , nfilt , filt_custom , filt , debug ):
    ## Read high-quality sinogram
    sino_hq = io.readImage( input_path + filein ).astype( myfloat )

    ## Reconstruct high-quality sinogram with standard filter
    params  = utils.select_filter( ctr_hq , filt )
    reco_hq = utils.fbp( sino_hq , angles , params , None )

    ## Debugging: save the high-quality reconstruction
    if debug is True:
        filedbg = filein
        ext     = filedbg[-4:]
        filedbg += '_hq' + ext
        io.writeImage( train_path + filedbg , reco_hq )

    ## Create output training array
    train_data = np.zeros( ( npix_train_slice , nfilt+1 ) , dtype=myfloat )

    ## Randomly select training pixels
    picked = utils.getPickedIndices( idx , npix_train_slice )

    ## Save validation data (target column)
    train_data[:,-1] = reco_hq[picked]

    ## Downsample sinogram
    sino_lq , angles_lq = utils.downsample_sinogram_angles( sino_hq , angles , nang_lq )

    ## Reconstruct low-quality sinograms with customized filters
    for j in range( nfilt ):
        train_data[:,j] = reconstr_filter_custom( sino_lq , angles_lq , ctr_hq ,
                                                  filt_custom[j,:] , picked , debug ,
                                                  train_path , filein , j )

    ## Save training data
    filename = filein
    fileout  = train_path + filename[:-4] + '_train.npy'
    np.save( fileout , train_data )
    print( '\nTraining data saved in:\n' , fileout )

def train(dataloaders, geo_full, option, zOf, ref, weg):
    ssim_all = 0
    mse_all = 0
    psnr_all = 0
    n_done = 0
    for i, batch in enumerate(dataloaders):
        # Only the first 10 samples are evaluated
        if i < 10:
            print("Sample {}/{}".format(i + 1, 224))
            image_full = batch[ref][0].numpy()
            sinogram_inter = batch["sinogram_inter"][0].numpy()

            sinogram_new = SinoInter(sinogram_inter, geo_full, weg, option, zOf)
            image_new = fbp(sinogram_new, geo_full)

            ssim, mse, psnr = ssim_mse_psnr(image_full, image_new)
            ssim_all = ssim_all + ssim
            mse_all = mse_all + mse
            psnr_all = psnr_all + psnr
            n_done = n_done + 1

    # Average over the number of samples actually processed
    ssim_avg = ssim_all / n_done
    mse_avg = mse_all / n_done
    psnr_avg = psnr_all / n_done
    return ssim_avg, mse_avg, psnr_avg

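# Hedged usage sketch: sweeping the interpolation weight `weg` with train().
# The option string "sinogram_LineInter", axis "z" and reference key "image_full"
# appear elsewhere in this code; the weight grid below is purely illustrative.
def sweep_weights(dataloaders, geo_full):
    best = None
    for weg in [-4, -2, 0, 2, 4]:                       # illustrative grid
        ssim_avg, mse_avg, psnr_avg = train(dataloaders, geo_full,
                                            "sinogram_LineInter", "z",
                                            "image_full", weg)
        print("weg={} ssim={:.4f} mse={:.6f} psnr={:.2f}".format(weg, ssim_avg, mse_avg, psnr_avg))
        if best is None or psnr_avg > best[1]:
            best = (weg, psnr_avg)
    return best          # (best weight, best average PSNR)
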
def main():
    ## Initial print
    print('\n')
    print('########################################################')
    print('###           CREATING TRAINING DATASET             ###')
    print('########################################################')
    print('\n')

    ## Read config file (a Python file defining input_path, train_path, input_files_hq,
    ## nang_lq, ctr_hq, roi_*, npix_train_slice, ncores, ...)
    if len(sys.argv) < 2:
        sys.exit('\nERROR: Missing input config file .cfg!\n')
    else:
        cfg_file = open(sys.argv[1], 'r')
        exec(cfg_file.read())
        cfg_file.close()

    ## Get list of input files
    cwd = os.getcwd()
    input_path = utils.analyze_path(input_path, mode='check')

    os.chdir(input_path)
    file_list = []
    file_list.append(sorted(glob.glob('*' + input_files_hq + '*')))
    os.chdir(cwd)

    nfiles = len(file_list[0])
    if nfiles == 0:
        sys.exit('\nERROR: No file *' + input_files_hq + '* found!\n')

    train_path = utils.analyze_path(train_path, mode='create')

    print('\nInput data folder:\n', input_path)
    print('\nTrain data folder:\n', train_path)
    print('\nInput high-quality sinograms: ', nfiles)

    ## Read one file to get the sinogram shape
    sino = io.readImage(input_path + file_list[0][0])
    nang, npix = sino.shape
    print('\nSinogram(s) with ', nang, ' views X ', npix, ' pixels')

    ## Create array of projection angles
    angles = utils.create_projection_angles(nang=nang)

    ## Compute number of views for low-quality training sinograms
    factor = nang / (nang_lq * 1.0)
    print('\nDownsampling factor for training sinograms: ', factor)

    ## Create customized filters
    print('\nCreating customized filters ....')
    filt_size = 2 * (2 ** int(np.ceil(np.log2(npix))))
    filt_custom = utils.generateFilterBasis(filt_size, 2)
    nfilt = filt_custom.shape[0]

    ## Region of interest to select training data
    idx = utils.getIDX(npix, roi_l, roi_r, roi_b, roi_t)
    nh = int(npix * 0.5)
    l = np.abs(roi_l)

    ## Create training dataset
    print('\nCreating training dataset ....', end='')

    ncores_avail = mproc.cpu_count()
    if ncores > ncores_avail:
        ncores = ncores_avail

    for i in range(nfiles):
        ## Read high-quality sinogram
        sino_hq = io.readImage(input_path + file_list[0][i]).astype(myfloat)

        ## Reconstruct high-quality sinogram with standard filter
        reco_hq = utils.fbp(sino_hq, angles, [ctr_hq, 1.0], None)

        ## Create output training array
        train_data = np.zeros((npix_train_slice, nfilt + 1), dtype=myfloat)

        ## Randomly select training pixels
        picked = utils.getPickedIndices(idx, npix_train_slice)

        ## Save validation data (target column): central ROI of the high-quality reconstruction
        # train_data[:,-1] = reco_hq[picked]
        train_data[:, -1] = reco_hq[nh - l:nh + l, nh - l:nh + l].reshape(-1)

        ## Downsample sinogram
        sino_lq, angles_lq = utils.downsample_sinogram_angles(sino_hq, angles, nang_lq)

        ## Reconstruct low-quality sinograms with customized filters
        # pool = mproc.Pool( processes=ncores )
        # results = [ pool.apply_async( reconstr_filter_custom ,
        #                               args=( sino_lq , angles_lq , ctr_hq , filt_custom[j,:] , picked ) ) \
        #             for j in range( nfilt ) ]
        # train_data[:,:nfilt] = np.array( [ res.get() for res in results ] ).reshape( npix_train_slice , nfilt )
        # pool.close()
        # pool.join()

        for j in range(nfilt):
            train_data[:, j] = reconstr_filter_custom(sino_lq, angles_lq, ctr_hq,
                                                      filt_custom[j, :], picked, nh, l)

        ## Save training data
        filename = file_list[0][i]
        fileout = train_path + filename[:-4] + '_train.npy'
        np.save(fileout, train_data)
        print('\nTraining data saved in:\n', fileout)

        # filename = file_list[0][i]
        # fileout = train_path + filename[:len(filename)-4] + '_reco.DMP'
        # io.writeImage( fileout , reco_hq )

    print('\n')

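# Hedged example of the .cfg file exec'ed by main() above: it is plain Python
# that must define the names the script reads. The values below are placeholders
# for illustration, not settings from the original repository.
#
#   input_path       = './data/sinograms/'   # folder with high-quality sinograms
#   train_path       = './data/train/'       # output folder for *_train.npy files
#   input_files_hq   = 'sino_hq'             # substring matching the HQ sinogram files
#   nang_lq          = 64                    # views of the low-quality sinograms
#   ctr_hq           = 256.0                 # centre of rotation of the HQ sinograms
#   npix_train_slice = 10000                 # training pixels picked per slice
#   roi_l, roi_r     = -128, 128             # ROI limits (left / right)
#   roi_b, roi_t     = -128, 128             # ROI limits (bottom / top)
#   ncores           = 4                     # cores for the (disabled) multiprocessing path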