def test_mixedL12Norm(self):
    numpy.random.seed(1)
    M, N, K = 2, 3, 5
    ig = ImageGeometry(voxel_num_x=M, voxel_num_y=N)
    u1 = ig.allocate('random')
    u2 = ig.allocate('random')
    U = BlockDataContainer(u1, u2, shape=(2, 1))

    # Define unscaled and scaled versions of the function
    f_no_scaled = MixedL21Norm()
    f_scaled = 1 * MixedL21Norm()

    # check call
    a1 = f_no_scaled(U)
    a2 = f_scaled(U)
    self.assertNumpyArrayAlmostEqual(a1, a2)

    tmp = [el**2 for el in U.containers]
    self.assertBlockDataContainerEqual(BlockDataContainer(*tmp), U.power(2))

    # check proximal_conjugate, with and without out
    z1 = f_no_scaled.proximal_conjugate(U, 1)
    u3 = ig.allocate('random')
    u4 = ig.allocate('random')
    z3 = BlockDataContainer(u3, u4, shape=(2, 1))
    f_no_scaled.proximal_conjugate(U, 1, out=z3)
    self.assertBlockDataContainerAlmostEqual(z3, z1, decimal=5)
def setUp(self):
    ig = ImageGeometry(2, 3, 2)
    data = ig.allocate(1, dtype=np.float32)
    noisy_data = data + 1

    # TV regularisation parameter
    self.alpha = 1

    self.fidelities = [
        0.5 * L2NormSquared(b=noisy_data),
        L1Norm(b=noisy_data),
        KullbackLeibler(b=noisy_data, use_numba=False)
    ]

    F = self.alpha * MixedL21Norm()
    K = GradientOperator(ig)

    # Compute operator norm
    normK = K.norm()

    # Primal & dual stepsizes
    self.sigma = 1. / normK
    self.tau = 1. / normK
    self.F = F
    self.K = K
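# A minimal sketch (not part of the original test class) of how this fixture
# could be consumed: loop over each fidelity term and run a short PDHG
# reconstruction with the shared TV term self.F and operator self.K.
# The method name and the iteration count are assumptions for illustration.
def test_run_all_fidelities_sketch(self):
    for fidelity in self.fidelities:
        pdhg = PDHG(f=self.F, g=fidelity, operator=self.K,
                    tau=self.tau, sigma=self.sigma,
                    max_iteration=10, update_objective_interval=10)
        pdhg.run(verbose=0)
        # the solution should live in the domain of the gradient operator
        self.assertEqual(pdhg.solution.shape, self.K.domain_geometry().shape)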
def test_compare_with_PDHG(self):
    # Load an image from the CIL gallery
    data = dataexample.SHAPES.get()
    ig = data.geometry

    # Add Gaussian noise
    noisy_data = applynoise.gaussian(data, seed=10, var=0.005)

    # TV regularisation parameter
    alpha = 1

    # fidelity = 0.5 * L2NormSquared(b=noisy_data)
    # fidelity = L1Norm(b=noisy_data)
    fidelity = KullbackLeibler(b=noisy_data, use_numba=False)

    # Setup and run the PDHG algorithm
    F = BlockFunction(alpha * MixedL21Norm(), fidelity)
    G = ZeroFunction()
    K = BlockOperator(GradientOperator(ig), IdentityOperator(ig))

    # Compute operator norm
    normK = K.norm()

    # Primal & dual stepsizes
    sigma = 1. / normK
    tau = 1. / normK

    pdhg = PDHG(f=F, g=G, operator=K, tau=tau, sigma=sigma,
                max_iteration=100, update_objective_interval=10)
    pdhg.run(verbose=0)

    # Setup and run LADMM with the same functions and operator
    sigma = 1
    tau = sigma / normK**2
    admm = LADMM(f=G, g=F, operator=K, tau=tau, sigma=sigma,
                 max_iteration=100, update_objective_interval=10)
    admm.run(verbose=0)

    from cil.utilities.quality_measures import psnr
    if debug_print:
        print("PSNR", psnr(admm.solution, pdhg.solution))
    np.testing.assert_almost_equal(psnr(admm.solution, pdhg.solution),
                                   84.46678222768597, decimal=4)
def test_smoothL21Norm(self):
    ig = ImageGeometry(4, 5)
    bg = BlockGeometry(ig, ig)

    epsilon = 0.5
    f1 = SmoothMixedL21Norm(epsilon)
    x = bg.allocate('random', seed=10)

    print("Check call for smooth MixedL21Norm")

    # check call
    res1 = f1(x)
    res2 = (x.pnorm(2)**2 + epsilon**2).sqrt().sum()

    # alternative computation: append epsilon as an extra container
    tmp1 = x.copy()
    tmp1.containers += (epsilon, )
    res3 = tmp1.pnorm(2).sum()

    numpy.testing.assert_almost_equal(res1, res2, decimal=5)
    numpy.testing.assert_almost_equal(res1, res3, decimal=5)

    print("Check gradient for smooth MixedL21Norm ... OK ")

    res1 = f1.gradient(x)
    res2 = x.divide((x.pnorm(2)**2 + epsilon**2).sqrt())
    numpy.testing.assert_array_almost_equal(
        res1.get_item(0).as_array(), res2.get_item(0).as_array())
    numpy.testing.assert_array_almost_equal(
        res1.get_item(1).as_array(), res2.get_item(1).as_array())

    # check against MixedL21Norm when epsilon is close to 0
    print("Check as epsilon goes to 0 ... OK")
    f1 = SmoothMixedL21Norm(1e-12)
    f2 = MixedL21Norm()

    res1 = f1(x)
    res2 = f2(x)
    numpy.testing.assert_almost_equal(f1(x), f2(x))
def __init__(self,
             max_iteration=100,
             tolerance=None,
             correlation="Space",
             backend="c",
             lower=-numpy.inf,
             upper=numpy.inf,
             info=False):

    super(TotalVariation, self).__init__(L=None)

    # Regularising parameter = alpha
    self.regularisation_parameter = 1.

    # Iterations for FGP_TV
    self.iterations = max_iteration

    # Tolerance for FGP_TV
    self.tolerance = tolerance

    # Define the (isotropic) total variation penalty
    # (note: it does not include the regularisation parameter)
    # TODO: add anisotropic TV?
    self.TV = MixedL21Norm()

    # correlation: "Space" or "SpaceChannels"
    self.correlation = correlation
    self.backend = backend

    # Define orthogonal projection onto the convex set C
    self.lower = lower
    self.upper = upper
    self.tmp_proj_C = IndicatorBox(lower, upper).proximal

    # Set GradientOperator to None, to avoid a domain argument in __init__
    self._gradient = None
    self._domain = None

    self.pptmp = None
    self.pptmp1 = None

    # Print stopping information (iterations and tolerance error) of FGP_TV
    self.info = info
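# A minimal usage sketch for the class above, under the assumption (suggested by
# the `regularisation_parameter` attribute, not stated in this file) that scaling
# by a scalar supplies alpha and that proximal() then runs the FGP/TV inner loop.
# `noisy_image` is a placeholder for any ImageData; the values are illustrative.
alpha = 0.1
TV = alpha * TotalVariation(max_iteration=100, tolerance=None, lower=0)
denoised = TV.proximal(noisy_image, tau=1.0)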
def test_TranslateFunction_MixedL21Norm(self):
    print("Test for TranslateFunction for MixedL21Norm")

    ig = ImageGeometry(4, 4)
    Grad = GradientOperator(ig)

    b = Grad.range_geometry().allocate('random', seed=10)

    alpha = 0.4
    f1 = alpha * MixedL21Norm()
    fun = TranslateFunction(f1, b)

    tmp_x = Grad.range_geometry().allocate('random', seed=10)

    res1 = fun(tmp_x)
    res2 = f1(tmp_x - b)
    self.assertAlmostEqual(res1, res2)
    print("Check call...OK")

    res1 = f1.convex_conjugate(tmp_x) - b.dot(tmp_x)
    res2 = fun.convex_conjugate(tmp_x)
    self.assertAlmostEqual(res1, res2)
    print("Check convex conjugate...OK (maybe inf=inf)")

    tau = 0.4
    res1 = fun.proximal(tmp_x, tau)
    res2 = f1.proximal(tmp_x - b, tau) + b

    self.assertNumpyArrayAlmostEqual(
        res1.get_item(0).as_array(), res2.get_item(0).as_array())
    self.assertNumpyArrayAlmostEqual(
        res1.get_item(1).as_array(), res2.get_item(1).as_array())
    print("Check prox...OK ")
def set_up_TV_regularisation(image_geometry: ImageGeometry,
                             acquisition_data: AcquisitionData, alpha: float):
    # Forward operator
    A2d = ProjectionOperator(image_geometry, acquisition_data.geometry, 'gpu')

    # Set up TV regularisation
    # Define GradientOperator and BlockOperator
    Grad = GradientOperator(image_geometry)
    K = BlockOperator(alpha * Grad, A2d)

    # Define BlockFunction F using the MixedL21Norm() and the L2NormSquared()
    # alpha = 1.0
    # f1 = alpha * MixedL21Norm()
    f1 = MixedL21Norm()
    # f2 = 0.5 * L2NormSquared(b=ad2d)
    f2 = L2NormSquared(b=acquisition_data)
    # F = BlockFunction(f1, f2)

    # Define Function G simply as zero
    G = ZeroFunction()

    return (K, f1, f2, G)
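# A short sketch (an assumption, not part of the original script) of how the
# pieces returned by set_up_TV_regularisation could be assembled and solved with
# PDHG, mirroring the pattern used elsewhere in these tests. `ig`, `ad` and
# `alpha` stand for an ImageGeometry, an AcquisitionData and a regularisation
# weight; the iteration counts are illustrative only.
K, f1, f2, G = set_up_TV_regularisation(ig, ad, alpha)
F = BlockFunction(f1, f2)

normK = K.norm()
sigma = 1. / normK
tau = 1. / normK

pdhg = PDHG(f=F, g=G, operator=K, tau=tau, sigma=sigma,
            max_iteration=500, update_objective_interval=100)
pdhg.run(verbose=0)
recon = pdhg.solution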
from cil.framework import ImageGeometry, BlockGeometry
from cil.optimisation.operators import GradientOperator, IdentityOperator, BlockOperator
from cil.optimisation.functions import MixedL21Norm, L2NormSquared, BlockFunction
import numpy
import numpy as np

# image dimensions (not set in the original snippet; small example values)
M, N = 2, 3

ig = ImageGeometry(M, N)
BG = BlockGeometry(ig, ig)

u = ig.allocate('random_int')

B = BlockOperator(GradientOperator(ig), IdentityOperator(ig))
U = B.direct(u)
b = ig.allocate('random_int')

f1 = 10 * MixedL21Norm()
f2 = 5 * L2NormSquared(b=b)
f = BlockFunction(f1, f2)
print(f.L)

f = BlockFunction(f2, f2)
print(f.L)

# tau = 0.3
#
# print(" without out ")
# res_no_out = f.proximal_conjugate(U, tau)
# res_out = B.range_geometry().allocate()
# f.proximal_conjugate(U, tau, out=res_out)
#
# numpy.testing.assert_array_almost_equal(res_no_out[0][0].as_array(), \
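# A possible completion of the commented-out comparison above (a sketch, not the
# original code): evaluate proximal_conjugate with and without `out` and check
# that the first block of the two results agrees element-wise.
tau = 0.3
f = BlockFunction(f1, f2)   # MixedL21Norm on the gradient block, L2NormSquared on the identity block
res_no_out = f.proximal_conjugate(U, tau)
res_out = B.range_geometry().allocate()
f.proximal_conjugate(U, tau, out=res_out)

numpy.testing.assert_array_almost_equal(
    res_no_out[0][0].as_array(), res_out[0][0].as_array())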
def test_SPDHG_vs_SPDHG_explicit_axpby(self):
    data = dataexample.SIMPLE_PHANTOM_2D.get(size=(128, 128))
    if debug_print:
        print("test_SPDHG_vs_SPDHG_explicit_axpby here")
    ig = data.geometry
    ig.voxel_size_x = 0.1
    ig.voxel_size_y = 0.1

    detectors = ig.shape[0]
    angles = np.linspace(0, np.pi, 180)
    ag = AcquisitionGeometry('parallel', '2D', angles, detectors,
                             pixel_size_h=0.1, angle_unit='radian')

    # Select device
    # device = input('Available device: GPU==1 / CPU==0 ')
    # if device == '1':
    #     dev = 'gpu'
    # else:
    #     dev = 'cpu'
    dev = 'cpu'

    Aop = AstraProjectorSimple(ig, ag, dev)
    sin = Aop.direct(data)

    # Create noisy data. Apply Poisson or Gaussian noise
    noises = ['gaussian', 'poisson']
    noise = noises[1]
    if noise == 'poisson':
        np.random.seed(10)
        scale = 5
        eta = 0
        noisy_data = AcquisitionData(
            np.random.poisson(scale * (eta + sin.as_array())) / scale,
            geometry=ag)
    elif noise == 'gaussian':
        np.random.seed(10)
        n1 = np.random.normal(0, 0.1, size=ag.shape)
        noisy_data = AcquisitionData(n1 + sin.as_array(), geometry=ag)
    else:
        raise ValueError('Unsupported Noise ', noise)

    #%% 'explicit' SPDHG, scalar step-sizes
    subsets = 10
    size_of_subsets = int(len(angles) / subsets)

    # create GradientOperator
    op1 = GradientOperator(ig)

    # split the angles into uniform, sequential subsets
    list_angles = [
        angles[i:i + size_of_subsets]
        for i in range(0, len(angles), size_of_subsets)
    ]

    # create an acquisition geometry for each subset of angles
    list_geoms = [
        AcquisitionGeometry('parallel', '2D', list_angles[i], detectors,
                            pixel_size_h=0.1, angle_unit='radian')
        for i in range(len(list_angles))
    ]

    # create one projection operator per subset, plus the gradient
    A = BlockOperator(*[
        AstraProjectorSimple(ig, list_geoms[i], dev) for i in range(subsets)
    ] + [op1])

    ## number of subsets
    # (sub2ind, ind2sub) = divide_1Darray_equally(range(len(A)), subsets)

    ## acquisition data
    AD_list = []
    for sub_num in range(subsets):
        for i in range(0, len(angles), size_of_subsets):
            arr = noisy_data.as_array()[i:i + size_of_subsets, :]
            AD_list.append(
                AcquisitionData(arr, geometry=list_geoms[sub_num]))

    g = BlockDataContainer(*AD_list)
    alpha = 0.5

    ## block function
    F = BlockFunction(*[
        *[KullbackLeibler(b=g[i]) for i in range(subsets)] +
        [alpha * MixedL21Norm()]
    ])
    G = IndicatorBox(lower=0)

    prob = [1 / (2 * subsets)] * (len(A) - 1) + [1 / 2]

    algos = []
    algos.append(
        SPDHG(f=F, g=G, operator=A, max_iteration=1000,
              update_objective_interval=200, prob=prob.copy(),
              use_axpby=True))
    algos[0].run(1000, verbose=0)

    algos.append(
        SPDHG(f=F, g=G, operator=A, max_iteration=1000,
              update_objective_interval=200, prob=prob.copy(),
              use_axpby=False))
    algos[1].run(1000, verbose=0)

    # np.testing.assert_array_almost_equal(algos[0].get_output().as_array(),
    #                                      algos[1].get_output().as_array())
    from cil.utilities.quality_measures import mae, mse, psnr
    qm = (mae(algos[0].get_output(), algos[1].get_output()),
          mse(algos[0].get_output(), algos[1].get_output()),
          psnr(algos[0].get_output(), algos[1].get_output()))
    if debug_print:
        print("Quality measures", qm)
    assert qm[0] < 0.005
    assert qm[1] < 3.e-05
def test_SPDHG_vs_PDHG_explicit(self):
    data = dataexample.SIMPLE_PHANTOM_2D.get(size=(128, 128))

    ig = data.geometry
    ig.voxel_size_x = 0.1
    ig.voxel_size_y = 0.1

    detectors = ig.shape[0]
    angles = np.linspace(0, np.pi, 180)
    ag = AcquisitionGeometry('parallel', '2D', angles, detectors,
                             pixel_size_h=0.1, angle_unit='radian')

    # Select device
    dev = 'cpu'

    Aop = AstraProjectorSimple(ig, ag, dev)
    sin = Aop.direct(data)

    # Create noisy data. Apply Poisson or Gaussian noise
    noises = ['gaussian', 'poisson']
    noise = noises[1]
    if noise == 'poisson':
        scale = 5
        noisy_data = scale * applynoise.poisson(sin / scale, seed=10)
        # np.random.seed(10)
        # scale = 5
        # eta = 0
        # noisy_data = AcquisitionData(np.random.poisson(scale * (eta + sin.as_array())) / scale, ag)
    elif noise == 'gaussian':
        noisy_data = applynoise.gaussian(sin, var=0.1, seed=10)
        # np.random.seed(10)
        # n1 = np.random.normal(0, 0.1, size=ag.shape)
        # noisy_data = AcquisitionData(n1 + sin.as_array(), ag)
    else:
        raise ValueError('Unsupported Noise ', noise)

    #%% 'explicit' SPDHG, scalar step-sizes
    subsets = 10
    size_of_subsets = int(len(angles) / subsets)

    # create GradientOperator
    op1 = GradientOperator(ig)

    # split the angles into uniform, sequential subsets
    list_angles = [
        angles[i:i + size_of_subsets]
        for i in range(0, len(angles), size_of_subsets)
    ]

    # create an acquisition geometry for each subset of angles
    list_geoms = [
        AcquisitionGeometry('parallel', '2D', list_angles[i], detectors,
                            pixel_size_h=0.1, angle_unit='radian')
        for i in range(len(list_angles))
    ]

    # create one projection operator per subset, plus the gradient
    A = BlockOperator(*[
        AstraProjectorSimple(ig, list_geoms[i], dev) for i in range(subsets)
    ] + [op1])

    ## number of subsets
    # (sub2ind, ind2sub) = divide_1Darray_equally(range(len(A)), subsets)

    ## acquisition data
    AD_list = []
    for sub_num in range(subsets):
        for i in range(0, len(angles), size_of_subsets):
            arr = noisy_data.as_array()[i:i + size_of_subsets, :]
            AD_list.append(
                AcquisitionData(arr, geometry=list_geoms[sub_num]))

    g = BlockDataContainer(*AD_list)
    alpha = 0.5

    ## block function
    F = BlockFunction(*[
        *[KullbackLeibler(b=g[i]) for i in range(subsets)] +
        [alpha * MixedL21Norm()]
    ])
    G = IndicatorBox(lower=0)

    prob = [1 / (2 * subsets)] * (len(A) - 1) + [1 / 2]

    spdhg = SPDHG(f=F, g=G, operator=A, max_iteration=1000,
                  update_objective_interval=200, prob=prob)
    spdhg.run(1000, verbose=0)

    #%% 'explicit' PDHG, scalar step-sizes
    op1 = GradientOperator(ig)
    op2 = Aop

    # Create BlockOperator
    operator = BlockOperator(op1, op2, shape=(2, 1))
    f2 = KullbackLeibler(b=noisy_data)
    g = IndicatorBox(lower=0)
    normK = operator.norm()
    sigma = 1 / normK
    tau = 1 / normK

    f1 = alpha * MixedL21Norm()
    f = BlockFunction(f1, f2)

    # Setup and run the PDHG algorithm
    pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
    pdhg.max_iteration = 1000
    pdhg.update_objective_interval = 200
    pdhg.run(1000, verbose=0)

    #%% show the difference between PDHG and SPDHG
    # plt.imshow(spdhg.get_output().as_array() - pdhg.get_output().as_array())
    # plt.colorbar()
    # plt.show()

    from cil.utilities.quality_measures import mae, mse, psnr
    qm = (mae(spdhg.get_output(), pdhg.get_output()),
          mse(spdhg.get_output(), pdhg.get_output()),
          psnr(spdhg.get_output(), pdhg.get_output()))
    if debug_print:
        print("Quality measures", qm)

    np.testing.assert_almost_equal(mae(spdhg.get_output(), pdhg.get_output()),
                                   0.00150, decimal=3)
    np.testing.assert_almost_equal(mse(spdhg.get_output(), pdhg.get_output()),
                                   1.68590e-05, decimal=3)
def test_PDHG_Denoising(self):
    print("PDHG Denoising with 3 noises")
    # adapted from demo PDHG_TV_Color_Denoising.py in the CIL-Demos repository

    data = dataexample.PEPPERS.get(size=(256, 256))
    ig = data.geometry
    ag = ig

    which_noise = 0
    # Create noisy data.
    noises = ['gaussian', 'poisson', 's&p']
    dnoise = noises[which_noise]

    def setup(data, dnoise):
        if dnoise == 's&p':
            n1 = applynoise.saltnpepper(data, salt_vs_pepper=0.9,
                                        amount=0.2, seed=10)
        elif dnoise == 'poisson':
            scale = 5
            n1 = applynoise.poisson(data.as_array() / scale, seed=10) * scale
        elif dnoise == 'gaussian':
            n1 = applynoise.gaussian(data.as_array(), seed=10)
        else:
            raise ValueError('Unsupported Noise ', dnoise)
        noisy_data = ig.allocate()
        noisy_data.fill(n1)

        # Regularisation parameter depending on the noise distribution
        if dnoise == 's&p':
            alpha = 0.8
        elif dnoise == 'poisson':
            alpha = 1
        elif dnoise == 'gaussian':
            alpha = .3

        # fidelity
        if dnoise == 's&p':
            g = L1Norm(b=noisy_data)
        elif dnoise == 'poisson':
            g = KullbackLeibler(b=noisy_data)
        elif dnoise == 'gaussian':
            g = 0.5 * L2NormSquared(b=noisy_data)
        return noisy_data, alpha, g

    noisy_data, alpha, g = setup(data, dnoise)
    operator = GradientOperator(
        ig, correlation=GradientOperator.CORRELATION_SPACE)

    f1 = alpha * MixedL21Norm()

    # Compute operator norm
    normK = operator.norm()

    # Primal & dual stepsizes
    sigma = 1
    tau = 1 / (sigma * normK**2)

    # Setup and run the PDHG algorithm
    pdhg1 = PDHG(f=f1, g=g, operator=operator, tau=tau, sigma=sigma)
    pdhg1.max_iteration = 2000
    pdhg1.update_objective_interval = 200
    pdhg1.run(1000, verbose=0)

    rmse = (pdhg1.get_output() - data).norm() / data.as_array().size
    if debug_print:
        print("RMSE", rmse)
    self.assertLess(rmse, 2e-4)

    which_noise = 1
    noise = noises[which_noise]
    noisy_data, alpha, g = setup(data, noise)
    operator = GradientOperator(
        ig, correlation=GradientOperator.CORRELATION_SPACE)

    f1 = alpha * MixedL21Norm()

    # Compute operator norm
    normK = operator.norm()

    # Primal & dual stepsizes
    sigma = 1
    tau = 1 / (sigma * normK**2)

    # Setup and run the PDHG algorithm
    pdhg1 = PDHG(f=f1, g=g, operator=operator, tau=tau, sigma=sigma,
                 max_iteration=2000, update_objective_interval=200)
    pdhg1.run(1000, verbose=0)

    rmse = (pdhg1.get_output() - data).norm() / data.as_array().size
    if debug_print:
        print("RMSE", rmse)
    self.assertLess(rmse, 2e-4)

    which_noise = 2
    noise = noises[which_noise]
    noisy_data, alpha, g = setup(data, noise)
    operator = GradientOperator(
        ig, correlation=GradientOperator.CORRELATION_SPACE)

    f1 = alpha * MixedL21Norm()

    # Compute operator norm
    normK = operator.norm()

    # Primal & dual stepsizes
    sigma = 1
    tau = 1 / (sigma * normK**2)

    # Setup and run the PDHG algorithm
    pdhg1 = PDHG(f=f1, g=g, operator=operator, tau=tau, sigma=sigma)
    pdhg1.max_iteration = 2000
    pdhg1.update_objective_interval = 200
    pdhg1.run(1000, verbose=0)

    rmse = (pdhg1.get_output() - data).norm() / data.as_array().size
    if debug_print:
        print("RMSE", rmse)
    self.assertLess(rmse, 2e-4)
def test_PDHG_vs_PDHG_explicit_axpby(self):
    data = dataexample.SIMPLE_PHANTOM_2D.get(size=(128, 128))
    if debug_print:
        print("test_PDHG_vs_PDHG_explicit_axpby here")
    ig = data.geometry
    ig.voxel_size_x = 0.1
    ig.voxel_size_y = 0.1

    detectors = ig.shape[0]
    angles = np.linspace(0, np.pi, 180)
    ag = AcquisitionGeometry('parallel', '2D', angles, detectors,
                             pixel_size_h=0.1, angle_unit='radian')

    dev = 'cpu'

    Aop = AstraProjectorSimple(ig, ag, dev)
    sin = Aop.direct(data)

    # Create noisy data. Apply Poisson or Gaussian noise
    noises = ['gaussian', 'poisson']
    noise = noises[1]
    if noise == 'poisson':
        np.random.seed(10)
        scale = 5
        eta = 0
        noisy_data = AcquisitionData(
            np.random.poisson(scale * (eta + sin.as_array())) / scale,
            geometry=ag)
    elif noise == 'gaussian':
        np.random.seed(10)
        n1 = np.random.normal(0, 0.1, size=ag.shape)
        noisy_data = AcquisitionData(n1 + sin.as_array(), geometry=ag)
    else:
        raise ValueError('Unsupported Noise ', noise)

    alpha = 0.5
    op1 = GradientOperator(ig)
    op2 = Aop

    # Create BlockOperator
    operator = BlockOperator(op1, op2, shape=(2, 1))
    f2 = KullbackLeibler(b=noisy_data)
    g = IndicatorBox(lower=0)
    normK = operator.norm()

    sigma = 1. / normK
    tau = 1. / normK

    f1 = alpha * MixedL21Norm()
    f = BlockFunction(f1, f2)

    # Setup and run the PDHG algorithm, with and without axpby
    algos = []
    algos.append(
        PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma,
             max_iteration=1000, update_objective_interval=200,
             use_axpby=True))
    algos[0].run(1000, verbose=0)

    algos.append(
        PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma,
             max_iteration=1000, update_objective_interval=200,
             use_axpby=False))
    algos[1].run(1000, verbose=0)

    from cil.utilities.quality_measures import mae, mse, psnr
    qm = (mae(algos[0].get_output(), algos[1].get_output()),
          mse(algos[0].get_output(), algos[1].get_output()),
          psnr(algos[0].get_output(), algos[1].get_output()))
    if debug_print:
        print("Quality measures", qm)
    np.testing.assert_array_less(qm[0], 0.005)
    np.testing.assert_array_less(qm[1], 3e-05)
plt.figure()
plt.imshow(adjointimage.as_array())
plt.gray()
plt.colorbar()

# Run dot test to check validity of the adjoint
print(BOP.dot_test(BOP))

# Specify total variation regularised least squares

# Create operators
op1 = GradientOperator(ig_gray, correlation=GradientOperator.CORRELATION_SPACE)
op2 = BOP

# Set regularisation parameter
alpha = 0.02

# Create functions to be blocked with operators
f1 = alpha * MixedL21Norm()
f2 = 0.5 * L2NormSquared(b=blurredimage)

# Create BlockOperator
operator = BlockOperator(op1, op2, shape=(2, 1))

# Create functions
f = BlockFunction(f1, f2)
g = ZeroFunction()

# Compute operator norm
normK = operator.norm()

# Primal & dual stepsizes
sigma = 1
tau = 1 / (sigma * normK**2)
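# A plausible continuation (a sketch, not taken from the original script,
# assuming PDHG is imported from cil.optimisation.algorithms): with f, g,
# operator and the step sizes defined above, set up and run PDHG for the
# TV-regularised deblurring problem. Iteration counts are illustrative only.
pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 2000
pdhg.update_objective_interval = 100
pdhg.run(2000)

# Display the deblurred result
plt.figure()
plt.imshow(pdhg.get_output().as_array())
plt.gray()
plt.colorbar()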
if noise == 's&p':
    f2 = L1Norm(b=noisy_data)
elif noise == 'poisson':
    f2 = KullbackLeibler(b=noisy_data)
elif noise == 'gaussian':
    f2 = 0.5 * L2NormSquared(b=noisy_data)

# Create operators
op1 = GradientOperator(ig, correlation=GradientOperator.CORRELATION_SPACE)
op2 = MO

# Create BlockOperator
operator = BlockOperator(op1, op2, shape=(2, 1))

# Create functions
f = BlockFunction(alpha * MixedL21Norm(), f2)
g = ZeroFunction()

# Compute operator norm
normK = operator.norm()

# Primal & dual stepsizes
sigma = 1
tau = 1 / (sigma * normK**2)

# Setup and run the PDHG algorithm
pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 2000
pdhg.update_objective_interval = 100
pdhg.run(2000)
algo.update_objective_interval = 10
cProfile.run('algo.run(100, verbose=1)')

#%%
plotter2D(algo.solution, cmap='gist_earth')

#%%
cProfile.run('algo.run(1)')

# %%
from cil.optimisation.algorithms import PDHG
from cil.optimisation.functions import MixedL21Norm, BlockFunction, L2NormSquared, IndicatorBox
from cil.optimisation.operators import GradientOperator, BlockOperator

nabla = GradientOperator(ig_cs, backend='c')
F = BlockFunction(L2NormSquared(b=ldata), alpha * MixedL21Norm())
BK = BlockOperator(K, nabla)

# precomputed operator norm; uncomment the next line to recompute it
# normK = BK.norm()
normK = 191.54791313753265

pdhg = PDHG(f=F, g=IndicatorBox(lower=0.), operator=BK,
            max_iteration=1000, update_objective_interval=100)

#%%
pdhg.run(100, verbose=2, print_interval=10)

#%%
plotter2D(pdhg.solution, cmap='gist_earth')

# %%