def test_mixedL12Norm(self):
    M, N, K = 2, 3, 5
    ig = ImageGeometry(voxel_num_x=M, voxel_num_y=N)
    u1 = ig.allocate('random_int')
    u2 = ig.allocate('random_int')
    U = BlockDataContainer(u1, u2, shape=(2, 1))

    # Define no scale and scaled
    f_no_scaled = MixedL21Norm()
    f_scaled = 1 * MixedL21Norm()

    # call
    a1 = f_no_scaled(U)
    a2 = f_scaled(U)
    self.assertNumpyArrayAlmostEqual(a1, a2)

    tmp = [el**2 for el in U.containers]
    self.assertBlockDataContainerEqual(BlockDataContainer(*tmp), U.power(2))

    z1 = f_no_scaled.proximal_conjugate(U, 1)

    u3 = ig.allocate('random_int')
    u4 = ig.allocate('random_int')
    z3 = BlockDataContainer(u3, u4, shape=(2, 1))

    f_no_scaled.proximal_conjugate(U, 1, out=z3)
    self.assertBlockDataContainerEqual(z3, z1)
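    # Illustrative check (an added sketch; assumes numpy is imported at module
    # level and that MixedL21Norm evaluates sum_x sqrt(u1(x)**2 + u2(x)**2)):
    expected = numpy.sqrt(u1.as_array()**2 + u2.as_array()**2).sum()
    numpy.testing.assert_almost_equal(a1, expected, decimal=5)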
    op22 = SymmetrizedGradient(op11.domain_geometry())
    op21 = ZeroOperator(ig, op22.range_geometry())
    op31 = Identity(ig, ag)
    op32 = ZeroOperator(op22.domain_geometry(), ag)

    operator = BlockOperator(op11, -1 * op12,
                             op21, op22,
                             op31, op32, shape=(3, 2))

    f1 = alpha * MixedL21Norm()
    f2 = beta * MixedL21Norm()
    f = BlockFunction(f1, f2, f3)
    g = ZeroFunction()

else:
    # Create operators
    op11 = Gradient(ig)
    op12 = Identity(op11.range_geometry())
    op22 = SymmetrizedGradient(op11.domain_geometry())
    op21 = ZeroOperator(ig, op22.range_geometry())

    operator = BlockOperator(op11, -1 * op12,
                             op21, op22, shape=(2, 2))
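    # Reading of the block structure (an interpretation of the code above,
    # assuming the standard second-order TGV formulation): both operators act
    # on a pair (u, w), with the first row giving grad(u) - w and the second
    # row E(w), where E is the symmetrised gradient. f1 then penalises
    # ||grad(u) - w||_{2,1} and f2 penalises ||E(w)||_{2,1}. The extra third
    # row in the (3, 2) case maps u into the acquisition space for the
    # data-fidelity term f3 defined elsewhere in the script.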
noisy_data = AcquisitionData(n1, ag)

# Regularisation Parameter
alpha = 10

# Create operators
#op1 = Gradient(ig)
op1 = Gradient(ig, correlation='SpaceChannels')
op2 = Aop

# Create BlockOperator
operator = BlockOperator(op1, op2, shape=(2, 1))

# Create functions
f1 = alpha * MixedL21Norm()
f2 = KullbackLeibler(noisy_data)
f = BlockFunction(f1, f2)
g = ZeroFunction()

normK = operator.norm()

# Primal & dual stepsizes
sigma = 5
tau = 1 / (sigma * normK**2)

# Setup and run the PDHG algorithm
pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma, memopt=True)
pdhg.max_iteration = 1000
pdhg.update_objective_interval = 200
pdhg.run(1000)
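# Sanity check (illustrative addition, not part of the original script): PDHG
# converges when sigma * tau * ||K||^2 <= 1; with tau = 1/(sigma * normK**2)
# the product is exactly 1, so the condition holds up to floating-point error.
assert sigma * tau * normK**2 <= 1 + 1e-6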
#%% Coupled Total Variation reconstruction in the 4D volume. For this case there is
# no GPU implementation, but we can use another algorithm, PDHG (primal-dual
# hybrid gradient).

# Set up operators: Projection and Gradient
op1 = A3D_chan
op2 = Gradient(ig)

# Set up a BlockOperator
operator = BlockOperator(op1, op2, shape=(2, 1))

# Compute the operator norm
normK = operator.norm()

alpha_coupled = 0.05
f1 = 0.5 * L2NormSquared(b=data)
f2 = alpha_coupled * MixedL21Norm()
f = BlockFunction(f1, f2)
g = IndicatorBox(lower=0)

sigma = 1
tau = 1 / (sigma * normK**2)

pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 100
pdhg.update_objective_interval = 20
pdhg.run(1000, verbose=True, callback=show_data_4D)

#%% Let's move to 2D + energy channel reconstruction
ag2D = AcquisitionGeometry(
if noise == 's&p':
    f2 = L1Norm(b=noisy_data)
elif noise == 'poisson':
    f2 = KullbackLeibler(noisy_data)
elif noise == 'gaussian':
    f2 = 0.5 * L2NormSquared(b=noisy_data)

# Create operators
op1 = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)
op2 = MO

# Create BlockOperator
operator = BlockOperator(op1, op2, shape=(2, 1))

# Create functions
f = BlockFunction(alpha * MixedL21Norm(), f2)
g = ZeroFunction()

# Compute operator Norm
normK = operator.norm()

# Primal & dual stepsizes
sigma = 1
tau = 1 / (sigma * normK**2)

# Setup and run the PDHG algorithm
pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 1000
pdhg.update_objective_interval = 100
pdhg.run(1000)
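# Illustrative check (an added sketch, assuming BlockOperator with shape=(2, 1)
# stacks op1 and op2 vertically, so direct() returns a BlockDataContainer with
# one entry per row):
Kx = operator.direct(noisy_data)
assert (Kx.get_item(1) - op2.direct(noisy_data)).norm() < 1e-6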
from ccpi.optimisation.functions import L2NormSquared, MixedL21Norm, L1Norm, BlockFunction
from ccpi.framework import ImageGeometry, BlockGeometry
from ccpi.optimisation.operators import Gradient, Identity, BlockOperator
import numpy

M, N = 2, 3  # image size, defined here so the snippet is self-contained

ig = ImageGeometry(M, N)
BG = BlockGeometry(ig, ig)

u = ig.allocate('random_int')
B = BlockOperator(Gradient(ig), Identity(ig))

U = B.direct(u)
b = ig.allocate('random_int')

f1 = 10 * MixedL21Norm()
f2 = 0.5 * L2NormSquared(b=b)
f = BlockFunction(f1, f2)
tau = 0.3

print(" without out ")
res_no_out = f.proximal_conjugate(U, tau)

res_out = B.range_geometry().allocate()
f.proximal_conjugate(U, tau, out=res_out)

numpy.testing.assert_array_almost_equal(res_no_out[0][0].as_array(),
                                        res_out[0][0].as_array(), decimal=4)

numpy.testing.assert_array_almost_equal(res_no_out[0][1].as_array(),
                                        res_out[0][1].as_array(), decimal=4)
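# Illustrative check (an added sketch, assuming BlockFunction represents the
# separable sum f1 + f2, so its proximal_conjugate is applied block-wise to U):
res_f2 = f2.proximal_conjugate(U[1], tau)
numpy.testing.assert_array_almost_equal(res_no_out[1].as_array(),
                                        res_f2.as_array(), decimal=4)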
def test_PDHG_Denoising(self):
    print("PDHG Denoising with 3 noises")
    # adapted from demo PDHG_TV_Color_Denoising.py in CIL-Demos repository
    # loader = TestData(data_dir=os.path.join(os.environ['SIRF_INSTALL_PATH'], 'share', 'ccpi'))
    # loader = TestData(data_dir=os.path.join(sys.prefix, 'share', 'ccpi'))
    loader = TestData()

    data = loader.load(TestData.PEPPERS, size=(256, 256))
    ig = data.geometry
    ag = ig

    which_noise = 0
    # Create noisy data.
    noises = ['gaussian', 'poisson', 's&p']
    noise = noises[which_noise]

    def setup(data, noise):
        if noise == 's&p':
            n1 = TestData.random_noise(data.as_array(), mode=noise,
                                       salt_vs_pepper=0.9, amount=0.2, seed=10)
        elif noise == 'poisson':
            scale = 5
            n1 = TestData.random_noise(data.as_array() / scale, mode=noise, seed=10) * scale
        elif noise == 'gaussian':
            n1 = TestData.random_noise(data.as_array(), mode=noise, seed=10)
        else:
            raise ValueError('Unsupported Noise ', noise)
        noisy_data = ig.allocate()
        noisy_data.fill(n1)

        # Regularisation Parameter depending on the noise distribution
        if noise == 's&p':
            alpha = 0.8
        elif noise == 'poisson':
            alpha = 1
        elif noise == 'gaussian':
            alpha = 0.3

        # fidelity
        if noise == 's&p':
            g = L1Norm(b=noisy_data)
        elif noise == 'poisson':
            g = KullbackLeibler(b=noisy_data)
        elif noise == 'gaussian':
            g = 0.5 * L2NormSquared(b=noisy_data)

        return noisy_data, alpha, g

    noisy_data, alpha, g = setup(data, noise)
    operator = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)

    f1 = alpha * MixedL21Norm()

    # Compute operator Norm
    normK = operator.norm()

    # Primal & dual stepsizes
    sigma = 1
    tau = 1 / (sigma * normK**2)

    # Setup and run the PDHG algorithm
    pdhg1 = PDHG(f=f1, g=g, operator=operator, tau=tau, sigma=sigma)
    pdhg1.max_iteration = 2000
    pdhg1.update_objective_interval = 200
    pdhg1.run(1000, very_verbose=True)

    rmse = (pdhg1.get_output() - data).norm() / data.as_array().size
    print("RMSE", rmse)
    self.assertLess(rmse, 2e-4)

    which_noise = 1
    noise = noises[which_noise]
    noisy_data, alpha, g = setup(data, noise)
    operator = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)

    f1 = alpha * MixedL21Norm()

    # Compute operator Norm
    normK = operator.norm()

    # Primal & dual stepsizes
    sigma = 1
    tau = 1 / (sigma * normK**2)

    # Setup and run the PDHG algorithm
    pdhg1 = PDHG(f=f1, g=g, operator=operator, tau=tau, sigma=sigma,
                 max_iteration=2000, update_objective_interval=200)
    pdhg1.run(1000)

    rmse = (pdhg1.get_output() - data).norm() / data.as_array().size
    print("RMSE", rmse)
    self.assertLess(rmse, 2e-4)

    which_noise = 2
    noise = noises[which_noise]
    noisy_data, alpha, g = setup(data, noise)
    operator = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)

    f1 = alpha * MixedL21Norm()

    # Compute operator Norm
    normK = operator.norm()

    # Primal & dual stepsizes
    sigma = 1
    tau = 1 / (sigma * normK**2)

    # Setup and run the PDHG algorithm
    pdhg1 = PDHG(f=f1, g=g, operator=operator, tau=tau, sigma=sigma)
    pdhg1.max_iteration = 2000
    pdhg1.update_objective_interval = 200
    pdhg1.run(1000)

    rmse = (pdhg1.get_output() - data).norm() / data.as_array().size
    print("RMSE", rmse)
    self.assertLess(rmse, 2e-4)