def setUp(self, *args, **kwargs):
    """Build a small 3D geometry and a pool of CIL functions reused by the tests.

    The pool (``self.list1``) mixes plain functions, scaled functions, functions
    with data ``b``, a ZeroFunction/ConstantFunction, a LeastSquares and a plain
    number, to exercise Function arithmetic across heterogeneous operands.
    """
    M, N, K = 3, 4, 5
    self.ig = ImageGeometry(M, N, K)
    # Fixed seeds keep the random test data reproducible between runs.
    self.x = self.ig.allocate('random', seed=1)
    self.b = self.ig.allocate('random', seed=2)
    self.eta = self.ig.allocate(0.1)
    self.operator = IdentityOperator(self.ig)
    scalar = 0.25
    self.f1 = L2NormSquared()
    self.f2 = L1Norm()
    self.f3 = scalar * L2NormSquared()
    self.f4 = scalar * L1Norm()
    self.f5 = scalar * L2NormSquared(b=self.b)
    self.f6 = scalar * L1Norm(b=self.b)
    self.f7 = ZeroFunction()
    self.f8 = 5 * ConstantFunction(10)
    self.f9 = LeastSquares(self.operator, self.b, c=scalar)
    self.f10 = 0.5 * KullbackLeibler(b=self.b, eta=self.eta)
    self.f11 = KullbackLeibler(b=self.b, eta=self.eta)
    # A plain number is deliberately included to exercise Function + scalar.
    self.f12 = 10
    self.list1 = [self.f1, self.f2, self.f3, self.f4, self.f5, \
                  self.f6, self.f7, self.f8, self.f9, self.f10, self.f11, self.f12]
def setUp(self): print("test_KullbackLeibler numba") #numpy.random.seed(1) M, N, K = 2, 3, 4 ig = ImageGeometry(N, M) u1 = ig.allocate('random', seed=500) u1 = ig.allocate(0.2) #g1 = ig.allocate('random', seed = 100) g1 = ig.allocate(1) b1 = ig.allocate('random', seed=1000) eta = ig.allocate(1e-3) mask = ig.allocate(1) mask.fill(0, horizontal_x=0) mask_c = ig.allocate(0) mask_c.fill(1, horizontal_x=0) # print ("mask\n", mask.as_array()) # print ("mask_c\n", mask_c.as_array()) f = KullbackLeibler(b=g1, use_numba=True, eta=eta) f_np = KullbackLeibler(b=g1, use_numba=False, eta=eta) # mask is on vartical=0 # separate the u1 vertical=0 f_mask = KullbackLeibler(b=g1.copy(), use_numba=True, mask=mask.copy(), eta=eta.copy()) f_mask_c = KullbackLeibler(b=g1.copy(), use_numba=True, mask=mask_c.copy(), eta=eta.copy()) f_on_mask = KullbackLeibler(b=g1.subset(horizontal_x=0), use_numba=True, eta=eta.subset(horizontal_x=0)) u1_on_mask = u1.subset(horizontal_x=0) tau = 400.4 self.tau = tau self.u1 = u1 self.g1 = g1 self.b1 = b1 self.eta = eta self.f = f self.f_np = f_np self.mask = mask self.mask_c = mask_c self.f_mask = f_mask self.f_mask_c = f_mask_c self.f_on_mask = f_on_mask self.u1_on_mask = u1_on_mask
def test_FISTA_Denoising(self):
    """Denoise Poisson-corrupted SHAPES data with FISTA (Tikhonov + KL fidelity)
    and check the RMSE against the clean image stays below a fixed threshold."""
    if debug_print:
        print("FISTA Denoising Poisson Noise Tikhonov")
    # adapted from demo FISTA_Tikhonov_Poisson_Denoising.py in CIL-Demos repository
    data = dataexample.SHAPES.get()
    ig = data.geometry
    ag = ig
    N = 300
    # Create Noisy data with Poisson noise; scaling before/after keeps the
    # intensity range of the clean data while changing the noise level.
    scale = 5
    noisy_data = applynoise.poisson(data / scale, seed=10) * scale
    # Regularisation Parameter
    alpha = 10
    # Setup and run the FISTA algorithm: smooth Tikhonov term f, KL fidelity g
    operator = GradientOperator(ig)
    fid = KullbackLeibler(b=noisy_data)
    reg = OperatorCompositionFunction(alpha * L2NormSquared(), operator)
    initial = ig.allocate()
    fista = FISTA(initial=initial, f=reg, g=fid)
    fista.max_iteration = 3000
    fista.update_objective_interval = 500
    fista.run(verbose=0)
    # RMSE here is the norm of the error divided by the number of pixels
    rmse = (fista.get_output() - data).norm() / data.as_array().size
    if debug_print:
        print("RMSE", rmse)
    self.assertLess(rmse, 4.2e-4)
def setup(data, dnoise): if dnoise == 's&p': n1 = applynoise.saltnpepper(data, salt_vs_pepper=0.9, amount=0.2, seed=10) elif dnoise == 'poisson': scale = 5 n1 = applynoise.poisson(data.as_array() / scale, seed=10) * scale elif dnoise == 'gaussian': n1 = applynoise.gaussian(data.as_array(), seed=10) else: raise ValueError('Unsupported Noise ', noise) noisy_data = ig.allocate() noisy_data.fill(n1) # Regularisation Parameter depending on the noise distribution if dnoise == 's&p': alpha = 0.8 elif dnoise == 'poisson': alpha = 1 elif dnoise == 'gaussian': alpha = .3 # fidelity if dnoise == 's&p': g = L1Norm(b=noisy_data) elif dnoise == 'poisson': g = KullbackLeibler(b=noisy_data) elif dnoise == 'gaussian': g = 0.5 * L2NormSquared(b=noisy_data) return noisy_data, alpha, g
def setUp(self):
    """Tiny fixture for PDHG-style tests: constant data shifted by 1 as 'noisy'
    data, three candidate fidelity terms, and a TV prior with its operator."""
    ig = ImageGeometry(2, 3, 2)
    data = ig.allocate(1, dtype=np.float32)
    # "noisy" data is just the constant image shifted by 1 — deterministic
    noisy_data = data + 1
    # TV regularisation parameter
    self.alpha = 1
    # One fidelity per noise model: Gaussian (L2), salt&pepper (L1), Poisson (KL)
    self.fidelities = [
        0.5 * L2NormSquared(b=noisy_data),
        L1Norm(b=noisy_data),
        KullbackLeibler(b=noisy_data, use_numba=False)
    ]
    F = self.alpha * MixedL21Norm()
    K = GradientOperator(ig)
    # Compute operator Norm
    normK = K.norm()
    # Primal & dual stepsizes
    self.sigma = 1. / normK
    self.tau = 1. / normK
    self.F = F
    self.K = K
def test_compare_with_PDHG(self):
    """Solve the same TV-regularised KL denoising problem with PDHG and LADMM
    and check the two reconstructions agree to a fixed PSNR."""
    # Load an image from the CIL gallery.
    data = dataexample.SHAPES.get()
    ig = data.geometry
    # Add gaussian noise
    noisy_data = applynoise.gaussian(data, seed=10, var=0.005)
    # TV regularisation parameter
    alpha = 1
    # fidelity = 0.5 * L2NormSquared(b=noisy_data)
    # fidelity = L1Norm(b=noisy_data)
    fidelity = KullbackLeibler(b=noisy_data, use_numba=False)
    # Setup and run the PDHG algorithm
    F = BlockFunction(alpha * MixedL21Norm(), fidelity)
    G = ZeroFunction()
    K = BlockOperator(GradientOperator(ig), IdentityOperator(ig))
    # Compute operator Norm
    normK = K.norm()
    # Primal & dual stepsizes
    sigma = 1. / normK
    tau = 1. / normK
    pdhg = PDHG(f=F, g=G, operator=K, tau=tau, sigma=sigma,
                max_iteration=100, update_objective_interval=10)
    pdhg.run(verbose=0)
    # LADMM step sizes must satisfy tau <= sigma / ||K||^2
    sigma = 1
    tau = sigma / normK**2
    # NOTE: f and g are deliberately swapped w.r.t. PDHG — the two algorithms
    # assign the roles of the smooth/proximable terms differently.
    admm = LADMM(f=G, g=F, operator=K, tau=tau, sigma=sigma,
                 max_iteration=100, update_objective_interval=10)
    admm.run(verbose=0)
    from cil.utilities.quality_measures import psnr
    if debug_print:
        print("PSNR", psnr(admm.solution, pdhg.solution))
    np.testing.assert_almost_equal(psnr(admm.solution, pdhg.solution),
                                   84.46678222768597, decimal=4)
def test_SPDHG_vs_SPDHG_explicit_axpby(self):
    """Run explicit SPDHG twice — with and without the fused axpby update —
    on the same Poisson-noise tomography problem and check the two outputs
    agree within small MAE/MSE tolerances."""
    data = dataexample.SIMPLE_PHANTOM_2D.get(size=(128, 128))
    if debug_print:
        print("test_SPDHG_vs_SPDHG_explicit_axpby here")
    ig = data.geometry
    ig.voxel_size_x = 0.1
    ig.voxel_size_y = 0.1
    detectors = ig.shape[0]
    angles = np.linspace(0, np.pi, 180)
    ag = AcquisitionGeometry('parallel', '2D', angles, detectors,
                             pixel_size_h=0.1, angle_unit='radian')
    # Select device
    # device = input('Available device: GPU==1 / CPU==0 ')
    # if device=='1':
    #     dev = 'gpu'
    # else:
    #     dev = 'cpu'
    dev = 'cpu'
    Aop = AstraProjectorSimple(ig, ag, dev)
    sin = Aop.direct(data)
    # Create noisy data. Poisson noise is selected (noises[1]).
    noises = ['gaussian', 'poisson']
    noise = noises[1]
    if noise == 'poisson':
        np.random.seed(10)
        scale = 5
        eta = 0
        noisy_data = AcquisitionData(
            np.random.poisson(scale * (eta + sin.as_array())) / scale,
            geometry=ag)
    elif noise == 'gaussian':
        np.random.seed(10)
        n1 = np.random.normal(0, 0.1, size=ag.shape)
        noisy_data = AcquisitionData(n1 + sin.as_array(), geometry=ag)
    else:
        raise ValueError('Unsupported Noise ', noise)
    #%% 'explicit' SPDHG, scalar step-sizes
    subsets = 10
    size_of_subsets = int(len(angles) / subsets)
    # create GradientOperator operator
    op1 = GradientOperator(ig)
    # take angles and create uniform subsets in uniform+sequential setting
    list_angles = [
        angles[i:i + size_of_subsets]
        for i in range(0, len(angles), size_of_subsets)
    ]
    # create acquisition geometries for each interval of splitting angles
    list_geoms = [
        AcquisitionGeometry('parallel', '2D', list_angles[i], detectors,
                            pixel_size_h=0.1, angle_unit='radian')
        for i in range(len(list_angles))
    ]
    # create with operators as many as the subsets (plus the gradient)
    A = BlockOperator(*[
        AstraProjectorSimple(ig, list_geoms[i], dev) for i in range(subsets)
    ] + [op1])
    ## number of subsets
    #(sub2ind, ind2sub) = divide_1Darray_equally(range(len(A)), subsets)
    #
    ## acquisition data
    # NOTE(review): the nested loop appends subsets*subsets entries to AD_list,
    # while only the first `subsets` entries are consumed below via g[i] —
    # looks redundant; verify against the reference implementation.
    AD_list = []
    for sub_num in range(subsets):
        for i in range(0, len(angles), size_of_subsets):
            arr = noisy_data.as_array()[i:i + size_of_subsets, :]
            AD_list.append(
                AcquisitionData(arr, geometry=list_geoms[sub_num]))
    g = BlockDataContainer(*AD_list)
    alpha = 0.5
    ## block function: one KL term per subset plus the TV term
    F = BlockFunction(*[
        *[KullbackLeibler(b=g[i]) for i in range(subsets)] +
        [alpha * MixedL21Norm()]
    ])
    G = IndicatorBox(lower=0)
    # sampling probabilities: data subsets share 1/2, the gradient gets 1/2
    prob = [1 / (2 * subsets)] * (len(A) - 1) + [1 / 2]
    algos = []
    algos.append(
        SPDHG(f=F, g=G, operator=A, max_iteration=1000,
              update_objective_interval=200, prob=prob.copy(),
              use_axpby=True))
    algos[0].run(1000, verbose=0)
    algos.append(
        SPDHG(f=F, g=G, operator=A, max_iteration=1000,
              update_objective_interval=200, prob=prob.copy(),
              use_axpby=False))
    algos[1].run(1000, verbose=0)
    # np.testing.assert_array_almost_equal(algos[0].get_output().as_array(), algos[1].get_output().as_array())
    from cil.utilities.quality_measures import mae, mse, psnr
    qm = (mae(algos[0].get_output(), algos[1].get_output()),
          mse(algos[0].get_output(), algos[1].get_output()),
          psnr(algos[0].get_output(), algos[1].get_output()))
    if debug_print:
        print("Quality measures", qm)
    assert qm[0] < 0.005
    assert qm[1] < 3.e-05
def test_SPDHG_vs_PDHG_explicit(self):
    """Compare explicit SPDHG against explicit PDHG on the same Poisson-noise
    tomography problem and check the reconstructions agree in MAE/MSE.

    Fix: the gaussian branch called ``noise.gaussian`` where ``noise`` is a
    string ('poisson'); the intended call is ``applynoise.gaussian``. The
    branch is not taken with the current configuration, but it would have
    raised AttributeError if selected.
    """
    data = dataexample.SIMPLE_PHANTOM_2D.get(size=(128, 128))
    ig = data.geometry
    ig.voxel_size_x = 0.1
    ig.voxel_size_y = 0.1
    detectors = ig.shape[0]
    angles = np.linspace(0, np.pi, 180)
    ag = AcquisitionGeometry('parallel', '2D', angles, detectors,
                             pixel_size_h=0.1, angle_unit='radian')
    # Select device
    dev = 'cpu'
    Aop = AstraProjectorSimple(ig, ag, dev)
    sin = Aop.direct(data)
    # Create noisy data. Poisson noise is selected (noises[1]).
    noises = ['gaussian', 'poisson']
    noise = noises[1]
    if noise == 'poisson':
        scale = 5
        noisy_data = scale * applynoise.poisson(sin / scale, seed=10)
        # np.random.seed(10)
        # scale = 5
        # eta = 0
        # noisy_data = AcquisitionData(np.random.poisson( scale * (eta + sin.as_array()))/scale, ag)
    elif noise == 'gaussian':
        # BUG FIX: was `noise.gaussian(...)` — `noise` is a str; use the
        # applynoise module as in the other tests.
        noisy_data = applynoise.gaussian(sin, var=0.1, seed=10)
        # np.random.seed(10)
        # n1 = np.random.normal(0, 0.1, size = ag.shape)
        # noisy_data = AcquisitionData(n1 + sin.as_array(), ag)
    else:
        raise ValueError('Unsupported Noise ', noise)
    #%% 'explicit' SPDHG, scalar step-sizes
    subsets = 10
    size_of_subsets = int(len(angles) / subsets)
    # create Gradient operator
    op1 = GradientOperator(ig)
    # take angles and create uniform subsets in uniform+sequential setting
    list_angles = [
        angles[i:i + size_of_subsets]
        for i in range(0, len(angles), size_of_subsets)
    ]
    # create acquisition geometries for each interval of splitting angles
    list_geoms = [
        AcquisitionGeometry('parallel', '2D', list_angles[i], detectors,
                            pixel_size_h=0.1, angle_unit='radian')
        for i in range(len(list_angles))
    ]
    # create with operators as many as the subsets (plus the gradient)
    A = BlockOperator(*[
        AstraProjectorSimple(ig, list_geoms[i], dev) for i in range(subsets)
    ] + [op1])
    ## number of subsets
    #(sub2ind, ind2sub) = divide_1Darray_equally(range(len(A)), subsets)
    #
    ## acquisition data
    # NOTE(review): the nested loop appends subsets*subsets entries while only
    # the first `subsets` are consumed via g[i] below — verify intent.
    AD_list = []
    for sub_num in range(subsets):
        for i in range(0, len(angles), size_of_subsets):
            arr = noisy_data.as_array()[i:i + size_of_subsets, :]
            AD_list.append(
                AcquisitionData(arr, geometry=list_geoms[sub_num]))
    g = BlockDataContainer(*AD_list)
    alpha = 0.5
    ## block function: one KL term per subset plus the TV term
    F = BlockFunction(*[
        *[KullbackLeibler(b=g[i]) for i in range(subsets)] +
        [alpha * MixedL21Norm()]
    ])
    G = IndicatorBox(lower=0)
    # sampling probabilities: data subsets share 1/2, the gradient gets 1/2
    prob = [1 / (2 * subsets)] * (len(A) - 1) + [1 / 2]
    spdhg = SPDHG(f=F, g=G, operator=A, max_iteration=1000,
                  update_objective_interval=200, prob=prob)
    spdhg.run(1000, verbose=0)
    #%% 'explicit' PDHG, scalar step-sizes
    op1 = GradientOperator(ig)
    op2 = Aop
    # Create BlockOperator
    operator = BlockOperator(op1, op2, shape=(2, 1))
    f2 = KullbackLeibler(b=noisy_data)
    g = IndicatorBox(lower=0)
    normK = operator.norm()
    sigma = 1 / normK
    tau = 1 / normK
    f1 = alpha * MixedL21Norm()
    f = BlockFunction(f1, f2)
    # Setup and run the PDHG algorithm
    pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
    pdhg.max_iteration = 1000
    pdhg.update_objective_interval = 200
    pdhg.run(1000, verbose=0)
    #%% show diff between PDHG and SPDHG
    # plt.imshow(spdhg.get_output().as_array() -pdhg.get_output().as_array())
    # plt.colorbar()
    # plt.show()
    from cil.utilities.quality_measures import mae, mse, psnr
    qm = (mae(spdhg.get_output(), pdhg.get_output()),
          mse(spdhg.get_output(), pdhg.get_output()),
          psnr(spdhg.get_output(), pdhg.get_output()))
    if debug_print:
        print("Quality measures", qm)
    np.testing.assert_almost_equal(mae(spdhg.get_output(), pdhg.get_output()),
                                   0.00150, decimal=3)
    np.testing.assert_almost_equal(mse(spdhg.get_output(), pdhg.get_output()),
                                   1.68590e-05, decimal=3)
def test_SPDHG_vs_PDHG_implicit(self):
    """Compare implicit SPDHG (TV via its proximal) against preconditioned
    implicit PDHG on a Poisson-noise tomography problem; reconstructions
    must agree within small MAE/MSE tolerances."""
    data = dataexample.SIMPLE_PHANTOM_2D.get(size=(128, 128))
    ig = data.geometry
    ig.voxel_size_x = 0.1
    ig.voxel_size_y = 0.1
    detectors = ig.shape[0]
    angles = np.linspace(0, np.pi, 90)
    ag = AcquisitionGeometry('parallel', '2D', angles, detectors,
                             pixel_size_h=0.1, angle_unit='radian')
    # Select device
    dev = 'cpu'
    Aop = AstraProjectorSimple(ig, ag, dev)
    sin = Aop.direct(data)
    # Create noisy data. Poisson noise is selected (noises[1]).
    noises = ['gaussian', 'poisson']
    noise = noises[1]
    noisy_data = ag.allocate()
    if noise == 'poisson':
        np.random.seed(10)
        scale = 20
        eta = 0
        noisy_data.fill(
            np.random.poisson(scale * (eta + sin.as_array())) / scale)
    elif noise == 'gaussian':
        np.random.seed(10)
        n1 = np.random.normal(0, 0.1, size=ag.shape)
        noisy_data.fill(n1 + sin.as_array())
    else:
        raise ValueError('Unsupported Noise ', noise)
    # Create BlockOperator
    operator = Aop
    f = KullbackLeibler(b=noisy_data)
    alpha = 0.005
    # TV handled implicitly through its proximal (50 inner iterations)
    g = alpha * TotalVariation(50, 1e-4, lower=0)
    normK = operator.norm()
    #% 'implicit' PDHG, preconditioned step-sizes
    tau_tmp = 1.
    sigma_tmp = 1.
    # element-wise preconditioned step sizes: tau = 1/A^T 1, sigma = 1/A 1
    tau = sigma_tmp / operator.adjoint(
        tau_tmp * operator.range_geometry().allocate(1.))
    sigma = tau_tmp / operator.direct(
        sigma_tmp * operator.domain_geometry().allocate(1.))
    # initial = operator.domain_geometry().allocate()
    # # Setup and run the PDHG algorithm
    pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma,
                max_iteration=1000, update_objective_interval=500)
    pdhg.run(verbose=0)
    subsets = 10
    size_of_subsets = int(len(angles) / subsets)
    # take angles and create uniform subsets in uniform+sequential setting
    list_angles = [
        angles[i:i + size_of_subsets]
        for i in range(0, len(angles), size_of_subsets)
    ]
    # create acquisition geometries for each interval of splitting angles
    list_geoms = [
        AcquisitionGeometry('parallel', '2D', list_angles[i], detectors,
                            pixel_size_h=0.1, angle_unit='radian')
        for i in range(len(list_angles))
    ]
    # create with operators as many as the subsets (no gradient operator here)
    A = BlockOperator(*[
        AstraProjectorSimple(ig, list_geoms[i], dev) for i in range(subsets)
    ])
    ## number of subsets
    #(sub2ind, ind2sub) = divide_1Darray_equally(range(len(A)), subsets)
    #
    ## acquisition data
    # NOTE(review): nested loop appends subsets*subsets entries; only the first
    # `subsets` are consumed via g[i] below — verify intent.
    AD_list = []
    for sub_num in range(subsets):
        for i in range(0, len(angles), size_of_subsets):
            arr = noisy_data.as_array()[i:i + size_of_subsets, :]
            AD_list.append(
                AcquisitionData(arr, geometry=list_geoms[sub_num]))
    # NOTE(review): `g` is rebound here from the TV function to the block data
    # container — the PDHG above has already consumed the TV `g`.
    g = BlockDataContainer(*AD_list)
    ## block function
    F = BlockFunction(*[KullbackLeibler(b=g[i]) for i in range(subsets)])
    G = alpha * TotalVariation(50, 1e-4, lower=0)
    # uniform sampling over the subsets
    prob = [1 / len(A)] * len(A)
    spdhg = SPDHG(f=F, g=G, operator=A, max_iteration=1000,
                  update_objective_interval=200, prob=prob)
    spdhg.run(1000, verbose=0)
    from cil.utilities.quality_measures import mae, mse, psnr
    qm = (mae(spdhg.get_output(), pdhg.get_output()),
          mse(spdhg.get_output(), pdhg.get_output()),
          psnr(spdhg.get_output(), pdhg.get_output()))
    if debug_print:
        print("Quality measures", qm)
    np.testing.assert_almost_equal(mae(spdhg.get_output(), pdhg.get_output()),
                                   0.000335, decimal=3)
    np.testing.assert_almost_equal(mse(spdhg.get_output(), pdhg.get_output()),
                                   5.51141e-06, decimal=3)
def test_PDHG_vs_PDHG_explicit_axpby(self):
    """Run explicit PDHG twice — with and without the fused axpby update —
    on the same Poisson-noise tomography problem and check the outputs agree
    within small MAE/MSE tolerances."""
    data = dataexample.SIMPLE_PHANTOM_2D.get(size=(128, 128))
    if debug_print:
        print("test_PDHG_vs_PDHG_explicit_axpby here")
    ig = data.geometry
    ig.voxel_size_x = 0.1
    ig.voxel_size_y = 0.1
    detectors = ig.shape[0]
    angles = np.linspace(0, np.pi, 180)
    ag = AcquisitionGeometry('parallel', '2D', angles, detectors,
                             pixel_size_h=0.1, angle_unit='radian')
    dev = 'cpu'
    Aop = AstraProjectorSimple(ig, ag, dev)
    sin = Aop.direct(data)
    # Create noisy data. Poisson noise is selected (noises[1]).
    noises = ['gaussian', 'poisson']
    noise = noises[1]
    if noise == 'poisson':
        np.random.seed(10)
        scale = 5
        eta = 0
        noisy_data = AcquisitionData(
            np.random.poisson(scale * (eta + sin.as_array())) / scale,
            geometry=ag)
    elif noise == 'gaussian':
        np.random.seed(10)
        n1 = np.random.normal(0, 0.1, size=ag.shape)
        noisy_data = AcquisitionData(n1 + sin.as_array(), geometry=ag)
    else:
        raise ValueError('Unsupported Noise ', noise)
    alpha = 0.5
    op1 = GradientOperator(ig)
    op2 = Aop
    # Create BlockOperator
    operator = BlockOperator(op1, op2, shape=(2, 1))
    f2 = KullbackLeibler(b=noisy_data)
    g = IndicatorBox(lower=0)
    normK = operator.norm()
    sigma = 1. / normK
    tau = 1. / normK
    f1 = alpha * MixedL21Norm()
    f = BlockFunction(f1, f2)
    # Setup and run the PDHG algorithm twice, toggling use_axpby only
    algos = []
    algos.append(
        PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma,
             max_iteration=1000, update_objective_interval=200,
             use_axpby=True))
    algos[0].run(1000, verbose=0)
    algos.append(
        PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma,
             max_iteration=1000, update_objective_interval=200,
             use_axpby=False))
    algos[1].run(1000, verbose=0)
    from cil.utilities.quality_measures import mae, mse, psnr
    qm = (mae(algos[0].get_output(), algos[1].get_output()),
          mse(algos[0].get_output(), algos[1].get_output()),
          psnr(algos[0].get_output(), algos[1].get_output()))
    if debug_print:
        print("Quality measures", qm)
    np.testing.assert_array_less(qm[0], 0.005)
    np.testing.assert_array_less(qm[1], 3e-05)
def test_SumFunction(self):
    """Exercise SumFunction arithmetic over a heterogeneous pool of functions:
    evaluation of f1 + func (both orders), Lipschitz constants, gradient, and
    gradient with ``out=``."""
    M, N, K = 3,4,5
    ig = ImageGeometry(M, N, K)
    # Fixed seeds keep the random test data reproducible between runs.
    tmp = ig.allocate('random', seed=1)
    b = ig.allocate('random', seed=2)
    eta = ig.allocate(0.1)
    operator = IdentityOperator(ig)
    scalar = 0.25
    f1 = L2NormSquared()
    f2 = L1Norm()
    f3 = scalar * L2NormSquared()
    f4 = scalar * L1Norm()
    f5 = scalar * L2NormSquared(b=b)
    f6 = scalar * L1Norm(b=b)
    f7 = ZeroFunction()
    f8 = 5 * ConstantFunction(10)
    f9 = LeastSquares(operator, b, c=scalar)
    f10 = 0.5*KullbackLeibler(b=b,eta = eta)
    f11 = KullbackLeibler(b=b, eta =eta)
    # A plain number is included to exercise Function + scalar arithmetic.
    f12 = 10
    # f10 = 0.5 * MixedL21Norm()
    # f11 = IndicatorBox(lower=0)
    list1 = [f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12]
    print('################### Check sum of two functions ################## \n')
    for func in list1:
        # check sum of two functions
        if isinstance(func, ScaledFunction):
            type_fun = ' scalar * ' + type(func.function).__name__
        else:
            type_fun = type(func).__name__
        # plain numbers cannot be called on a DataContainer
        if isinstance(func, Number):
            tmp_fun_eval = func
        else:
            tmp_fun_eval = func(tmp)
        sumf = f1 + func
        self.assertNumpyArrayAlmostEqual(sumf(tmp), f1(tmp) + tmp_fun_eval )
        print('{} = ( {} + {} ) is OK'.format(type(sumf).__name__, type(f1).__name__, type_fun))
        # addition should commute
        sumf1 = func + f1
        self.assertNumpyArrayAlmostEqual(sumf1(tmp), tmp_fun_eval + f1(tmp))
        print('Checking commutative')
        print('{} + ( {} + {} ) is OK\n'.format(type(sumf1).__name__, type_fun, type(f1).__name__))
    print('################### Check Lispchitz constant ################## \n')
    for i,func in enumerate(list1):
        if isinstance(func, ScaledFunction):
            type_fun = ' scalar * ' + type(func.function).__name__
        else:
            type_fun = type(func).__name__
        try:
            # check Lipschitz constant of the sum of two functions;
            # functions without an L raise ValueError (caught below)
            print ("i", i,func.__class__.__name__)
            if isinstance(func, Number):
                tmp_fun_L = 0
            else:
                tmp_fun_L = func.L
            sumf = f1 + func
            try:
                # NOTE(review): this comparison result is discarded — it never
                # fails the test; presumably it was meant to be an assertion.
                sumf.L==f1.L + tmp_fun_L
            except TypeError:
                print('Function {} has L = None'.format(type_fun))
        except ValueError as nie:
            print (func.__class__.__name__, nie)
    print('\n################### Check Gradient ################## \n')
    for func in list1:
        if isinstance(func, ScaledFunction):
            type_fun = ' scalar * ' + type(func.function).__name__
        else:
            type_fun = type(func).__name__
        sumf = f1 + func
        # check gradient: sum rule (f1 + func)' == f1' + func'
        try:
            if isinstance(func, Number):
                tmp_fun_gradient = 0
            else:
                tmp_fun_gradient = func.gradient(tmp)
            self.assertNumpyArrayAlmostEqual(sumf.gradient(tmp).as_array(),
                                             (f1.gradient(tmp) + tmp_fun_gradient).as_array())
        except NotImplementedError:
            print("{} is not differentiable".format(type_fun))
    print('\n################### Check Gradient Out ################## \n')
    out_left = ig.allocate()
    out_right1 = ig.allocate()
    out_right2 = ig.allocate()
    for i, func in enumerate(list1):
        if isinstance(func, ScaledFunction):
            type_fun = ' scalar * ' + type(func.function).__name__
        else:
            type_fun = type(func).__name__
        sumf = f1 + func
        # check gradient with out=: must match the allocating version
        try:
            if isinstance(func, Number):
                tmp_fun_gradient_out = 0
            else:
                func.gradient(tmp, out = out_right2)
                tmp_fun_gradient_out = out_right2.as_array()
            #print('Check {} + {}\n'.format(type(f1).__name__, type_fun))
            sumf.gradient(tmp, out = out_left)
            f1.gradient(tmp, out = out_right1)
            self.assertNumpyArrayAlmostEqual(out_left.as_array(),
                                             out_right1.as_array() + tmp_fun_gradient_out)
        except NotImplementedError:
            print("{} is not differentiable".format(type_fun))
def test_KullbackLeibler(self): print("test_KullbackLeibler") #numpy.random.seed(1) M, N, K = 2, 3, 4 ig = ImageGeometry(N, M, K) u1 = ig.allocate('random', seed=500) g1 = ig.allocate('random', seed=100) b1 = ig.allocate('random', seed=1000) # with no data try: f = KullbackLeibler() except ValueError: print('Give data b=...\n') print('With negative data, no background\n') try: f = KullbackLeibler(b=-1 * g1) except ValueError: print('We have negative data\n') f = KullbackLeibler(b=g1) print('Check KullbackLeibler(x,x)=0\n') self.assertNumpyArrayAlmostEqual(0.0, f(g1)) print('Check gradient .... is OK \n') res_gradient = f.gradient(u1) res_gradient_out = u1.geometry.allocate() f.gradient(u1, out=res_gradient_out) self.assertNumpyArrayAlmostEqual(res_gradient.as_array(), \ res_gradient_out.as_array(),decimal = 4) print('Check proximal ... is OK\n') tau = 400.4 res_proximal = f.proximal(u1, tau) res_proximal_out = u1.geometry.allocate() f.proximal(u1, tau, out=res_proximal_out) self.assertNumpyArrayAlmostEqual(res_proximal.as_array(), \ res_proximal_out.as_array(), decimal =5) print('Check conjugate ... is OK\n') if (1 - u1.as_array()).all(): print('If 1-x<=0, Convex conjugate returns 0.0') u2 = u1 * 0 + 2. self.assertNumpyArrayAlmostEqual(0.0, f.convex_conjugate(u2)) print('Check KullbackLeibler with background\n') eta = b1 f1 = KullbackLeibler(b=g1, eta=b1) tmp_sum = (u1 + eta).as_array() ind = tmp_sum >= 0 tmp = scipy.special.kl_div(f1.b.as_array()[ind], tmp_sum[ind]) self.assertNumpyArrayAlmostEqual(f1(u1), numpy.sum(tmp)) res_proximal_conj_out = u1.geometry.allocate() proxc = f.proximal_conjugate(u1, tau) f.proximal_conjugate(u1, tau, out=res_proximal_conj_out) print(res_proximal_conj_out.as_array()) print(proxc.as_array()) numpy.testing.assert_array_almost_equal( proxc.as_array(), res_proximal_conj_out.as_array())
noisy_data = MO.direct(noisy_data) # Regularisation Parameter depending on the noise distribution if noise == 's&p': alpha = 0.8 elif noise == 'poisson': alpha = 1.0 elif noise == 'gaussian': alpha = .3 # Choose data fidelity dependent on noise type. if noise == 's&p': f2 = L1Norm(b=noisy_data) elif noise == 'poisson': f2 = KullbackLeibler(noisy_data) elif noise == 'gaussian': f2 = 0.5 * L2NormSquared(b=noisy_data) # Create operators op1 = GradientOperator(ig, correlation=GradientOperator.CORRELATION_SPACE) op2 = MO # Create BlockOperator operator = BlockOperator(op1, op2, shape=(2, 1)) # Create functions f = BlockFunction(alpha * MixedL21Norm(), f2) g = ZeroFunction() # Compute operator Norm