def test_FISTA_Norm2Sq(self):
    print("Test FISTA Norm2Sq")
    ig = ImageGeometry(127, 139, 149)
    b = ig.allocate(ImageGeometry.RANDOM)  # fill with random numbers
    initial = ig.allocate(ImageGeometry.RANDOM)
    identity = IdentityOperator(ig)

    norm2sq = LeastSquares(identity, b)

    if debug_print:
        print("initial objective", norm2sq(initial))

    # FISTA on a least-squares problem with an identity operator
    # should recover b
    alg = FISTA(initial=initial, f=norm2sq, g=ZeroFunction())
    alg.max_iteration = 2
    alg.run(20, verbose=0)
    self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())

    alg = FISTA(initial=initial, f=norm2sq, g=ZeroFunction(),
                max_iteration=2, update_objective_interval=3)
    self.assertTrue(alg.max_iteration == 2)
    self.assertTrue(alg.update_objective_interval == 3)
    alg.run(20, verbose=0)
    self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())
def test_FISTA_catch_Lipschitz(self):
    if debug_print:
        print("Test FISTA catch Lipschitz")
    ig = ImageGeometry(127, 139, 149)
    initial = ig.allocate()
    b = initial.copy()
    # fill with random numbers
    b.fill(numpy.random.random(initial.shape))
    initial = ig.allocate(ImageGeometry.RANDOM)
    identity = IdentityOperator(ig)

    norm2sq = LeastSquares(identity, b)
    if debug_print:
        print('Lipschitz', norm2sq.L)
        print("initial objective", norm2sq(initial))

    # L1Norm is not differentiable, so its Lipschitz constant is None:
    # FISTA should refuse it as the smooth function f
    try:
        alg = FISTA(initial=initial, f=L1Norm(), g=ZeroFunction())
        self.fail("FISTA should raise ValueError when f.L is None")
    except ValueError as ve:
        print(ve)
def setUp(self, *args, **kwargs):
    M, N, K = 3, 4, 5
    self.ig = ImageGeometry(M, N, K)

    self.x = self.ig.allocate('random', seed=1)
    self.b = self.ig.allocate('random', seed=2)
    self.eta = self.ig.allocate(0.1)

    self.operator = IdentityOperator(self.ig)

    scalar = 0.25
    self.f1 = L2NormSquared()
    self.f2 = L1Norm()
    self.f3 = scalar * L2NormSquared()
    self.f4 = scalar * L1Norm()
    self.f5 = scalar * L2NormSquared(b=self.b)
    self.f6 = scalar * L1Norm(b=self.b)
    self.f7 = ZeroFunction()
    self.f8 = 5 * ConstantFunction(10)
    self.f9 = LeastSquares(self.operator, self.b, c=scalar)
    self.f10 = 0.5 * KullbackLeibler(b=self.b, eta=self.eta)
    self.f11 = KullbackLeibler(b=self.b, eta=self.eta)
    self.f12 = 10

    self.list1 = [self.f1, self.f2, self.f3, self.f4, self.f5, self.f6,
                  self.f7, self.f8, self.f9, self.f10, self.f11, self.f12]
def __init__(self, initial=None, f=None, g=ZeroFunction(), **kwargs):
    '''FISTA algorithm creator

    Initialisation can be done at creation time, if all proper variables
    are passed, or later with set_up.

    Optional parameters:

    :param initial: Initial guess (default: initial = 0)
    :param f: Differentiable function
    :param g: Convex function with "simple" proximal operator'''

    super(FISTA, self).__init__(**kwargs)
    if kwargs.get('x_init', None) is not None:
        if initial is None:
            warnings.warn('The use of the x_init parameter is deprecated and '
                          'will be removed in a future version. Use initial instead',
                          DeprecationWarning, stacklevel=4)
            initial = kwargs.get('x_init', None)
        else:
            raise ValueError('{} received both initial and the deprecated x_init '
                             'parameter. It is not clear which one we should use.'
                             .format(self.__class__.__name__))

    if initial is not None and f is not None:
        self.set_up(initial=initial, f=f, g=g)
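# A minimal usage sketch (illustration only, not part of the class): the two
# equivalent ways of configuring FISTA described in the docstring above.
# `ig`, `b` and the LeastSquares fidelity are placeholder names in the style
# of the tests in this repository.
#
#   ig = ImageGeometry(64, 64)
#   b = ig.allocate(ImageGeometry.RANDOM)
#   f = LeastSquares(IdentityOperator(ig), b)
#
#   # 1) configure at creation time
#   alg = FISTA(initial=ig.allocate(), f=f, g=ZeroFunction())
#
#   # 2) create first, configure later with set_up
#   alg = FISTA()
#   alg.set_up(initial=ig.allocate(), f=f, g=ZeroFunction())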
def test_compare_with_PDHG(self):
    # Load an image from the CIL gallery.
    data = dataexample.SHAPES.get()
    ig = data.geometry

    # Add Gaussian noise
    noisy_data = applynoise.gaussian(data, seed=10, var=0.005)

    # TV regularisation parameter
    alpha = 1

    # Alternative fidelity terms:
    # fidelity = 0.5 * L2NormSquared(b=noisy_data)
    # fidelity = L1Norm(b=noisy_data)
    fidelity = KullbackLeibler(b=noisy_data, use_numba=False)

    # Set up and run the PDHG algorithm
    F = BlockFunction(alpha * MixedL21Norm(), fidelity)
    G = ZeroFunction()
    K = BlockOperator(GradientOperator(ig), IdentityOperator(ig))

    # Compute operator norm
    normK = K.norm()

    # Primal & dual stepsizes
    sigma = 1. / normK
    tau = 1. / normK

    pdhg = PDHG(f=F, g=G, operator=K, tau=tau, sigma=sigma,
                max_iteration=100, update_objective_interval=10)
    pdhg.run(verbose=0)

    # LADMM swaps the roles of f and g relative to PDHG
    sigma = 1
    tau = sigma / normK**2

    admm = LADMM(f=G, g=F, operator=K, tau=tau, sigma=sigma,
                 max_iteration=100, update_objective_interval=10)
    admm.run(verbose=0)

    from cil.utilities.quality_measures import psnr
    if debug_print:
        print("PSNR", psnr(admm.solution, pdhg.solution))
    np.testing.assert_almost_equal(psnr(admm.solution, pdhg.solution),
                                   84.46678222768597, decimal=4)
def test_exception_initial_FISTA(self):
    if debug_print:
        print("Test FISTA")
    ig = ImageGeometry(127, 139, 149)
    initial = ig.allocate()
    b = initial.copy()
    # fill with random numbers
    b.fill(numpy.random.random(initial.shape))
    initial = ig.allocate(ImageGeometry.RANDOM)
    identity = IdentityOperator(ig)

    norm2sq = OperatorCompositionFunction(L2NormSquared(b=b), identity)
    if debug_print:
        print("initial objective", norm2sq(initial))

    # passing both initial and the deprecated x_init must raise ValueError
    try:
        alg = FISTA(initial=initial, f=norm2sq, g=ZeroFunction(),
                    x_init=initial)
        self.fail("FISTA should raise ValueError when given both initial and x_init")
    except ValueError:
        pass
def set_up(self, initial, f, g=ZeroFunction()):
    '''Initialisation of the algorithm

    :param initial: Initial guess (default: initial = 0)
    :param f: Differentiable function
    :param g: Convex function with "simple" proximal operator'''

    print("{} setting up".format(self.__class__.__name__, ))

    self.y = initial.copy()
    self.x_old = initial.copy()
    self.x = initial.copy()
    self.u = initial.copy()

    self.f = f
    self.g = g
    if f.L is None:
        raise ValueError('Error: Fidelity Function\'s Lipschitz constant is set to None')
    self.invL = 1 / f.L
    self.t = 1
    self.configured = True
    print("{} configured".format(self.__class__.__name__, ))
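# For reference, a minimal sketch of the FISTA iteration that consumes the
# state initialised above (x, x_old, y, t, invL). This is the textbook
# Beck-Teboulle update, shown for clarity; it is an illustration under that
# assumption, not necessarily the library's exact update() implementation.
#
#   def update(self):
#       # gradient step on the smooth part, from the extrapolated point y
#       self.u = self.y - self.invL * self.f.gradient(self.y)
#       # proximal step on the non-smooth part
#       self.x = self.g.proximal(self.u, self.invL)
#       # momentum update
#       t_new = 0.5 * (1 + numpy.sqrt(1 + 4 * self.t**2))
#       self.y = self.x + ((self.t - 1) / t_new) * (self.x - self.x_old)
#       self.x_old = self.x.copy()
#       self.t = t_new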
def set_up_TV_regularisation(image_geometry: ImageGeometry,
                             acquisition_data: AcquisitionData, alpha: float):
    # Forward operator
    A2d = ProjectionOperator(image_geometry, acquisition_data.geometry, 'gpu')

    # Set up TV regularisation: define the GradientOperator and combine it
    # with the forward operator into a BlockOperator. The regularisation
    # parameter alpha is folded into the gradient operator, so f1 below is
    # the unscaled MixedL21Norm.
    Grad = GradientOperator(image_geometry)
    K = BlockOperator(alpha * Grad, A2d)

    # Define the regularisation and fidelity terms using MixedL21Norm()
    # and L2NormSquared()
    f1 = MixedL21Norm()
    f2 = L2NormSquared(b=acquisition_data)

    # Define Function G simply as zero
    G = ZeroFunction()
    return (K, f1, f2, G)
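# A usage sketch for the helper above: the returned pieces plug directly
# into PDHG, mirroring the pattern used elsewhere in this file. The caller
# is assumed to provide `image_geometry`, `acquisition_data` and `alpha`;
# the stepsize and iteration choices here are illustrative.
#
#   K, f1, f2, G = set_up_TV_regularisation(image_geometry, acquisition_data, alpha)
#   F = BlockFunction(f1, f2)
#   normK = K.norm()
#   sigma = 1.0 / normK
#   tau = 1.0 / normK
#   pdhg = PDHG(f=F, g=G, operator=K, tau=tau, sigma=sigma,
#               max_iteration=1000, update_objective_interval=100)
#   pdhg.run(verbose=1)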
op1 = GradientOperator(ig_gray, correlation=GradientOperator.CORRELATION_SPACE)
op2 = BOP

# Set regularisation parameter.
alpha = 0.02

# Create functions to be blocked with operators
f1 = alpha * MixedL21Norm()
f2 = 0.5 * L2NormSquared(b=blurredimage)

# Create BlockOperator
operator = BlockOperator(op1, op2, shape=(2, 1))

# Create functions
f = BlockFunction(f1, f2)
g = ZeroFunction()

# Compute operator norm
normK = operator.norm()

# Primal & dual stepsizes
sigma = 1
tau = 1 / (sigma * normK**2)

# Setup and run the PDHG algorithm
pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 10000
pdhg.update_objective_interval = 1
pdhg.run(200, very_verbose=True)

# Show results
def test_SumFunction(self):
    M, N, K = 3, 4, 5
    ig = ImageGeometry(M, N, K)

    tmp = ig.allocate('random', seed=1)
    b = ig.allocate('random', seed=2)
    eta = ig.allocate(0.1)

    operator = IdentityOperator(ig)

    scalar = 0.25
    f1 = L2NormSquared()
    f2 = L1Norm()
    f3 = scalar * L2NormSquared()
    f4 = scalar * L1Norm()
    f5 = scalar * L2NormSquared(b=b)
    f6 = scalar * L1Norm(b=b)
    f7 = ZeroFunction()
    f8 = 5 * ConstantFunction(10)
    f9 = LeastSquares(operator, b, c=scalar)
    f10 = 0.5 * KullbackLeibler(b=b, eta=eta)
    f11 = KullbackLeibler(b=b, eta=eta)
    f12 = 10

    list1 = [f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12]

    print('################### Check sum of two functions ################## \n')
    for func in list1:
        # check sum of two functions
        if isinstance(func, ScaledFunction):
            type_fun = ' scalar * ' + type(func.function).__name__
        else:
            type_fun = type(func).__name__

        if isinstance(func, Number):
            tmp_fun_eval = func
        else:
            tmp_fun_eval = func(tmp)

        sumf = f1 + func
        self.assertNumpyArrayAlmostEqual(sumf(tmp), f1(tmp) + tmp_fun_eval)
        print('{} = ( {} + {} ) is OK'.format(type(sumf).__name__,
                                              type(f1).__name__, type_fun))

        sumf1 = func + f1
        self.assertNumpyArrayAlmostEqual(sumf1(tmp), tmp_fun_eval + f1(tmp))
        print('Checking commutative')
        print('{} + ( {} + {} ) is OK\n'.format(type(sumf1).__name__,
                                                type_fun, type(f1).__name__))

    print('################### Check Lipschitz constant ################## \n')
    for i, func in enumerate(list1):
        if isinstance(func, ScaledFunction):
            type_fun = ' scalar * ' + type(func.function).__name__
        else:
            type_fun = type(func).__name__

        try:
            # check the Lipschitz constant of the sum of two functions
            print("i", i, func.__class__.__name__)
            if isinstance(func, Number):
                tmp_fun_L = 0
            else:
                tmp_fun_L = func.L

            sumf = f1 + func
            try:
                self.assertEqual(sumf.L, f1.L + tmp_fun_L)
            except TypeError:
                print('Function {} has L = None'.format(type_fun))
        except ValueError as nie:
            print(func.__class__.__name__, nie)

    print('\n################### Check Gradient ################## \n')
    for func in list1:
        if isinstance(func, ScaledFunction):
            type_fun = ' scalar * ' + type(func.function).__name__
        else:
            type_fun = type(func).__name__

        sumf = f1 + func
        # check gradient
        try:
            if isinstance(func, Number):
                tmp_fun_gradient = 0
            else:
                tmp_fun_gradient = func.gradient(tmp)

            self.assertNumpyArrayAlmostEqual(
                sumf.gradient(tmp).as_array(),
                (f1.gradient(tmp) + tmp_fun_gradient).as_array())
        except NotImplementedError:
            print("{} is not differentiable".format(type_fun))

    print('\n################### Check Gradient Out ################## \n')
    out_left = ig.allocate()
    out_right1 = ig.allocate()
    out_right2 = ig.allocate()

    for i, func in enumerate(list1):
        if isinstance(func, ScaledFunction):
            type_fun = ' scalar * ' + type(func.function).__name__
        else:
            type_fun = type(func).__name__

        sumf = f1 + func
        # check gradient with the out argument
        try:
            if isinstance(func, Number):
                tmp_fun_gradient_out = 0
            else:
                func.gradient(tmp, out=out_right2)
                tmp_fun_gradient_out = out_right2.as_array()

            sumf.gradient(tmp, out=out_left)
            f1.gradient(tmp, out=out_right1)
            self.assertNumpyArrayAlmostEqual(
                out_left.as_array(),
                out_right1.as_array() + tmp_fun_gradient_out)
        except NotImplementedError:
            print("{} is not differentiable".format(type_fun))