    def test_FISTA_Norm2Sq(self):
        print("Test FISTA Norm2Sq")
        ig = ImageGeometry(127, 139, 149)
        b = ig.allocate(ImageGeometry.RANDOM)  # fill with random numbers
        x_init = ig.allocate(ImageGeometry.RANDOM)
        identity = Identity(ig)

        # Note: FISTA did not work with the old Norm2Sq class; use LeastSquares
        norm2sq = LeastSquares(identity, b)
        #norm2sq.L = 2 * norm2sq.c * identity.norm()**2
        #norm2sq = FunctionOperatorComposition(L2NormSquared(b=b), identity)

        print("initial objective", norm2sq(x_init))
        alg = FISTA(x_init=x_init, f=norm2sq, g=ZeroFunction())
        alg.max_iteration = 2
        alg.run(20, verbose=True)
        self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())

        alg = FISTA(x_init=x_init, f=norm2sq, g=ZeroFunction(),
                    max_iteration=2, update_objective_interval=3)
        self.assertTrue(alg.max_iteration == 2)
        self.assertTrue(alg.update_objective_interval == 3)
        alg.run(20, verbose=True)
        self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())
    def test_FISTA_catch_Lipschitz(self):
        print("Test FISTA catch Lipschitz")
        ig = ImageGeometry(127, 139, 149)
        x_init = ig.allocate()
        b = x_init.copy()
        # fill with random numbers
        b.fill(numpy.random.random(x_init.shape))
        x_init = ig.allocate(ImageGeometry.RANDOM)
        identity = Identity(ig)

        norm2sq = LeastSquares(identity, b)
        print('Lipschitz', norm2sq.L)
        # FISTA needs the Lipschitz constant of f's gradient; unset it to
        # trigger the error
        norm2sq.L = None

        print("initial objective", norm2sq(x_init))
        # Setting up FISTA with f.L == None should raise a ValueError
        try:
            alg = FISTA(x_init=x_init, f=norm2sq, g=ZeroFunction())
            self.assertTrue(False)
        except ValueError as ve:
            print(ve)
            self.assertTrue(True)
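# Aside: a minimal sketch of where f.L comes from for least squares. For
# f(x) = c * ||A x - b||_2^2 the gradient is 2 c A^T (A x - b), whose Lipschitz
# constant is L = 2 c ||A||_2^2 (this mirrors the commented-out line in the
# test above; the helper below is illustrative only, assuming a dense matrix A).
import numpy as np

def least_squares_lipschitz(A, c=1.0):
    # the operator 2-norm of A is its largest singular value
    op_norm = np.linalg.norm(A, 2)
    return 2.0 * c * op_norm**2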
    def __init__(self, x_init=None, f=None, g=ZeroFunction(), **kwargs):
        '''FISTA algorithm creator

        Initialisation can be done at creation time if all proper variables
        are passed, or later with set_up.

        Optional parameters:

        :param x_init: initial guess (default x_init = 0)
        :param f: differentiable function
        :param g: convex function with a "simple" proximal operator
        '''
        super(FISTA, self).__init__(**kwargs)

        if x_init is not None and f is not None:
            self.set_up(x_init=x_init, f=f, g=g)
    def set_up(self, x_init, f, g=ZeroFunction()):
        '''Initialisation of the algorithm

        :param x_init: initial guess (default x_init = 0)
        :param f: differentiable function
        :param g: convex function with a "simple" proximal operator
        '''
        print("{} setting up".format(self.__class__.__name__))

        self.y = x_init.copy()
        self.x_old = x_init.copy()
        self.x = x_init.copy()
        self.u = x_init.copy()

        self.f = f
        self.g = g
        if f.L is None:
            raise ValueError('Error: Fidelity Function\'s Lipschitz constant is set to None')
        self.invL = 1 / f.L
        self.t = 1
        self.update_objective()
        self.configured = True
        print("{} configured".format(self.__class__.__name__))
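# For context: a minimal NumPy sketch of the iteration that set_up prepares
# for, following Beck & Teboulle's FISTA scheme. This is a standalone
# illustration, not the class's actual update method; f_grad and g_prox are
# assumed callables for the gradient of f and the proximal map of g.
import numpy as np

def fista_sketch(x0, f_grad, g_prox, L, n_iter=100):
    x = x0.copy()
    x_old = x0.copy()
    y = x0.copy()
    t = 1.0
    for _ in range(n_iter):
        # gradient step on the smooth part, proximal step on the non-smooth part
        x = g_prox(y - (1.0 / L) * f_grad(y), 1.0 / L)
        # momentum parameter update
        t_new = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * t**2))
        # extrapolation using the previous two iterates
        y = x + ((t - 1.0) / t_new) * (x - x_old)
        x_old = x
        t = t_new
    return x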
x_init = ig.allocate()
cgls = CGLS(x_init=x_init, operator=op_CGLS, data=block_data)
cgls.max_iteration = 1000
cgls.update_objective_interval = 200
cgls.run(1000, verbose=False)

# Setup and run the PDHG algorithm

# Create BlockOperator
op_PDHG = BlockOperator(Grad, Aop, shape=(2, 1))

# Create functions
f1 = 0.5 * alpha**2 * L2NormSquared()
f2 = 0.5 * L2NormSquared(b=noisy_data)
f = BlockFunction(f1, f2)
g = ZeroFunction()

# Compute operator norm
normK = op_PDHG.norm()

# Primal & dual stepsizes
sigma = 10
tau = 1 / (sigma * normK**2)

pdhg = PDHG(f=f, g=g, operator=op_PDHG, tau=tau, sigma=sigma)
pdhg.max_iteration = 1000
pdhg.update_objective_interval = 200
pdhg.run(1000, verbose=False)

# Show results
plt.figure(figsize=(10, 10))
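# The step sizes above follow the usual PDHG convergence condition
# sigma * tau * ||K||^2 <= 1; a tiny helper (hypothetical, for illustration)
# that derives tau from a chosen sigma:
def pdhg_tau(sigma, norm_K):
    # with tau = 1 / (sigma * ||K||^2) the product sigma * tau * ||K||^2 is exactly 1
    return 1.0 / (sigma * norm_K**2)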
    op31 = Identity(ig, ag)
    op32 = ZeroOperator(op22.domain_geometry(), ag)

    operator = BlockOperator(op11, -1 * op12, op21, op22, op31, op32, shape=(3, 2))

    f1 = alpha * MixedL21Norm()
    f2 = beta * MixedL21Norm()
    f = BlockFunction(f1, f2, f3)
    g = ZeroFunction()

else:
    # Create operators
    op11 = Gradient(ig)
    op12 = Identity(op11.range_geometry())
    op22 = SymmetrizedGradient(op11.domain_geometry())
    op21 = ZeroOperator(ig, op22.range_geometry())

    operator = BlockOperator(op11, -1 * op12, op21, op22, shape=(2, 2))

    f1 = alpha * MixedL21Norm()
    f2 = beta * MixedL21Norm()
    f = BlockFunction(f1, f2)
    axarro[k].imshow(z.as_array()[k], vmin=0, vmax=3500)
plt.show()

# Using the test data b, different reconstruction methods can now be set up as
# demonstrated in the rest of this file. In general all methods need an initial
# guess and some algorithm options to be set:
x_init = ig.allocate(0.0)
opt = {'tol': 1e-4, 'iter': 200}

# Create a least-squares objective from the projector and test data (the
# commented-out alternative uses Norm2Sq with a constant coefficient of 0.5).
# Note it is least squares over all channels:
#f = Norm2Sq(Aop, b, c=0.5)
f = FunctionOperatorComposition(L2NormSquared(b=b), Aop)

# Run FISTA for least squares without regularization
FISTA_alg = FISTA()
FISTA_alg.set_up(x_init=x_init, f=f, g=ZeroFunction())
FISTA_alg.max_iteration = 2000
FISTA_alg.run(opt['iter'])
x_FISTA = FISTA_alg.get_output()

# Display reconstruction and criterion
ff0, axarrf0 = plt.subplots(1, numchannels)
for k in numpy.arange(3):
    axarrf0[k].imshow(x_FISTA.as_array()[k], vmin=0, vmax=2.5)
plt.show()

plt.figure()
plt.semilogy(FISTA_alg.objective)
plt.title('Criterion vs iterations, least squares')
plt.show()
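# FunctionOperatorComposition represents F(x) = f(A x), and its gradient
# follows the chain rule: grad F(x) = A^T grad f(A x). A minimal NumPy sketch
# for the L2NormSquared case used above (illustrative only, assuming a dense
# matrix A; the library works with operator objects instead):
import numpy as np

def composed_l2_gradient(A, b, x):
    # f(y) = ||y - b||^2  =>  grad f(y) = 2 (y - b), pulled back through A^T
    return A.T @ (2.0 * (A @ x - b))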
plt.figure()
plt.semilogy(CGLS_alg.objective)
plt.title('CGLS criterion')
plt.show()

# CGLS solves the simple least-squares problem. The same problem can be solved
# by FISTA by explicitly setting up a least-squares function object and using
# no regularisation:

# Create least-squares object instance with projector, test data and a
# constant coefficient of 1:
f = Norm2Sq(A=Aop, b=b, c=1)
#f = FunctionOperatorComposition(L2NormSquared(b=b), Aop)

# Run FISTA for least squares without constraints
FISTA_alg = FISTA(x_init=x_init, f=f, g=ZeroFunction())
FISTA_alg.max_iteration = 2000
FISTA_alg.run(opt['iter'])
x_FISTA = FISTA_alg.get_output()

plt.figure()
plt.imshow(x_FISTA.as_array())
plt.title('FISTA least-squares reconstruction')
plt.colorbar()
plt.show()

plt.figure()
plt.semilogy(FISTA_alg.objective)
plt.title('FISTA least-squares criterion')
plt.show()
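# Norm2Sq represents f(x) = c * ||A x - b||^2; with c = 1 this is the same
# objective CGLS minimises, so the two reconstructions should agree. A minimal
# NumPy sketch of the objective value (illustrative, dense-matrix version):
import numpy as np

def norm2sq_value(A, b, x, c=1.0):
    r = A @ x - b
    return c * (r @ r)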
    op11 = Gradient(ig)
    op12 = Identity(op11.range_geometry())
    op22 = SymmetrizedGradient(op11.range_geometry())
    op21 = ZeroOperator(ig, op22.range_geometry())
    op31 = Identity(ig, ag)
    op32 = ZeroOperator(op22.domain_geometry(), ag)

    operator = BlockOperator(op11, -1 * op12, op21, op22, op31, op32, shape=(3, 2))

    f1 = alpha * MixedL21Norm()
    f2 = beta * MixedL21Norm()
    f = BlockFunction(f1, f2, f3)
    g = ZeroFunction()

else:
    # Create operators
    op11 = Gradient(ig)
    op12 = Identity(op11.range_geometry())
    op22 = SymmetrizedGradient(op11.domain_geometry())
    op21 = ZeroOperator(ig, op22.range_geometry())

    operator = BlockOperator(op11, -1 * op12, op21, op22, shape=(2, 2))

    f1 = alpha * MixedL21Norm()
    f2 = beta * MixedL21Norm()
    f = BlockFunction(f1, f2)
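# The 3x2 block operator above acts on a pair of variables (u, w): its rows
# compute (grad u - w, E w, D u), where E is the symmetrised gradient and D is
# the data operator (identity for denoising, a projector for tomography).
# A minimal sketch of that action with plain callables standing in for the
# operators (hypothetical helpers, for illustration only):
def tgv_block_direct(u, w, grad, sym_grad, data_op):
    # rows of the (3, 2) BlockOperator applied to the block variable (u, w)
    return (grad(u) - w, sym_grad(w), data_op(u))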
    raise NotImplementedError

Aop = AstraProjectorSimple(ig, ag, dev)
sin = Aop.direct(data)

eta = 0
noisy_data = AcquisitionData(sin.as_array() + np.random.normal(0, 1, ag.shape))
back_proj = Aop.adjoint(noisy_data)

# Define least squares
f = FunctionOperatorComposition(L2NormSquared(b=noisy_data), Aop)

# Allocate solution
x_init = ig.allocate()

# Run FISTA for least squares
fista = FISTA(x_init=x_init, f=f, g=ZeroFunction())
fista.max_iteration = 10
fista.update_objective_interval = 2
fista.run(100, verbose=True)

# Run FISTA for least squares with lower/upper bound
fista0 = FISTA(x_init=x_init, f=f, g=IndicatorBox(lower=0, upper=1))
fista0.max_iteration = 10
fista0.update_objective_interval = 2
fista0.run(100, verbose=True)

# Run FISTA for regularised least squares, with squared norm of gradient
alpha = 20
Grad = Gradient(ig)
block_op = BlockOperator(Aop, alpha * Grad, shape=(2, 1))
block_data = BlockDataContainer(noisy_data, Grad.range_geometry().allocate())
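# The block setup above rewrites the regularised problem
#     min_x ||Aop x - b||^2 + alpha^2 ||Grad x||^2
# as one least-squares problem over a stacked operator: with
# K = [Aop; alpha*Grad] and d = [b; 0], the objective is ||K x - d||^2.
# A minimal dense NumPy sketch of the same stacking (illustrative only):
import numpy as np

def stacked_least_squares(A, G, b, alpha):
    # stack the forward operator and the scaled regulariser vertically
    K = np.vstack([A, alpha * G])
    d = np.concatenate([b, np.zeros(G.shape[0])])
    # reference solution of min ||K x - d||^2
    x, *_ = np.linalg.lstsq(K, d, rcond=None)
    return x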
op22 = SymmetrizedGradient(op11.domain_geometry())
op21 = ZeroOperator(ig, op22.range_geometry())
op31 = Aop
op32 = ZeroOperator(op22.domain_geometry(), ag)

operator = BlockOperator(op11, -1 * op12, op21, op22, op31, op32, shape=(3, 2))

normK = operator.norm()

# Create functions
if noise == 'poisson':
    alpha = 2
    beta = 3
    f3 = KullbackLeibler(noisy_data)
    g = BlockFunction(IndicatorBox(lower=0), ZeroFunction())

    # Primal & dual stepsizes
    sigma = 1
    tau = 1 / (sigma * normK**2)

elif noise == 'gaussian':
    alpha = 20
    beta = 50
    f3 = 0.5 * L2NormSquared(b=noisy_data)
    g = BlockFunction(ZeroFunction(), ZeroFunction())

    # Primal & dual stepsizes
    sigma = 10
    tau = 1 / (sigma * normK**2)
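# The fidelity term f3 is matched to the noise model: Kullback-Leibler for
# Poisson noise, scaled squared L2 for Gaussian noise. Minimal NumPy sketches
# of the two data terms (illustrative only; the library versions also handle
# zero and negative entries carefully):
import numpy as np

def kl_fidelity(x, data):
    # Kullback-Leibler divergence for Poisson-distributed data (assumes x, data > 0)
    return np.sum(x - data + data * np.log(data / x))

def l2_fidelity(x, data):
    # 0.5 * ||x - data||^2 for Gaussian noise
    return 0.5 * np.sum((x - data)**2)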
    def test_FISTA_cvx(self):
        if False:
            if not cvx_not_installable:
                try:
                    # Problem data.
                    m = 30
                    n = 20
                    np.random.seed(1)
                    Amat = np.random.randn(m, n)
                    A = LinearOperatorMatrix(Amat)
                    bmat = np.random.randn(m)
                    bmat.shape = (bmat.shape[0], 1)

                    # A = Identity()
                    # Change n to equal to m.
                    #b = DataContainer(bmat)
                    vg = VectorGeometry(m)
                    b = vg.allocate('random')

                    # Regularization parameter
                    lam = 10
                    opt = {'memopt': True}

                    # Create object instances with the test data A and b.
                    f = LeastSquares(A, b, c=0.5)
                    g0 = ZeroFunction()

                    # Initial guess
                    #x_init = DataContainer(np.zeros((n, 1)))
                    x_init = vg.allocate()
                    f.gradient(x_init, out=x_init)

                    # Run FISTA for least squares plus zero function.
                    #x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0, opt=opt)
                    fa = FISTA(x_init=x_init, f=f, g=g0)
                    fa.max_iteration = 10
                    fa.run(10)

                    # Print solution and final objective/criterion value for comparison
                    print("FISTA least squares plus zero function solution and objective value:")
                    print(fa.get_output())
                    print(fa.get_last_objective())

                    # Compare to CVXPY

                    # Construct the problem.
                    x0 = Variable(n)
                    objective0 = Minimize(0.5 * sum_squares(Amat * x0 - bmat.T[0]))
                    prob0 = Problem(objective0)

                    # The optimal objective is returned by prob.solve().
                    result0 = prob0.solve(verbose=False, solver=SCS, eps=1e-9)

                    # The optimal solution for x is stored in x.value and the
                    # optimal objective value is in result as well as in objective.value
                    print("CVXPY least squares plus zero function solution and objective value:")
                    print(x0.value)
                    print(objective0.value)
                    self.assertNumpyArrayAlmostEqual(
                        numpy.squeeze(fa.get_output().as_array()), x0.value, 6)
                except SolverError as se:
                    print(str(se))
                    self.assertTrue(True)
            else:
                self.assertTrue(cvx_not_installable)
    def stest_FISTA_Norm1_cvx(self):
        if not cvx_not_installable:
            try:
                opt = {'memopt': True}
                # Problem data.
                m = 30
                n = 20
                np.random.seed(1)
                Amat = np.random.randn(m, n)
                A = LinearOperatorMatrix(Amat)
                bmat = np.random.randn(m)
                #bmat.shape = (bmat.shape[0], 1)

                # A = Identity()
                # Change n to equal to m.
                vgb = VectorGeometry(m)
                vgx = VectorGeometry(n)
                b = vgb.allocate()
                b.fill(bmat)
                #b = DataContainer(bmat)

                # Regularization parameter
                lam = 10

                # Create object instances with the test data A and b.
                f = LeastSquares(A, b, c=0.5)
                g0 = ZeroFunction()

                # Initial guess
                #x_init = DataContainer(np.zeros((n, 1)))
                x_init = vgx.allocate()

                # Create 1-norm object instance
                g1 = lam * L1Norm()

                g1(x_init)
                g1.prox(x_init, 0.02)

                # Combine with least squares and solve using generic FISTA implementation
                #x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt=opt)
                fa = FISTA(x_init=x_init, f=f, g=g1)
                fa.max_iteration = 10
                fa.run(10)

                # Print for comparison
                print("FISTA least squares plus 1-norm solution and objective value:")
                print(fa.get_output())
                print(fa.get_last_objective())

                # Compare to CVXPY

                # Construct the problem.
                x1 = Variable(n)
                objective1 = Minimize(
                    0.5 * sum_squares(Amat * x1 - bmat) + lam * norm(x1, 1))
                prob1 = Problem(objective1)

                # The optimal objective is returned by prob.solve().
                result1 = prob1.solve(verbose=False, solver=SCS, eps=1e-9)

                # The optimal solution for x is stored in x.value and the
                # optimal objective value is in result as well as in objective.value
                print("CVXPY least squares plus 1-norm solution and objective value:")
                print(x1.value)
                print(objective1.value)
                self.assertNumpyArrayAlmostEqual(
                    numpy.squeeze(fa.get_output().as_array()), x1.value, 6)
            except SolverError as se:
                print(str(se))
                self.assertTrue(True)
        else:
            self.assertTrue(cvx_not_installable)
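# The proximal map of g1 = lam * L1Norm() used above is soft-thresholding,
# prox_{tau*||.||_1}(x) = sign(x) * max(|x| - tau, 0). A minimal NumPy sketch
# (illustrative; the library evaluates this through g1.prox):
import numpy as np

def soft_threshold(x, tau):
    return np.sign(x) * np.maximum(np.abs(x) - tau, 0.0)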
x_init = ig.allocate()
cgls = CGLS()
cgls.set_up(x_init=x_init, operator=Aop, data=sinogram)
cgls.max_iteration = 500
cgls.update_objective_interval = 100
cgls.run(500, verbose=True)

#%%
###############################################################################
# Setup and run the PDHG algorithm
print("Running PDHG reconstruction")

operator = Aop
f = L2NormSquared(b=sinogram)
g = ZeroFunction()

# Compute operator norm
normK = operator.norm()

# Primal & dual stepsizes
sigma = 0.02
tau = 1 / (sigma * normK**2)

pdhg = PDHG()
pdhg.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 1000
pdhg.update_objective_interval = 100
pdhg.run(1000, verbose=True)

#%%