def test_PowerMethod(self):
    print("test_PowerMethod")
    N, M = 200, 300
    niter = 10
    ig = ImageGeometry(N, M)
    Id = Identity(ig)
    G = Gradient(ig)
    uid = Id.domain_geometry().allocate(ImageGeometry.RANDOM_INT, seed=1)

    # Identity: the norm estimate should not depend on the initial guess
    a = LinearOperator.PowerMethod(Id, niter, uid)
    b = LinearOperator.PowerMethod(Id, niter)
    print("with initial guess", a[0])
    print("without initial guess", b[0])
    self.assertNumpyArrayAlmostEqual(a[0], b[0], decimal=6)

    # Gradient: after 10 iterations the two estimates agree to fewer decimals
    a = LinearOperator.PowerMethod(G, niter, uid)
    b = LinearOperator.PowerMethod(G, niter)
    print("with initial guess", a[0])
    print("without initial guess", b[0])
    self.assertNumpyArrayAlmostEqual(a[0], b[0], decimal=2)
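# For reference, a minimal NumPy sketch of the power iteration that
# PowerMethod is assumed to implement: repeatedly applying A^T A and
# normalising makes ||A x|| converge to the largest singular value of A,
# i.e. the operator norm. The name power_method_sketch and the plain-matrix
# interface are illustrative only, not part of the framework API.
import numpy as np

def power_method_sketch(A, niter=10, x0=None, seed=1):
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(A.shape[1]) if x0 is None else x0.copy()
    s = 0.0
    for _ in range(niter):
        y = A.T.dot(A.dot(x))          # apply A^T A
        x = y / np.linalg.norm(y)      # normalise the iterate
        s = np.linalg.norm(A.dot(x))   # current estimate of ||A||
    return s

print(power_method_sketch(np.diag([3.0, 1.0])))  # ~3.0, the top singular value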
def test_Gradient_linearity(self):
    nc, nz, ny, nx = 3, 4, 5, 6
    ig = ImageGeometry(voxel_num_x=nx, voxel_num_y=ny,
                       voxel_num_z=nz, channels=nc)

    # the dot test must pass for every backend / boundary-condition pair
    for backend in ['c', 'numpy']:
        for bnd_cond in ['Neumann', 'Periodic']:
            grad = Gradient(ig, bnd_cond=bnd_cond,
                            correlation='SpaceChannels', backend=backend)
            self.assertTrue(LinearOperator.dot_test(grad))
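# The dot tests above rely on the adjoint identity <A x, y> == <x, A^T y>
# for random x in the domain and y in the range. A self-contained NumPy
# sketch of that check, with a plain matrix standing in for the operator
# (dot_test_sketch is an illustrative name, not the framework's dot_test):
import numpy as np

def dot_test_sketch(A, decimal=6, seed=1):
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(A.shape[1])   # random element of the domain
    y = rng.standard_normal(A.shape[0])   # random element of the range
    lhs = A.dot(x).dot(y)                 # <A x, y>
    rhs = x.dot(A.T.dot(y))               # <x, A^T y>
    return abs(lhs - rhs) < 10 ** (-decimal)

print(dot_test_sketch(np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])))  # True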
def norm(self):
    # estimate the operator norm with 10 power iterations from a random start
    x0 = self.domain_geometry().allocate(ImageGeometry.RANDOM)
    a = LinearOperator.PowerMethod(self, 10, x0)
    self.s1 = a[0]
    return self.s1
def test_SymmetrizedGradient3b(self):
    # 3D geometry, no channels
    Grad3 = Gradient(self.ig3, correlation='Space')
    E3 = SymmetrizedGradient(Grad3.range_geometry())

    u3 = E3.domain_geometry().allocate('random_int')
    w3 = E3.range_geometry().allocate('random_int', symmetry=True)

    # adjoint identity: <E3 u3, w3> == <u3, E3^T w3>
    lhs3 = E3.direct(u3).dot(w3)
    rhs3 = u3.dot(E3.adjoint(w3))
    numpy.testing.assert_almost_equal(lhs3, rhs3)
    self.assertTrue(LinearOperator.dot_test(E3, range_init=w3, domain_init=u3))
def norm(self, **kwargs):
    '''Returns the norm of the BlockOperator

    If an operator in the block does not have a norm method defined,
    e.g. it is a SIRF AcquisitionModel, we fall back to the PowerMethod
    if the operator is linear; otherwise we raise a TypeError.
    '''
    norm = []
    for op in self.operators:
        if hasattr(op, 'norm'):
            norm.append(op.norm(**kwargs) ** 2.)
        else:
            # fall back to the power method for linear operators;
            # square the estimate so every entry is a norm squared
            if op.is_linear():
                norm.append(LinearOperator.PowerMethod(op, 20)[0] ** 2.)
            else:
                raise TypeError(
                    'Operator {} does not have a norm method and is not linear'
                    .format(op))
    return numpy.sqrt(sum(norm))
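# The combination rule above is ||K|| <= sqrt(sum_i ||K_i||^2), the standard
# upper bound for a block-column operator K = [K_1; ...; K_n]. A small
# self-contained check, with plain NumPy matrices standing in for the blocks:
import numpy as np

K1 = np.array([[2.0, 0.0], [0.0, 1.0]])
K2 = np.array([[0.0, 3.0], [1.0, 0.0]])

exact = np.linalg.norm(np.vstack([K1, K2]), 2)                        # ~3.162
bound = np.sqrt(np.linalg.norm(K1, 2)**2 + np.linalg.norm(K2, 2)**2)  # ~3.606
print(exact <= bound)  # the rule returns an upper bound, not the exact norm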
def __init__(self, A, b, c=1.0):
    super(LeastSquares, self).__init__()
    self.A = A  # operator
    self.b = b  # data
    self.c = c  # scaling factor, default 1
    self.range_tmp = A.range_geometry().allocate()

    # Compute the Lipschitz constant L = 2 c ||A||^2 from the operator
    # norm if possible; leave it initialised to None otherwise.
    try:
        self.L = 2.0 * self.c * (self.A.norm() ** 2)
    except AttributeError as ae:
        if self.A.is_linear():
            Anorm = LinearOperator.PowerMethod(self.A, 10)[0]
            self.L = 2.0 * self.c * (Anorm * Anorm)
        else:
            warnings.warn('{} could not calculate Lipschitz Constant. {}'.format(
                self.__class__.__name__, ae))
    except NotImplementedError as noe:
        warnings.warn('{} could not calculate Lipschitz Constant. {}'.format(
            self.__class__.__name__, noe))
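# Why L = 2 c ||A||^2: for f(x) = c ||A x - b||^2 the gradient is
# grad f(x) = 2 c A^T (A x - b), so grad f(x) - grad f(y) = 2 c A^T A (x - y)
# and ||grad f(x) - grad f(y)|| <= 2 c ||A||^2 ||x - y||. A numeric spot
# check with illustrative NumPy data (not the framework's types):
import numpy as np

c, A, b = 0.5, np.array([[1.0, 2.0], [0.0, 3.0]]), np.array([1.0, -1.0])
grad = lambda x: 2.0 * c * A.T.dot(A.dot(x) - b)
L = 2.0 * c * np.linalg.norm(A, 2) ** 2

rng = np.random.default_rng(0)
x, y = rng.standard_normal(2), rng.standard_normal(2)
print(np.linalg.norm(grad(x) - grad(y)) <= L * np.linalg.norm(x - y))  # True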
def test_dot_test2(self):
    Grad3 = Gradient(self.ig3, correlation='SpaceChannels', backend='c')
    self.assertTrue(LinearOperator.dot_test(Grad3, verbose=True))
    self.assertTrue(LinearOperator.dot_test(Grad3, decimal=6, verbose=True))
def norm(self):
    x0 = self.volume_geometry.allocate('random')
    self.s1, sall, svec = LinearOperator.PowerMethod(self, 50, x0)
    return self.s1
def skip_test_FISTA_denoise_cvx(self):
    if not cvx_not_installable:
        opt = {'memopt': True}
        N = 64
        ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N)
        Phantom = ImageData(geometry=ig)

        x = Phantom.as_array()
        x[int(round(N/4)):int(round(3*N/4)),
          int(round(N/4)):int(round(3*N/4))] = 0.5
        x[int(round(N/8)):int(round(7*N/8)),
          int(round(3*N/8)):int(round(5*N/8))] = 1

        # Identity operator for denoising
        I = TomoIdentity(ig)

        # Data, with added Gaussian noise
        y = I.direct(Phantom)
        y.array = y.array + 0.1 * np.random.randn(N, N)

        # Data fidelity term
        f_denoise = LeastSquares(I, y, c=0.5, memopt=True)
        x_init = ImageData(geometry=ig)
        f_denoise.L = LinearOperator.PowerMethod(I, 25, x_init)[0]

        # 1-norm regulariser
        lam1_denoise = 1.0
        g1_denoise = Norm1(lam1_denoise)

        # Initial guess
        x_init_denoise = ImageData(np.zeros((N, N)))

        # Combine with least squares and solve using generic FISTA implementation
        x_fista1_denoise, it1_denoise, timing1_denoise, \
            criter1_denoise = \
            FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
        print(x_fista1_denoise)
        print(criter1_denoise[-1])

        # Now denoise LS + 1-norm with FBPD
        x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, \
            criterfbpd1_denoise = \
            FBPD(x_init_denoise, I, None, f_denoise, g1_denoise)
        print(x_fbpd1_denoise)
        print(criterfbpd1_denoise[-1])

        # Compare to CVXPY. Construct the problem.
        x1_denoise = Variable(N**2)
        objective1_denoise = Minimize(
            0.5*sum_squares(x1_denoise - y.array.flatten()) +
            lam1_denoise*norm(x1_denoise, 1))
        prob1_denoise = Problem(objective1_denoise)

        # The optimal objective is returned by prob.solve().
        result1_denoise = prob1_denoise.solve(
            verbose=False, solver=SCS, eps=1e-12)

        # The optimal solution for x is stored in x.value; the optimal
        # objective value is in result as well as in objective.value
        print("CVXPY least squares plus 1-norm solution and objective value:")
        print(x1_denoise.value)
        print(objective1_denoise.value)

        self.assertNumpyArrayAlmostEqual(
            x_fista1_denoise.array.flatten(), x1_denoise.value, 5)
        self.assertNumpyArrayAlmostEqual(
            x_fbpd1_denoise.array.flatten(), x1_denoise.value, 5)
    else:
        self.assertTrue(cvx_not_installable)
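# For context, a minimal NumPy sketch of the FISTA iteration this test
# exercises, for 0.5*||A x - b||^2 + lam*||x||_1 with the soft-thresholding
# proximal step. This is a generic textbook sketch, not the framework's
# FISTA implementation; fista_lasso_sketch is an illustrative name.
import numpy as np

def fista_lasso_sketch(A, b, lam, niter=200):
    L = np.linalg.norm(A, 2) ** 2          # Lipschitz constant of the gradient
    x = np.zeros(A.shape[1])
    y, t = x.copy(), 1.0
    for _ in range(niter):
        z = y - A.T.dot(A.dot(y) - b) / L  # gradient step at the extrapolated point
        x_new = np.sign(z) * np.maximum(np.abs(z) - lam / L, 0.0)  # prox of (lam/L)*||.||_1
        t_new = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * t * t))
        y = x_new + ((t - 1.0) / t_new) * (x_new - x)              # momentum step
        x, t = x_new, t_new
    return x

rng = np.random.default_rng(1)
A, b = rng.standard_normal((30, 20)), rng.standard_normal(30)
print(fista_lasso_sketch(A, b, lam=10.0))  # a sparse solution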
def skip_test_FBPD_Norm1_cvx(self):
    print("test_FBPD_Norm1_cvx")
    if not cvx_not_installable:
        opt = {'memopt': True}

        # Problem data.
        m = 30
        n = 20
        np.random.seed(1)
        Amat = np.random.randn(m, n)
        A = LinearOperatorMatrix(Amat)
        bmat = np.random.randn(m)
        bmat.shape = (bmat.shape[0], 1)
        b = DataContainer(bmat)

        # Regularization parameter
        lam = 10

        # Initial guess
        x_init = DataContainer(np.random.randn(n, 1))

        # Create object instances with the test data A and b.
        f = LeastSquares(A, b, c=0.5, memopt=True)
        f.L = LinearOperator.PowerMethod(A, 25, x_init)[0]
        print("Lipschitz", f.L)
        g0 = ZeroFun()

        # Create 1-norm object instance
        g1 = Norm1(lam)

        # Compare to CVXPY. Construct the problem.
        x1 = Variable(n)
        objective1 = Minimize(
            0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1, 1))
        prob1 = Problem(objective1)

        # The optimal objective is returned by prob.solve().
        result1 = prob1.solve(verbose=False, solver=SCS, eps=1e-9)

        # The optimal solution for x is stored in x.value; the optimal
        # objective value is in result as well as in objective.value
        print("CVXPY least squares plus 1-norm solution and objective value:")
        print(x1.value)
        print(objective1.value)

        # Now try another algorithm, FBPD, for the same problem:
        x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(
            x_init, Identity(), None, f, g1)
        print(x_fbpd1)
        print(criterfbpd1[-1])

        self.assertNumpyArrayAlmostEqual(
            numpy.squeeze(x_fbpd1.array), x1.value, 6)
    else:
        self.assertTrue(cvx_not_installable)