def test_solve_large_2d_with_default_guess_using_c_ext_with_jacobi(self):
    """Standard 2d laplacian, Jacobi-preconditioned C solver, default x0."""
    n = 20
    m = 10

    # Assemble the 5-point laplacian stencil on an n x m grid.
    A = Sparse(m * n, m * n)
    for row in num.arange(0, n):
        for col in num.arange(0, m):
            idx = col + m * row
            A[idx, idx] = 4.0
            for inside, off in ((row > 0, -m), (row < n - 1, m),
                                (col > 0, -1), (col < m - 1, 1)):
                if inside:
                    A[idx, idx + off] = -1.0

    xe = num.ones((n * m, ), num.float)
    A = Sparse_CSR(A)
    b = A * xe

    # No explicit first guess: solver should fall back to its default.
    x = conjugate_gradient(A, b, use_c_cg=True, precon='Jacobi')

    assert num.allclose(x, xe)
def test_solve_large_2d_csr_matrix_using_c_ext(self):
    """Standard 2d laplacian solved in CSR format via the C extension."""
    n = 100
    m = 100

    # Assemble the 5-point laplacian stencil on an n x m grid.
    A = Sparse(m * n, m * n)
    for row in num.arange(0, n):
        for col in num.arange(0, m):
            idx = col + m * row
            A[idx, idx] = 4.0
            for inside, off in ((row > 0, -m), (row < n - 1, m),
                                (col > 0, -1), (col < m - 1, 1)):
                if inside:
                    A[idx, idx + off] = -1.0

    xe = num.ones((n * m, ), num.float)

    # Convert to csr format before solving.
    A = Sparse_CSR(A)
    b = A * xe

    x = conjugate_gradient(A, b, b, iprint=20, use_c_cg=True)

    assert num.allclose(x, xe)
def test_solve_large_using_c_ext_with_jacobi(self):
    """Standard 1d laplacian, Jacobi-preconditioned C solver."""
    n = 50

    # Tridiagonal [-0.5, 1.0, -0.5] stencil.
    A = Sparse(n, n)
    for k in num.arange(0, n):
        A[k, k] = 1.0
        for inside, off in ((k > 0, -1), (k < n - 1, 1)):
            if inside:
                A[k, k + off] = -0.5

    xe = num.ones((n, ), num.float)
    b = A * xe
    A = Sparse_CSR(A)

    x = conjugate_gradient(A, b, b, tol=1.0e-5, use_c_cg=True,
                           precon='Jacobi')

    assert num.allclose(x, xe)
def test_solve_large_2d_using_c_ext(self):
    """Standard 2d laplacian solved by the C conjugate gradient."""
    n = 20
    m = 10

    # Assemble the 5-point laplacian stencil on an n x m grid.
    A = Sparse(m * n, m * n)
    for row in num.arange(0, n):
        for col in num.arange(0, m):
            idx = col + m * row
            A[idx, idx] = 4.0
            for inside, off in ((row > 0, -m), (row < n - 1, m),
                                (col > 0, -1), (col < m - 1, 1)):
                if inside:
                    A[idx, idx + off] = -1.0

    xe = num.ones((n * m, ), num.float)
    A = Sparse_CSR(A)
    b = A * xe

    x = conjugate_gradient(A, b, b, iprint=1, use_c_cg=True)

    assert num.allclose(x, xe)
def test_sparse_solve_using_c_ext_with_jacobi(self):
    """Solve Small Sparse Matrix"""
    # 4x4 symmetric tridiagonal system.
    tridiag = [[2.0, -1.0, 0.0, 0.0],
               [-1.0, 2.0, -1.0, 0.0],
               [0.0, -1.0, 2.0, -1.0],
               [0.0, 0.0, -1.0, 2.0]]
    A = Sparse_CSR(Sparse(tridiag))

    xe = [0.0, 1.0, 2.0, 3.0]
    b = A * xe

    guess = [0.0, 0.0, 0.0, 0.0]
    x = conjugate_gradient(A, b, guess, use_c_cg=True, precon='Jacobi')

    assert num.allclose(x, xe)
def test_sparse_solve_matrix_using_c_ext(self):
    """Solve Small Sparse Matrix"""
    # 4x4 symmetric tridiagonal system with a two-column right hand side.
    tridiag = [[2.0, -1.0, 0.0, 0.0],
               [-1.0, 2.0, -1.0, 0.0],
               [0.0, -1.0, 2.0, -1.0],
               [0.0, 0.0, -1.0, 2.0]]
    A = Sparse_CSR(Sparse(tridiag))

    xe = [[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]
    b = A * xe

    guess = [[0.0, 0.0] for _ in range(4)]
    x = conjugate_gradient(A, b, guess, iprint=0, use_c_cg=True)

    assert num.allclose(x, xe)
def _build_coefficient_matrix_B(self, verbose=False):
    """Build final coefficient matrix from AtA and D.

    The C helper returns the CSR components (data, colind, rowptr),
    which are converted to a square Sparse_CSR matrix of size
    number_of_nodes x number_of_nodes and stored in self.B.
    """
    msize = self.mesh.number_of_nodes

    # Presumably combines D and AtA weighted by alpha — see fitsmooth.
    raw = fitsmooth.build_matrix_B(self.D, self.AtA, self.alpha)

    # Convert to CSR format.
    self.B = Sparse_CSR(data=num.array(raw[0]),
                        Colind=num.array(raw[1]),
                        rowptr=num.array(raw[2]),
                        m=msize, n=msize)
def build_elliptic_matrix(self, a):
    """Build the matrix representing div ( a grad ), of the form [ A B ].

    a -- quantity supplying centroid_values and boundary_values.

    The C extension fills self.operator_data, self.operator_colind and
    self.operator_rowptr in place; those arrays are then wrapped as a
    Sparse_CSR matrix in self.elliptic_matrix.
    """
    kinematic_viscosity_operator_ext.build_elliptic_matrix(
        self,
        a.centroid_values,
        a.boundary_values)

    self.elliptic_matrix = Sparse_CSR(
        None,
        self.operator_data,
        self.operator_colind,
        self.operator_rowptr,
        self.n,
        self.tot_len)
def test_max_iter_using_c_ext(self):
    """Test max iteration Small Sparse Matrix.

    With imax=2 the solver cannot converge on this 4x4 system, so
    ConvergenceError must be raised.
    """
    A = [[2.0, -1.0, 0.0, 0.0],
         [-1.0, 2.0, -1.0, 0.0],
         [0.0, -1.0, 2.0, -1.0],
         [0.0, 0.0, -1.0, 2.0]]
    A = Sparse_CSR(Sparse(A))

    xe = [0.0, 1.0, 2.0, 3.0]
    b = A * xe

    x = [0.0, 0.0, 0.0, 0.0]
    try:
        x = conjugate_gradient(A, b, x, imax=2, use_c_cg=True)
    except ConvergenceError:
        pass
    else:
        msg = 'Should have raised exception'
        # Call-style raise works under both Python 2 and 3; the old
        # `raise TestError, msg` form is a SyntaxError on Python 3.
        raise TestError(msg)
def _conjugate_gradient_preconditioned(A, b, x0, M, imax=10000, tol=1.0e-8,
                                       atol=1.0e-10, iprint=None, Type='None'):
    """Try to solve linear equation Ax = b using
    preconditioned conjugate gradient method

    Input
      A: matrix or function which applies a matrix, assumed symmetric
         A can be either dense or sparse or a function
         (__mul__ just needs to be defined)
      b: right hand side
      x0: inital guess (default the 0 vector)
      M: diagonal entries of the (Jacobi) preconditioner
      imax: max number of iterations
      tol: relative tolerance on the preconditioned residual
      atol: absolute tolerance on the preconditioned residual
      iprint: log progress every iprint iterations (None/0 disables)
      Type: preconditioner type; only 'Jacobi' is implemented

    Output
      x: approximate solution
      stats: Stats object recording convergence information

    Raises
      PreconditionerError: Type is not 'Jacobi'
      VectorShapeError: b is not a single column
      ConvergenceError: imax iterations reached without converging
    """
    # Padarn note: This is temporary while the Jacobi preconditioner is the
    # only one available.
    if not Type == 'Jacobi':
        log.warning('Only the Jacobi Preconditioner is implemented cg_solve python')
        msg = 'Only the Jacobi Preconditioner is implemented in cg_solve python'
        raise PreconditionerError(msg)

    # Build D = inverse of the diagonal preconditioner as a CSR matrix.
    # 1.0/M[i] (not 1/M[i]) avoids Python 2 integer division if M holds ints.
    D = Sparse(A.M, A.M)
    for i in range(A.M):
        D[i, i] = 1.0 / M[i]
    D = Sparse_CSR(D)

    stats = Stats()

    b = num.array(b, dtype=num.float)
    if len(b.shape) != 1:
        raise VectorShapeError('input vector should consist of only one column')

    if x0 is None:
        x0 = num.zeros(b.shape, dtype=num.float)
    else:
        x0 = num.array(x0, dtype=num.float)

    stats.x0 = num.linalg.norm(x0)

    if iprint is None or iprint == 0:
        iprint = imax

    dx = 0.0
    i = 1
    x = x0
    r = b - A * x
    z = D * r
    # NOTE(review): textbook PCG would start with d = z; d = r is kept here
    # to preserve the existing numerical behaviour — verify intent.
    d = r
    rTr = num.dot(r, z)
    rTr0 = rTr
    stats.rTr0 = rTr0

    # FIXME Let the iterations stop if starting with a small residual
    while i < imax and rTr > tol ** 2 * rTr0 and rTr > atol ** 2:
        q = A * d
        alpha = rTr / num.dot(d, q)
        xold = x
        x = x + alpha * d

        dx = num.linalg.norm(x - xold)

        # The periodic full recomputation r = b - A * x had been disabled
        # upstream (`if False:`), so only the cheap recurrence remains.
        r = r - alpha * q

        rTrOld = rTr
        z = D * r
        rTr = num.dot(r, z)
        bt = rTr / rTrOld

        d = z + bt * d
        i = i + 1
        if i % iprint == 0:
            log.info('i = %g rTr = %15.8e dx = %15.8e' % (i, rTr, dx))

        if i == imax:
            log.warning('max number of iterations attained')
            msg = 'Conjugate gradient solver did not converge: rTr==%20.15e' % rTr
            raise ConvergenceError(msg)

    stats.x = num.linalg.norm(x)
    stats.iter = i
    stats.rTr = rTr
    stats.dx = dx

    return x, stats
def __init__(self, domain, use_triangle_areas=True, verbose=False):
    """Initialise the kinematic viscosity operator on *domain*.

    Sets up the diffusivity quantity, the geometric structure arrays
    (filled by the C extension), the optional inverse-triangle-area
    scaling matrix and the elliptic matrix [A B].

    domain -- the model domain (provides mesh, boundary, timestep)
    use_triangle_areas -- enable scaling by inverse triangle areas
    verbose -- log progress messages when True
    """
    if verbose:
        log.critical('Kinematic Viscosity: Beginning Initialisation')

    Operator.__init__(self, domain)

    # Expose the domain attributes
    self.mesh = self.domain.mesh
    self.boundary = domain.boundary
    self.boundary_enumeration = domain.boundary_enumeration

    # Setup a quantity as diffusivity
    # FIXME SR: Could/Should pass a quantity which already exists
    self.diffusivity = Quantity(self.domain)
    self.diffusivity.set_values(1.0)
    self.diffusivity.set_boundary_values(1.0)

    # Number of triangles in the mesh
    self.n = len(self.domain)

    self.dt = 0.0  # Need to set to domain.timestep
    self.dt_apply = 0.0

    self.boundary_len = len(self.domain.boundary)
    # Total length = interior triangles + boundary segments
    self.tot_len = self.n + self.boundary_len

    self.verbose = verbose

    # Geometric Information
    if verbose:
        log.critical('Kinematic Viscosity: Building geometric structure')

    self.geo_structure_indices = num.zeros((self.n, 3), num.int)
    self.geo_structure_values = num.zeros((self.n, 3), num.float)

    # Only needs to built once, doesn't change
    kinematic_viscosity_operator_ext.build_geo_structure(self)

    # Setup type of scaling
    self.set_triangle_areas(use_triangle_areas)

    # Diagonal matrix of inverse triangle areas.
    # FIXME SR: should this really be a matrix?
    temp = Sparse(self.n, self.n)
    for i in range(self.n):
        temp[i, i] = 1.0 / self.mesh.areas[i]

    self.triangle_areas = Sparse_CSR(temp)

    # FIXME SR: More to do with solving equation
    self.qty_considered = 1  # 1 or 2 (uh or vh respectively)

    # Pre-allocated CSR storage for the elliptic operator:
    # Sparse_CSR.data
    self.operator_data = num.zeros((4 * self.n, ), num.float)
    # Sparse_CSR.colind
    self.operator_colind = num.zeros((4 * self.n, ), num.int)
    # Sparse_CSR.rowptr (4 entries in every row, we know this already)
    # = [0, 4, 8, ..., 4*n]
    self.operator_rowptr = 4 * num.arange(self.n + 1)

    # Build matrix self.elliptic_matrix [A B]
    self.build_elliptic_matrix(self.diffusivity)

    self.boundary_term = num.zeros((self.n, ), num.float)

    self.parabolic = False  # Are we doing a parabolic solve at the moment?

    # Per-component solver statistics, filled in later by solves
    self.u_stats = None
    self.v_stats = None

    if verbose:
        log.critical('Elliptic Operator: Initialisation Done')