def test_elasticity(self):
    """aSA should converge at most 1.3x slower than classic SA on 2D elasticity."""
    A, B = linear_elasticity((35, 35), format='bsr')
    smoother = ('gauss_seidel', {'sweep': 'symmetric', 'iterations': 2})
    [asa, work] = adaptive_sa_solver(A, num_candidates=3, improvement_iters=5,
                                     prepostsmoother=smoother)
    sa = smoothed_aggregation_solver(A, B=B)

    # np.random.rand replaces sp.rand: SciPy no longer re-exports
    # numpy.random.rand from its top-level namespace.
    b = np.random.rand(A.shape[0])

    residuals0 = []
    residuals1 = []
    sol0 = asa.solve(b, maxiter=20, tol=1e-10, residuals=residuals0)
    sol1 = sa.solve(b, maxiter=20, tol=1e-10, residuals=residuals1)
    del sol0, sol1

    # geometric-mean convergence factor per iteration for each solver
    conv_asa = (residuals0[-1] / residuals0[0])**(1.0 / len(residuals0))
    conv_sa = (residuals1[-1] / residuals1[0])**(1.0 / len(residuals1))
    assert (conv_asa < 1.3 * conv_sa)
def test_matrix_formats(self):
    """Dense, csr, bsr and csc versions of A should all yield the same solver."""
    A = poisson((7, 7), format='csr')
    cases = [A.tobsr(blocksize=(1, 1))]
    cases.append(A.tocsc())
    cases.append(A.todense())

    # Seed numpy's RNG explicitly: the solver draws its random candidates
    # from np.random, and a bare `random.seed` may only reseed the stdlib
    # generator, leaving the runs non-reproducible.
    np.random.seed(0)
    sa_old = adaptive_sa_solver(A, initial_candidates=np.ones((49, 1)),
                                max_coarse=10)[0]
    for AA in cases:
        np.random.seed(0)
        sa_new = adaptive_sa_solver(AA, initial_candidates=np.ones((49, 1)),
                                    max_coarse=10)[0]
        # coarsest-level operators must agree across input formats
        assert (abs(np.ravel(sa_old.levels[-1].A.todense() -
                             sa_new.levels[-1].A.todense())).max() < 0.01)
        sa_old = sa_new
def test_matrix_formats(self):
    """Dense, csr, bsr and csc versions of A should all yield the same solver."""
    A = poisson((7, 7), format='csr')
    cases = [A.tobsr(blocksize=(1, 1))]
    cases.append(A.tocsc())
    cases.append(A.todense())

    # BUG FIX: `message=` is a regex matched against the warning *text*, not
    # the class name, so message='SparseEfficiencyWarning' silently matched
    # nothing.  Filter on the warning category instead.
    # NOTE(review): assumes SparseEfficiencyWarning is imported at module
    # level — it is referenced unqualified elsewhere in this file; confirm.
    warnings.filterwarnings('ignore', category=SparseEfficiencyWarning)

    np.random.seed(0)
    sa_old = adaptive_sa_solver(A, B=np.ones((49, 1)), max_coarse=10)[0]
    for AA in cases:
        np.random.seed(0)
        sa_new = adaptive_sa_solver(AA, B=np.ones((49, 1)), max_coarse=10)[0]
        # coarsest-level operators must agree across input formats
        assert (abs(np.ravel(sa_old.levels[-1].A.todense() -
                             sa_new.levels[-1].A.todense())).max() < 0.01)
        sa_old = sa_new
def test_matrix_formats(self):
    """Check that dense, csr, bsr and csc forms of A produce the same solver."""
    warnings.filterwarnings('ignore', category=SparseEfficiencyWarning)

    base = poisson((7, 7), format='csr')
    variants = [base.tobsr(blocksize=(1, 1)), base.tocsc(), base.toarray()]

    np.random.seed(111908910)
    previous = adaptive_sa_solver(base, initial_candidates=np.ones((49, 1)),
                                  max_coarse=10)[0]
    for variant in variants:
        # reseed so each solver construction sees the same random stream
        np.random.seed(111908910)
        current = adaptive_sa_solver(variant,
                                     initial_candidates=np.ones((49, 1)),
                                     max_coarse=10)[0]
        # coarsest-level operators must agree regardless of input format
        delta = (previous.levels[-1].A.toarray() -
                 current.levels[-1].A.toarray())
        assert abs(np.ravel(delta)).max() < 0.01
        previous = current
def test_matrix_formats(self):
    """Dense, csr, bsr and csc versions of A should all yield the same solver."""
    A = poisson((7, 7), format='csr')
    cases = [A.tobsr(blocksize=(1, 1))]
    cases.append(A.tocsc())
    cases.append(A.todense())

    # Seed numpy's RNG explicitly: the solver draws its random candidates
    # from np.random, and a bare `random.seed` may only reseed the stdlib
    # generator, leaving the runs non-reproducible.
    np.random.seed(0)
    sa_old = adaptive_sa_solver(A, initial_candidates=np.ones((49, 1)),
                                max_coarse=10)[0]
    for AA in cases:
        np.random.seed(0)
        sa_new = adaptive_sa_solver(AA, initial_candidates=np.ones((49, 1)),
                                    max_coarse=10)[0]
        # coarsest-level operators must agree across input formats
        assert (abs(
            np.ravel(sa_old.levels[-1].A.todense() -
                     sa_new.levels[-1].A.todense())).max() < 0.01)
        sa_old = sa_new
def test_poisson(self):
    """Complex-valued Poisson variants: aSA should hit the target convergence ratio."""
    cases = []

    # perturbed Laplacian: tiny imaginary noise added to the entries
    A = poisson((50, 50), format='csr')
    perturbed = A.copy()
    perturbed.data = perturbed.data + 1e-5j * np.random.rand(perturbed.nnz)
    cases.append((perturbed, 0.25))

    # purely imaginary Laplacian
    cases.append((1.0j * A, 0.25))

    # JBS: Not sure if this is a valid test case
    # imaginary shift
    # Ai = A + 1.1j*sparse.eye(A.shape[0], A.shape[1])
    # cases.append((Ai,0.8))

    for mat, rratio in cases:
        [asa, work] = adaptive_sa_solver(mat, num_candidates=1,
                                         symmetry='symmetric')
        rhs = np.zeros((mat.shape[0], ))
        guess = (np.random.rand(mat.shape[0], ) +
                 1.0j * np.random.rand(mat.shape[0], ))
        history = []
        sol = asa.solve(rhs, x0=guess, maxiter=20, tol=1e-10,
                        residuals=history)
        del sol
        # geometric-mean convergence factor per iteration
        factor = (history[-1] / history[0]) ** (1.0 / len(history))
        assert factor < rratio
def test_poisson(self):
    """aSA should converge at most 1.2x slower than classic SA on 2D Poisson."""
    A = poisson((50, 50), format='csr')

    [asa, work] = adaptive_sa_solver(A, num_candidates=1)
    sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0], 1)))

    # np.random.rand replaces sp.rand: SciPy no longer re-exports
    # numpy.random.rand from its top-level namespace.
    b = np.random.rand(A.shape[0])

    residuals0 = []
    residuals1 = []
    sol0 = asa.solve(b, maxiter=20, tol=1e-10, residuals=residuals0)
    sol1 = sa.solve(b, maxiter=20, tol=1e-10, residuals=residuals1)
    del sol0, sol1

    # geometric-mean convergence factor per iteration for each solver
    conv_asa = (residuals0[-1] / residuals0[0])**(1.0 / len(residuals0))
    conv_sa = (residuals1[-1] / residuals1[0])**(1.0 / len(residuals1))
    assert (conv_asa < 1.2 * conv_sa)
def test_poisson(self):
    """aSA should converge at most 1.2x slower than classic SA on 2D Poisson."""
    A = poisson((50, 50), format='csr')

    [asa, work] = adaptive_sa_solver(A, num_candidates=1)
    sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0], 1)))

    # np.random.rand replaces sp.rand: SciPy no longer re-exports
    # numpy.random.rand from its top-level namespace.
    b = np.random.rand(A.shape[0])

    residuals0 = []
    residuals1 = []
    sol0 = asa.solve(b, maxiter=20, tol=1e-10, residuals=residuals0)
    sol1 = sa.solve(b, maxiter=20, tol=1e-10, residuals=residuals1)
    del sol0, sol1

    # geometric-mean convergence factor per iteration for each solver
    conv_asa = (residuals0[-1] / residuals0[0]) ** (1.0 / len(residuals0))
    conv_sa = (residuals1[-1] / residuals1[0]) ** (1.0 / len(residuals1))
    assert(conv_asa < 1.2 * conv_sa)
def test_poisson(self):
    """Complex-valued Poisson variants: aSA should hit the target convergence ratio."""
    cases = []

    # perturbed Laplacian: tiny imaginary noise added to the entries.
    # np.random.rand replaces sp.rand: SciPy no longer re-exports
    # numpy.random.rand from its top-level namespace.
    A = poisson((50, 50), format='csr')
    Ai = A.copy()
    Ai.data = Ai.data + 1e-5j * np.random.rand(Ai.nnz)
    cases.append((Ai, 0.25))

    # imaginary Laplacian
    Ai = 1.0j * A
    cases.append((Ai, 0.25))

    # JBS: Not sure if this is a valid test case
    # imaginary shift
    # Ai = A + 1.1j*scipy.sparse.eye(A.shape[0], A.shape[1])
    # cases.append((Ai,0.8))

    for A, rratio in cases:
        [asa, work] = adaptive_sa_solver(A, num_candidates=1,
                                         symmetry='symmetric')
        b = np.zeros((A.shape[0],))
        x0 = np.random.rand(A.shape[0],) + 1.0j * np.random.rand(A.shape[0],)
        residuals0 = []
        sol0 = asa.solve(b, x0=x0, maxiter=20, tol=1e-10,
                         residuals=residuals0)
        del sol0
        # geometric-mean convergence factor per iteration
        conv_asa = \
            (residuals0[-1] / residuals0[0]) ** (1.0 / len(residuals0))
        assert(conv_asa < rratio)
def test_elasticity(self):
    """aSA should converge at most 1.3x slower than classic SA on 2D elasticity."""
    A, B = linear_elasticity((35, 35), format='bsr')
    smoother = ('gauss_seidel', {'sweep': 'symmetric', 'iterations': 2})
    [asa, work] = adaptive_sa_solver(A, num_candidates=3, improvement_iters=5,
                                     prepostsmoother=smoother)
    sa = smoothed_aggregation_solver(A, B=B)

    # np.random.rand replaces sp.rand: SciPy no longer re-exports
    # numpy.random.rand from its top-level namespace.
    b = np.random.rand(A.shape[0])

    residuals0 = []
    residuals1 = []
    sol0 = asa.solve(b, maxiter=20, tol=1e-10, residuals=residuals0)
    sol1 = sa.solve(b, maxiter=20, tol=1e-10, residuals=residuals1)
    del sol0, sol1

    # geometric-mean convergence factor per iteration for each solver
    conv_asa = (residuals0[-1] / residuals0[0]) ** (1.0 / len(residuals0))
    conv_sa = (residuals1[-1] / residuals1[0]) ** (1.0 / len(residuals1))
    assert(conv_asa < 1.3 * conv_sa)
# ----------------------------------------------------------------------------- # # ----------------------------------------------------------------------------- # # Classical aSA solver # -------------------- start = time.clock() [ml_asa, work] = adaptive_sa_solver(A, initial_candidates=bad_guy, pdef=is_pdef, num_candidates=num_candidates, candidate_iters=candidate_iters, improvement_iters=improvement_iters, epsilon=target_convergence, max_levels=max_levels, max_coarse=max_coarse, aggregate=aggregation, prepostsmoother=relaxation, smooth=interp_smooth, strength=strength_connection, coarse_solver=coarse_solver, eliminate_local=(False, { 'Ca': 1.0 }), keep=keep_levels) asa_sol = ml_asa.solve(b, x0, tol, residuals=asa_residuals) end = time.clock() asa_time = end - start asa_conv_factors = np.zeros((len(asa_residuals) - 1, 1)) for i in range(0, len(asa_residuals) - 1):