def setUp(self):
    self.cases = []

    # Poisson problems in 1D and 2D
    N = 20
    self.cases.append((poisson((2*N,), format='csr'), rand(2*N,)))   # 0
    self.cases.append((poisson((N, N), format='csr'), rand(N*N,)))   # 1

    # Boxed examples
    A = load_example('recirc_flow')['A'].tocsr()                     # 2
    self.cases.append((A, rand(A.shape[0],)))
    A = load_example('bar')['A'].tobsr(blocksize=(3, 3))             # 3
    self.cases.append((A, rand(A.shape[0],)))
def test_nonhermitian(self):
    # problem data
    data = load_example('helmholtz_2D')
    A = data['A'].tocsr()
    B = data['B']
    np.random.seed(28082572)
    x0 = np.random.rand(A.shape[0]) + 1.0j * np.random.rand(A.shape[0])
    b = A * np.random.rand(A.shape[0]) + 1.0j * (A * np.random.rand(A.shape[0]))

    # solver parameters
    smooth = ('energy', {'krylov': 'gmres'})
    SA_build_args = {'max_coarse': 25, 'coarse_solver': 'pinv',
                     'symmetry': 'symmetric'}
    SA_solve_args = {'cycle': 'V', 'maxiter': 20, 'tol': 1e-8}
    strength = [('evolution', {'k': 2, 'epsilon': 2.0})]
    smoother = ('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 1})

    # Construct solver with nonsymmetric parameters
    sa = smoothed_aggregation_solver(A, B=B, smooth=smooth,
                                     strength=strength,
                                     presmoother=smoother,
                                     postsmoother=smoother,
                                     **SA_build_args)
    residuals = []

    # stand-alone solve
    x = sa.solve(b, x0=x0, residuals=residuals, **SA_solve_args)
    residuals = np.array(residuals)
    avg_convergence_ratio =\
        (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
    assert(avg_convergence_ratio < 0.85)

    # accelerated solve
    residuals = []
    x = sa.solve(b, x0=x0, residuals=residuals, accel='gmres',
                 **SA_solve_args)
    del x
    residuals = np.array(residuals)
    avg_convergence_ratio =\
        (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
    assert(avg_convergence_ratio < 0.7)

    # test that nonsymmetric parameters give the same result as symmetric
    # parameters for the complex-symmetric matrix A
    strength = 'symmetric'
    SA_build_args['symmetry'] = 'nonsymmetric'
    sa_nonsymm = smoothed_aggregation_solver(A, B=np.ones((A.shape[0], 1)),
                                             smooth=smooth,
                                             strength=strength,
                                             presmoother=smoother,
                                             postsmoother=smoother,
                                             improve_candidates=None,
                                             **SA_build_args)
    SA_build_args['symmetry'] = 'symmetric'
    sa_symm = smoothed_aggregation_solver(A, B=np.ones((A.shape[0], 1)),
                                          smooth=smooth, strength=strength,
                                          presmoother=smoother,
                                          postsmoother=smoother,
                                          improve_candidates=None,
                                          **SA_build_args)
    for (symm_lvl, nonsymm_lvl) in zip(sa_nonsymm.levels, sa_symm.levels):
        assert_array_almost_equal(symm_lvl.A.toarray(),
                                  nonsymm_lvl.A.toarray())
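# The test above (and several tests below) repeatedly computes a geometric-mean
# convergence factor from the residual history returned by solve().  A minimal
# standalone sketch of that calculation; the helper name avg_convergence_factor
# is hypothetical and not part of the test suite:
import numpy as np


def avg_convergence_factor(residuals):
    """Geometric-mean residual reduction, as used in the tests above."""
    residuals = np.asarray(residuals, dtype=float)
    # (r_last / r_0)^(1/n), the same expression the tests assert against
    return (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))


# e.g. avg_convergence_factor([1.0, 0.5, 0.25, 0.125]) is roughly 0.59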
def setUp(self):
    self.cases = []

    # random matrices
    np.random.seed(954619597)
    for N in [2, 3, 5]:
        self.cases.append(
            sparse.csr_matrix(np.random.rand(N, N))
            + sparse.csr_matrix(1.0j * np.random.rand(N, N)))

    # Poisson problems in 1D and 2D
    for N in [2, 3, 5, 7, 10, 11, 19]:
        A = poisson((N, ), format='csr')
        A.data = A.data + 1.0j * A.data
        self.cases.append(A)
    for N in [2, 3, 7, 9]:
        A = poisson((N, N), format='csr')
        A.data = A.data + 1.0j * np.random.rand(A.data.shape[0], )
        self.cases.append(A)

    for name in ['knot', 'airfoil', 'bar']:
        ex = load_example(name)
        A = ex['A'].tocsr()
        A.data = A.data + 0.5j * np.random.rand(A.data.shape[0], )
        self.cases.append(A)
def test_nonhermitian(self):
    # problem data
    data = load_example('helmholtz_2D')
    A = data['A'].tocsr()
    B = data['B']
    numpy.random.seed(625)
    x0 = scipy.rand(A.shape[0]) + 1.0j * scipy.rand(A.shape[0])
    b = A * scipy.rand(A.shape[0]) + 1.0j * (A * scipy.rand(A.shape[0]))

    # solver parameters
    smooth = ('energy', {'krylov': 'gmres'})
    SA_build_args = {'max_coarse': 25, 'coarse_solver': 'pinv2',
                     'symmetry': 'symmetric'}
    SA_solve_args = {'cycle': 'V', 'maxiter': 20, 'tol': 1e-8}
    strength = [('evolution', {'k': 2, 'epsilon': 2.0})]
    smoother = ('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 1})

    # Construct solver with nonsymmetric parameters
    sa = smoothed_aggregation_solver(A, B=B, smooth=smooth,
                                     strength=strength,
                                     presmoother=smoother,
                                     postsmoother=smoother,
                                     **SA_build_args)
    residuals = []

    # stand-alone solve
    x = sa.solve(b, x0=x0, residuals=residuals, **SA_solve_args)
    residuals = array(residuals)
    avg_convergence_ratio =\
        (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
    assert(avg_convergence_ratio < 0.85)

    # accelerated solve
    residuals = []
    x = sa.solve(b, x0=x0, residuals=residuals, accel='gmres',
                 **SA_solve_args)
    del x
    residuals = array(residuals)
    avg_convergence_ratio =\
        (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
    assert(avg_convergence_ratio < 0.6)

    # test that nonsymmetric parameters give the same result as symmetric
    # parameters for the complex-symmetric matrix A
    strength = 'symmetric'
    SA_build_args['symmetry'] = 'nonsymmetric'
    sa_nonsymm = smoothed_aggregation_solver(A, B=ones((A.shape[0], 1)),
                                             smooth=smooth,
                                             strength=strength,
                                             presmoother=smoother,
                                             postsmoother=smoother,
                                             improve_candidates=None,
                                             **SA_build_args)
    SA_build_args['symmetry'] = 'symmetric'
    sa_symm = smoothed_aggregation_solver(A, B=ones((A.shape[0], 1)),
                                          smooth=smooth, strength=strength,
                                          presmoother=smoother,
                                          postsmoother=smoother,
                                          improve_candidates=None,
                                          **SA_build_args)
    for (symm_lvl, nonsymm_lvl) in zip(sa_nonsymm.levels, sa_symm.levels):
        assert_array_almost_equal(symm_lvl.A.todense(),
                                  nonsymm_lvl.A.todense())
def test_distance(self):
    data = load_example('airfoil')
    cases = []
    cases.append((data['A'].tocsr(), data['vertices']))

    for (A, V) in cases:
        dim = V.shape[1]
        for theta in [1.5, 2.0, 2.5]:
            cost = [0]
            lower_bound = 3 * dim + float(A.shape[0]) / A.nnz
            upper_bound = 3 * dim + 3
            distance_strength_of_connection(A, V, theta=theta,
                                            relative_drop=True, cost=cost)
            assert (cost[0] >= lower_bound)
            assert (cost[0] <= upper_bound)

    for (A, V) in cases:
        for theta in [0.5, 1.0, 1.5]:
            cost = [0]
            lower_bound = 3 * dim + float(A.shape[0]) / A.nnz
            upper_bound = 3 * dim + 3
            distance_strength_of_connection(A, V, theta=theta,
                                            relative_drop=False, cost=cost)
            assert (cost[0] >= lower_bound)
            assert (cost[0] <= upper_bound)
def test_nonsymmetric(self):
    # problem data
    data = load_example('recirc_flow')
    A = data['A'].tocsr()
    B = data['B']
    numpy.random.seed(625)
    x0 = scipy.rand(A.shape[0])
    b = A * scipy.rand(A.shape[0])

    # solver parameters
    smooth = ('energy', {'krylov': 'gmres'})
    SA_build_args = {'max_coarse': 25, 'coarse_solver': 'pinv2',
                     'symmetry': 'nonsymmetric'}
    SA_solve_args = {'cycle': 'V', 'maxiter': 20, 'tol': 1e-8}
    strength = [('evolution', {'k': 2, 'epsilon': 8.0})]
    smoother = ('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 1})
    improve_candidates = [('gauss_seidel_nr', {'sweep': 'symmetric',
                                               'iterations': 4}), None]

    # Construct solver with nonsymmetric parameters
    sa = rootnode_solver(A, B=B, smooth=smooth,
                         improve_candidates=improve_candidates,
                         strength=strength,
                         presmoother=smoother,
                         postsmoother=smoother, **SA_build_args)
    residuals = []

    # stand-alone solve
    x = sa.solve(b, x0=x0, residuals=residuals, **SA_solve_args)
    residuals = array(residuals)
    avg_convergence_ratio = \
        (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
    #print "Test 1 %1.3e, %1.3e" % (avg_convergence_ratio, 0.7)
    assert (avg_convergence_ratio < 0.7)

    # accelerated solve
    residuals = []
    x = sa.solve(b, x0=x0, residuals=residuals, accel='gmres',
                 **SA_solve_args)
    residuals = array(residuals)
    avg_convergence_ratio = \
        (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
    #print "Test 2 %1.3e, %1.3e" % (avg_convergence_ratio, 0.45)
    assert (avg_convergence_ratio < 0.45)

    # test that nonsymmetric parameters give the same result as symmetric
    # parameters for Poisson problem
    A = poisson((15, 15), format='csr')
    strength = 'symmetric'
    SA_build_args['symmetry'] = 'nonsymmetric'
    sa_nonsymm = rootnode_solver(A, B=ones((A.shape[0], 1)), smooth=smooth,
                                 strength=strength, presmoother=smoother,
                                 postsmoother=smoother,
                                 improve_candidates=None, **SA_build_args)
    SA_build_args['symmetry'] = 'symmetric'
    sa_symm = rootnode_solver(A, B=ones((A.shape[0], 1)), smooth=smooth,
                              strength=strength, presmoother=smoother,
                              postsmoother=smoother,
                              improve_candidates=None, **SA_build_args)
    for (symm_lvl, nonsymm_lvl) in zip(sa_nonsymm.levels, sa_symm.levels):
        assert_array_almost_equal(symm_lvl.A.todense(),
                                  nonsymm_lvl.A.todense())
def test_nonsymmetric(self):
    # problem data
    data = load_example('recirc_flow')
    A = data['A'].tocsr()
    B = data['B']
    numpy.random.seed(625)
    x0 = scipy.rand(A.shape[0])
    b = A * scipy.rand(A.shape[0])

    # solver parameters
    smooth = ('energy', {'krylov': 'gmres'})
    SA_build_args = {'max_coarse': 25, 'coarse_solver': 'pinv2',
                     'symmetry': 'nonsymmetric'}
    SA_solve_args = {'cycle': 'V', 'maxiter': 20, 'tol': 1e-8}
    strength = [('evolution', {'k': 2, 'epsilon': 8.0})]
    smoother = ('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 1})
    improve_candidates = [('gauss_seidel_nr', {'sweep': 'symmetric',
                                               'iterations': 4}), None]

    # Construct solver with nonsymmetric parameters
    sa = rootnode_solver(A, B=B, smooth=smooth,
                         improve_candidates=improve_candidates,
                         strength=strength,
                         presmoother=smoother,
                         postsmoother=smoother, **SA_build_args)
    residuals = []

    # stand-alone solve
    x = sa.solve(b, x0=x0, residuals=residuals, **SA_solve_args)
    residuals = array(residuals)
    avg_convergence_ratio =\
        (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
    # print "Test 1 %1.3e, %1.3e" % (avg_convergence_ratio, 0.7)
    assert(avg_convergence_ratio < 0.7)

    # accelerated solve
    residuals = []
    x = sa.solve(b, x0=x0, residuals=residuals, accel='gmres',
                 **SA_solve_args)
    residuals = array(residuals)
    avg_convergence_ratio =\
        (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
    # print "Test 2 %1.3e, %1.3e" % (avg_convergence_ratio, 0.45)
    assert(avg_convergence_ratio < 0.45)

    # test that nonsymmetric parameters give the same result as symmetric
    # parameters for Poisson problem
    A = poisson((15, 15), format='csr')
    strength = 'symmetric'
    SA_build_args['symmetry'] = 'nonsymmetric'
    sa_nonsymm = rootnode_solver(A, B=ones((A.shape[0], 1)), smooth=smooth,
                                 strength=strength, presmoother=smoother,
                                 postsmoother=smoother,
                                 improve_candidates=None, **SA_build_args)
    SA_build_args['symmetry'] = 'symmetric'
    sa_symm = rootnode_solver(A, B=ones((A.shape[0], 1)), smooth=smooth,
                              strength=strength, presmoother=smoother,
                              postsmoother=smoother,
                              improve_candidates=None, **SA_build_args)
    for (symm_lvl, nonsymm_lvl) in zip(sa_nonsymm.levels, sa_symm.levels):
        assert_array_almost_equal(symm_lvl.A.todense(),
                                  nonsymm_lvl.A.todense())
def test_nonhermitian(self):
    # problem data
    data = load_example("helmholtz_2D")
    A = data["A"].tocsr()
    B = data["B"]
    numpy.random.seed(625)
    x0 = scipy.rand(A.shape[0]) + 1.0j * scipy.rand(A.shape[0])
    b = A * scipy.rand(A.shape[0]) + 1.0j * (A * scipy.rand(A.shape[0]))

    # solver parameters
    smooth = ("energy", {"krylov": "gmres"})
    SA_build_args = {"max_coarse": 25, "coarse_solver": "pinv2", "symmetry": "symmetric"}
    SA_solve_args = {"cycle": "V", "maxiter": 20, "tol": 1e-8}
    strength = [("evolution", {"k": 2, "epsilon": 2.0})]
    smoother = ("gauss_seidel_nr", {"sweep": "symmetric", "iterations": 1})

    # Construct solver with nonsymmetric parameters
    sa = smoothed_aggregation_solver(
        A, B=B, smooth=smooth, strength=strength, presmoother=smoother, postsmoother=smoother, **SA_build_args
    )
    residuals = []

    # stand-alone solve
    x = sa.solve(b, x0=x0, residuals=residuals, **SA_solve_args)
    residuals = array(residuals)
    avg_convergence_ratio = (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
    assert avg_convergence_ratio < 0.85

    # accelerated solve
    residuals = []
    x = sa.solve(b, x0=x0, residuals=residuals, accel="gmres", **SA_solve_args)
    residuals = array(residuals)
    avg_convergence_ratio = (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
    assert avg_convergence_ratio < 0.6

    # test that nonsymmetric parameters give the same result as symmetric
    # parameters for the complex-symmetric matrix A
    strength = "symmetric"
    SA_build_args["symmetry"] = "nonsymmetric"
    sa_nonsymm = smoothed_aggregation_solver(
        A,
        B=ones((A.shape[0], 1)),
        smooth=smooth,
        strength=strength,
        presmoother=smoother,
        postsmoother=smoother,
        improve_candidates=None,
        **SA_build_args
    )
    SA_build_args["symmetry"] = "symmetric"
    sa_symm = smoothed_aggregation_solver(
        A,
        B=ones((A.shape[0], 1)),
        smooth=smooth,
        strength=strength,
        presmoother=smoother,
        postsmoother=smoother,
        improve_candidates=None,
        **SA_build_args
    )
    for (symm_lvl, nonsymm_lvl) in zip(sa_nonsymm.levels, sa_symm.levels):
        assert_array_almost_equal(symm_lvl.A.todense(), nonsymm_lvl.A.todense())
def setUp(self):
    self.cases = []

    # Poisson problems in 1D and 2D
    for N in [2, 3, 5, 7, 10, 11, 19]:
        self.cases.append(poisson((N,), format='csr'))
    for N in [2, 3, 7, 9]:
        self.cases.append(poisson((N, N), format='csr'))

    for name in ['knot', 'airfoil', 'bar']:
        ex = load_example(name)
        self.cases.append(ex['A'].tocsr())
def setUp(self):
    self.cases = []

    # Poisson problems in 1D and 2D
    for N in [2, 3, 5, 7, 10, 11, 19]:
        self.cases.append(poisson((N, ), format='csr'))
    for N in [2, 3, 7, 9]:
        self.cases.append(poisson((N, N), format='csr'))

    for name in ['knot', 'airfoil', 'bar']:
        ex = load_example(name)
        self.cases.append(ex['A'].tocsr())
def test_prefilter(self):
    """Check that using prefilter reduces NNZ in P"""
    np.random.seed(0)  # make tests repeatable
    cases = []

    # Simple, real-valued diffusion problems
    X = load_example('airfoil')
    A = X['A'].tocsr()
    B = X['B']
    cases.append((A, B, ('energy', {'krylov': 'cg', 'degree': 2,
                                    'maxiter': 3}), {'theta': 0.05}))
    cases.append((A, B, ('energy', {'krylov': 'gmres', 'degree': 2,
                                    'maxiter': 3}), {'k': 3}))
    cases.append((A.tobsr(blocksize=(2, 2)),
                  np.hstack((B, np.random.rand(B.shape[0], 1))),
                  ('energy', {'krylov': 'cg', 'degree': 2, 'maxiter': 3}),
                  {'theta': 0.1}))

    # Simple, imaginary-valued problems
    iA = 1.0j * A
    iB = 1.0 + np.random.rand(iA.shape[0], 2)\
        + 1.0j * (1.0 + np.random.rand(iA.shape[0], 2))
    cases.append((iA, B, ('energy', {'krylov': 'cg', 'degree': 2,
                                     'maxiter': 3}), {'theta': 0.05}))
    cases.append((iA, iB, ('energy', {'krylov': 'gmres', 'degree': 2,
                                      'maxiter': 3}), {'k': 3}))
    cases.append((A.tobsr(blocksize=(2, 2)),
                  np.hstack((B, np.random.rand(B.shape[0], 1))),
                  ('energy', {'krylov': 'cg', 'degree': 2, 'maxiter': 3}),
                  {'theta': 0.1}))

    for A, B, smooth, prefilter in cases:
        ml_nofilter = rootnode_solver(A, B=B, max_coarse=1, max_levels=2,
                                      smooth=smooth, keep=True)
        smooth[1]['prefilter'] = prefilter
        ml_filter = rootnode_solver(A, B=B, max_coarse=1, max_levels=2,
                                    smooth=smooth, keep=True)
        assert_equal(ml_nofilter.levels[0].P.nnz >
                     ml_filter.levels[0].P.nnz, True)
def setUp(self):
    self.cases = []

    # random matrices
    np.random.seed(0)
    for N in [2, 3, 5]:
        self.cases.append(csr_matrix(sp.rand(N, N)))

    # Poisson problems in 1D and 2D
    for N in [2, 3, 5, 7, 10, 11, 19]:
        self.cases.append(poisson((N, ), format='csr'))
    for N in [2, 3, 5, 7, 10, 11]:
        self.cases.append(poisson((N, N), format='csr'))

    for name in ['knot', 'airfoil', 'bar']:
        ex = load_example(name)
        self.cases.append(ex['A'].tocsr())
def setUp(self):
    self.cases = []

    # random matrices
    np.random.seed(0)
    for N in [2, 3, 5]:
        self.cases.append(csr_matrix(np.random.rand(N, N)))

    # Poisson problems in 1D and 2D
    for N in [2, 3, 5, 7, 10, 11, 19]:
        self.cases.append(poisson((N,), format='csr'))
    for N in [2, 3, 5, 7, 8]:
        self.cases.append(poisson((N, N), format='csr'))

    for name in ['knot', 'airfoil', 'bar']:
        ex = load_example(name)
        self.cases.append(ex['A'].tocsr())
def test_prefilter(self):
    """Check that using prefilter reduces NNZ in P"""
    np.random.seed(0)  # make tests repeatable
    cases = []

    # Simple, real-valued diffusion problems
    X = load_example("airfoil")
    A = X["A"].tocsr()
    B = X["B"]
    cases.append((A, B, ("energy", {"krylov": "cg", "degree": 2, "maxiter": 3}), {"theta": 0.05}))
    cases.append((A, B, ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3}), {"k": 3}))
    cases.append(
        (
            A.tobsr(blocksize=(2, 2)),
            np.hstack((B, np.random.rand(B.shape[0], 1))),
            ("energy", {"krylov": "cg", "degree": 2, "maxiter": 3}),
            {"theta": 0.1},
        )
    )

    # Simple, imaginary-valued problems
    iA = 1.0j * A
    iB = 1.0 + np.random.rand(iA.shape[0], 2) + 1.0j * (1.0 + np.random.rand(iA.shape[0], 2))
    cases.append((iA, B, ("energy", {"krylov": "cg", "degree": 2, "maxiter": 3}), {"theta": 0.05}))
    cases.append((iA, iB, ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3}), {"k": 3}))
    cases.append(
        (
            A.tobsr(blocksize=(2, 2)),
            np.hstack((B, np.random.rand(B.shape[0], 1))),
            ("energy", {"krylov": "cg", "degree": 2, "maxiter": 3}),
            {"theta": 0.1},
        )
    )

    for A, B, smooth, prefilter in cases:
        ml_nofilter = rootnode_solver(A, B=B, max_coarse=1, max_levels=2, smooth=smooth, keep=True)
        smooth[1]["prefilter"] = prefilter
        ml_filter = rootnode_solver(A, B=B, max_coarse=1, max_levels=2, smooth=smooth, keep=True)
        assert_equal(ml_nofilter.levels[0].P.nnz > ml_filter.levels[0].P.nnz, True)
def test_distance_strength_of_connection(self):
    data = load_example('airfoil')
    cases = []
    cases.append((data['A'].tocsr(), data['vertices']))

    for (A, V) in cases:
        for theta in [1.5, 2.0, 2.5]:
            result = distance_soc(A, V, theta=theta)
            expected = reference_distance_soc(A, V, theta=theta)
            assert_equal(result.nnz, expected.nnz)
            assert_array_almost_equal(result.todense(), expected.todense())

    for (A, V) in cases:
        for theta in [0.5, 1.0, 1.5]:
            result = distance_soc(A, V, theta=theta, relative_drop=False)
            expected = reference_distance_soc(A, V, theta=theta,
                                              relative_drop=False)
            assert_equal(result.nnz, expected.nnz)
            assert_array_almost_equal(result.todense(), expected.todense())
def setUp(self):
    self.cases = []

    # random matrices
    numpy.random.seed(0)
    for N in [2, 3, 5]:
        self.cases.append(csr_matrix(rand(N, N)) +
                          csr_matrix(1.0j * rand(N, N)))

    # Poisson problems in 1D and 2D
    for N in [2, 3, 5, 7, 10, 11, 19]:
        A = poisson((N,), format='csr')
        A.data = A.data + 1.0j * A.data
        self.cases.append(A)
    for N in [2, 3, 7, 9]:
        A = poisson((N, N), format='csr')
        A.data = A.data + 1.0j * rand(A.data.shape[0],)
        self.cases.append(A)

    for name in ['knot', 'airfoil', 'bar']:
        ex = load_example(name)
        A = ex['A'].tocsr()
        A.data = A.data + 0.5j * rand(A.data.shape[0],)
        self.cases.append(A)
def setUp(self):
    self.cases = []

    # Random matrices, cases 0-2
    np.random.seed(0)
    for N in [2, 3, 5]:
        self.cases.append(csr_matrix(np.random.rand(N, N)))

    # Poisson problems in 1D, cases 3-9
    for N in [2, 3, 5, 7, 10, 11, 19]:
        self.cases.append(poisson((N,), format="csr"))

    # Poisson problems in 2D, cases 10-15
    for N in [2, 3, 5, 7, 10, 11]:
        self.cases.append(poisson((N, N), format="csr"))

    for name in ["knot", "airfoil", "bar"]:
        ex = load_example(name)
        self.cases.append(ex["A"].tocsr())
def setUp(self):
    cases = []

    seed(0)
    for i in range(5):
        A = rand(8, 8) > 0.5
        cases.append(canonical_graph(A + A.T).astype(float))

    cases.append(zeros((1, 1)))
    cases.append(zeros((2, 2)))
    cases.append(zeros((8, 8)))
    cases.append(ones((2, 2)) - eye(2))
    cases.append(poisson((5,)))
    cases.append(poisson((5, 5)))
    cases.append(poisson((11, 11)))
    cases.append(poisson((5, 5, 5)))
    for name in ['airfoil', 'bar', 'knot']:
        cases.append(load_example(name)['A'])

    cases = [canonical_graph(G) for G in cases]

    self.cases = cases
def setUp(self):
    cases = []

    np.random.seed(651978631)
    for i in range(5):
        A = np.random.rand(8, 8) > 0.5
        cases.append(canonical_graph(A + A.T).astype(float))

    cases.append(np.zeros((1, 1)))
    cases.append(np.zeros((2, 2)))
    cases.append(np.zeros((8, 8)))
    cases.append(np.ones((2, 2)) - np.eye(2))
    cases.append(poisson((5, )))
    cases.append(poisson((5, 5)))
    cases.append(poisson((11, 11)))
    cases.append(poisson((5, 5, 5)))
    for name in ['airfoil', 'bar', 'knot']:
        cases.append(load_example(name)['A'])

    cases = [canonical_graph(G) for G in cases]

    self.cases = cases
def test_distance(self):
    data = load_example('airfoil')
    cases = []
    cases.append((data['A'].tocsr(), data['vertices']))

    for (A, V) in cases:
        dim = V.shape[1]
        for theta in [1.5, 2.0, 2.5]:
            cost = [0]
            lower_bound = 3*dim + float(A.shape[0]) / A.nnz
            upper_bound = 3*dim + 3
            distance_strength_of_connection(A, V, theta=theta,
                                            relative_drop=True, cost=cost)
            assert(cost[0] >= lower_bound)
            assert(cost[0] <= upper_bound)

    for (A, V) in cases:
        for theta in [0.5, 1.0, 1.5]:
            cost = [0]
            lower_bound = 3*dim + float(A.shape[0]) / A.nnz
            upper_bound = 3*dim + 3
            distance_strength_of_connection(A, V, theta=theta,
                                            relative_drop=False, cost=cost)
            assert(cost[0] >= lower_bound)
            assert(cost[0] <= upper_bound)
def test_range(self):
    """Check that P*R=B"""
    np.random.seed(0)  # make tests repeatable
    cases = []

    # Simple, real-valued diffusion problems
    X = load_example("airfoil")
    A = X["A"].tocsr()
    B = X["B"]
    cases.append((A, B, ("jacobi", {"filter": True, "weighting": "local"})))
    cases.append((A, B, ("jacobi", {"filter": True, "weighting": "block"})))
    cases.append((A, B, ("energy", {"maxiter": 3})))
    cases.append((A, B, ("energy", {"krylov": "cgnr"})))
    cases.append((A, B, ("energy", {"krylov": "gmres", "degree": 2})))

    A = poisson((10, 10), format="csr")
    B = np.ones((A.shape[0], 1))
    cases.append((A, B, ("jacobi", {"filter": True, "weighting": "diagonal"})))
    cases.append((A, B, ("jacobi", {"filter": True, "weighting": "local"})))
    cases.append((A, B, "energy"))
    cases.append((A, B, ("energy", {"degree": 2})))
    cases.append((A, B, ("energy", {"krylov": "cgnr", "degree": 2})))
    cases.append((A, B, ("energy", {"krylov": "gmres"})))

    # Simple, imaginary-valued problems
    iA = 1.0j * A
    iB = 1.0 + np.random.rand(iA.shape[0], 2) + 1.0j * (1.0 + np.random.rand(iA.shape[0], 2))
    cases.append((iA, B, ("jacobi", {"filter": True, "weighting": "diagonal"})))
    cases.append((iA, B, ("jacobi", {"filter": True, "weighting": "block"})))
    cases.append((iA, iB, ("jacobi", {"filter": True, "weighting": "local"})))
    cases.append((iA, iB, ("jacobi", {"filter": True, "weighting": "block"})))
    cases.append((iA.tobsr(blocksize=(5, 5)), B, ("jacobi", {"filter": True, "weighting": "block"})))
    cases.append((iA.tobsr(blocksize=(5, 5)), iB, ("jacobi", {"filter": True, "weighting": "block"})))
    cases.append((iA, B, ("energy", {"krylov": "cgnr", "degree": 2})))
    cases.append((iA, iB, ("energy", {"krylov": "cgnr"})))
    cases.append(
        (
            iA.tobsr(blocksize=(5, 5)),
            B,
            ("energy", {"krylov": "cgnr", "degree": 2, "maxiter": 3, "postfilter": {"theta": 0.05}}),
        )
    )
    cases.append(
        (
            iA.tobsr(blocksize=(5, 5)),
            B,
            ("energy", {"krylov": "cgnr", "degree": 2, "maxiter": 3, "prefilter": {"theta": 0.05}}),
        )
    )
    cases.append((iA.tobsr(blocksize=(5, 5)), B, ("energy", {"krylov": "cgnr", "degree": 2, "maxiter": 3})))
    cases.append((iA.tobsr(blocksize=(5, 5)), iB, ("energy", {"krylov": "cgnr"})))
    cases.append((iA, B, ("energy", {"krylov": "gmres"})))
    cases.append((iA, iB, ("energy", {"krylov": "gmres", "degree": 2})))
    cases.append((iA.tobsr(blocksize=(5, 5)), B, ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3})))
    cases.append((iA.tobsr(blocksize=(5, 5)), iB, ("energy", {"krylov": "gmres"})))

    # Simple, imaginary-valued problems
    iA = A + 1.0j * scipy.sparse.eye(A.shape[0], A.shape[1])
    cases.append((iA, B, ("jacobi", {"filter": True, "weighting": "local"})))
    cases.append((iA, B, ("jacobi", {"filter": True, "weighting": "block"})))
    cases.append((iA, iB, ("jacobi", {"filter": True, "weighting": "diagonal"})))
    cases.append((iA, iB, ("jacobi", {"filter": True, "weighting": "block"})))
    cases.append((iA.tobsr(blocksize=(4, 4)), iB, ("jacobi", {"filter": True, "weighting": "block"})))
    cases.append((iA, B, ("energy", {"krylov": "cgnr"})))
    cases.append((iA.tobsr(blocksize=(4, 4)), iB, ("energy", {"krylov": "cgnr"})))
    cases.append((iA, B, ("energy", {"krylov": "gmres"})))
    cases.append((iA.tobsr(blocksize=(4, 4)), iB, ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3})))
    cases.append(
        (
            iA.tobsr(blocksize=(4, 4)),
            iB,
            ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3, "postfilter": {"theta": 0.05}}),
        )
    )
    cases.append(
        (
            iA.tobsr(blocksize=(4, 4)),
            iB,
            ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3, "prefilter": {"theta": 0.05}}),
        )
    )

    A = gauge_laplacian(10, spacing=1.0, beta=0.21)
    B = np.ones((A.shape[0], 1))
    cases.append((A, iB, ("jacobi", {"filter": True, "weighting": "diagonal"})))
    cases.append((A, iB, ("jacobi", {"filter": True, "weighting": "local"})))
    cases.append((A, B, ("energy", {"krylov": "cg"})))
    cases.append((A, iB, ("energy", {"krylov": "cgnr"})))
    cases.append((A, iB, ("energy", {"krylov": "gmres"})))
    cases.append(
        (
            A.tobsr(blocksize=(2, 2)),
            B,
            ("energy", {"krylov": "cgnr", "degree": 2, "maxiter": 3, "postfilter": {"theta": 0.05}}),
        )
    )
    cases.append(
        (
            A.tobsr(blocksize=(2, 2)),
            B,
            ("energy", {"krylov": "cgnr", "degree": 2, "maxiter": 3, "prefilter": {"theta": 0.05}}),
        )
    )
    cases.append((A.tobsr(blocksize=(2, 2)), B, ("energy", {"krylov": "cgnr", "degree": 2, "maxiter": 3})))
    cases.append((A.tobsr(blocksize=(2, 2)), iB, ("energy", {"krylov": "cg"})))
    cases.append((A.tobsr(blocksize=(2, 2)), B, ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3})))
    cases.append(
        (
            A.tobsr(blocksize=(2, 2)),
            B,
            ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3, "postfilter": {"theta": 0.05}}),
        )
    )
    cases.append(
        (
            A.tobsr(blocksize=(2, 2)),
            B,
            ("energy", {"krylov": "gmres", "degree": 2, "maxiter": 3, "prefilter": {"theta": 0.05}}),
        )
    )

    A, B = linear_elasticity((10, 10))
    cases.append((A, B, ("jacobi", {"filter": True, "weighting": "diagonal"})))
    cases.append((A, B, ("jacobi", {"filter": True, "weighting": "local"})))
    cases.append((A, B, ("jacobi", {"filter": True, "weighting": "block"})))
    cases.append((A, B, ("energy", {"degree": 2})))
    cases.append((A, B, ("energy", {"degree": 3, "postfilter": {"theta": 0.05}})))
    cases.append((A, B, ("energy", {"degree": 3, "prefilter": {"theta": 0.05}})))
    cases.append((A, B, ("energy", {"krylov": "cgnr"})))
    cases.append((A, B, ("energy", {"krylov": "gmres", "degree": 2})))

    # Classic SA cases
    for A, B, smooth in cases:
        ml = smoothed_aggregation_solver(A, B=B, max_coarse=1, max_levels=2, smooth=smooth)
        P = ml.levels[0].P
        B = ml.levels[0].B
        R = ml.levels[1].B
        assert_almost_equal(P * R, B)

    def blocksize(A):
        # Helper Function: return the blocksize of a matrix
        if isspmatrix_bsr(A):
            return A.blocksize[0]
        else:
            return 1

    # Root-node cases
    counter = 0
    for A, B, smooth in cases:
        counter += 1

        if isinstance(smooth, tuple):
            smoother = smooth[0]
        else:
            smoother = smooth

        if smoother == "energy" and (B.shape[1] >= blocksize(A)):
            ic = [("gauss_seidel_nr", {"sweep": "symmetric", "iterations": 4}), None]
            ml = rootnode_solver(
                A,
                B=B,
                max_coarse=1,
                max_levels=2,
                smooth=smooth,
                improve_candidates=ic,
                keep=True,
                symmetry="nonsymmetric",
            )
            T = ml.levels[0].T.tocsr()
            Cpts = ml.levels[0].Cpts
            Bf = ml.levels[0].B
            Bf_H = ml.levels[0].BH
            Bc = ml.levels[1].B
            P = ml.levels[0].P.tocsr()

            # P should preserve B in its range, wherever P
            # has enough nonzeros
            mask = (P.indptr[1:] - P.indptr[:-1]) >= B.shape[1]
            assert_almost_equal((P * Bc)[mask, :], Bf[mask, :])
            assert_almost_equal((P * Bc)[mask, :], Bf_H[mask, :])

            # P should be the identity at Cpts
            I1 = eye(T.shape[1], T.shape[1], format="csr", dtype=T.dtype)
            I2 = P[Cpts, :]
            assert_almost_equal(I1.data, I2.data)
            assert_equal(I1.indptr, I2.indptr)
            assert_equal(I1.indices, I2.indices)

            # T should be the identity at Cpts
            I2 = T[Cpts, :]
            assert_almost_equal(I1.data, I2.data)
            assert_equal(I1.indptr, I2.indptr)
            assert_equal(I1.indices, I2.indices)
def setUp(self):
    cases = [None for i in range(5)]
    cluster_node_incidence_input = [None for i in range(5)]
    cluster_node_incidence_output = [None for i in range(5)]
    cluster_center_input = [None for i in range(5)]
    cluster_center_output = [None for i in range(5)]
    bellman_ford_input = [None for i in range(5)]
    bellman_ford_output = [None for i in range(5)]
    # bellman_ford_balanced_input = [None for i in range(5)]
    # bellman_ford_balanced_output = [None for i in range(5)]
    lloyd_cluster_input = [None for i in range(5)]
    lloyd_cluster_output = [None for i in range(5)]
    lloyd_cluster_exact_input = [None for i in range(5)]
    lloyd_cluster_exact_output = [None for i in range(5)]

    # (0) 6 node undirected, unit length
    # (1) 12 node undirected, unit length
    # (2) 16 node undirected, random length
    # (3) 16 node directed, random length
    # (4) 191 node unstructured finite element matrix

    # (0) 6 node undirected, unit length
    #
    # [3] ---- [4] ---- [5]
    #  | \  /   | \  /   |
    #  | /  \   | /  \   |
    # [0] ---- [1] ---- [2]
    xy = np.array([[0, 0], [1, 0], [2, 0],
                   [0, 1], [1, 1], [2, 1]])
    del xy
    G = np.zeros((6, 6))
    G[0, [1, 3, 4]] = 1
    G[1, [0, 2, 3, 4, 5]] = 1
    G[2, [1, 4, 5]] = 1
    G[3, [0, 1, 4]] = 1
    G[4, [0, 1, 2, 3, 5]] = 1
    G[5, [1, 2, 4]] = 1
    G[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]] = [3, 5, 3, 3, 5, 3]
    G = sparse.csr_matrix(G)
    cases[0] = (G)

    cm = np.array([0, 1, 1, 0, 0, 1], dtype=np.int32)
    ICp = np.array([0, 3, 6], dtype=np.int32)
    ICi = np.array([0, 3, 4, 1, 2, 5], dtype=np.int32)
    L = np.array([0, 0, 1, 1, 2, 2], dtype=np.int32)
    cluster_node_incidence_input[0] = {'num_clusters': 2, 'cm': cm}
    cluster_node_incidence_output[0] = {'ICp': ICp, 'ICi': ICi, 'L': L}

    cluster_center_input[0] = {
        'a': [0, 1],
        'num_clusters': 2,
        'cm': np.array([0, 1, 1, 0, 0, 1], dtype=np.int32),
        'ICp': np.array([0, 3, 6], dtype=np.int32),
        'ICi': np.array([0, 3, 4, 1, 2, 5], dtype=np.int32),
        'L': np.array([0, 0, 1, 1, 2, 2], dtype=np.int32)}
    cluster_center_output[0] = [0, 1]

    bellman_ford_input[0] = {'seeds': [0, 5]}
    bellman_ford_output[0] = {
        'cm': np.array([0, 0, 1, 0, 0, 1], dtype=np.int32),
        'd': np.array([0., 1., 1., 1., 1., 0.], dtype=G.dtype)}

    lloyd_cluster_input[0] = {'seeds': np.array([0, 5], dtype=np.int32)}
    lloyd_cluster_output[0] = {
        'cm': np.array([0, 0, 1, 0, 0, 1], dtype=np.int32),
        'd': np.array([1., 0., 0., 1., 0., 0.], dtype=G.dtype),
        'c': np.array([0, 5], dtype=np.int32)}

    lloyd_cluster_exact_input[0] = {'seeds': np.array([0, 5], dtype=np.int32)}
    lloyd_cluster_exact_output[0] = {
        'cm': np.array([0, 0, 1, 0, 1, 1], dtype=np.int32),
        'd': np.array([0, 1, 1, 1, 1, 0], dtype=G.dtype),
        'c': np.array([0, 2], dtype=np.int32)}

    # (1) 12 node undirected, unit length
    #
    #    _[1] ---- [7]
    #   /  |        |
    #  /   |        |
    # [0] ---- [2] ---- [6] ---- [8]
    #  |        |        |
    #  |        |        |
    # [3] ---- [5] ---- [9] _
    #  |        |         \
    #  |        |          \
    # [4] ---- [10] ---- [11]
    xy = np.array([[0, 2], [1, 3], [1, 2], [1, 1], [2, 0], [2, 1],
                   [2, 2], [2, 3], [3, 2], [3, 1], [3, 0], [4, 0]])
    del xy
    G = np.zeros((12, 12))
    G[0, [1, 2]] = 1
    G[1, [0, 2, 7]] = 1
    G[2, [0, 1, 3, 6]] = 1
    G[3, [2, 5]] = 1
    G[4, [5, 10]] = 1
    G[5, [3, 4, 6, 9]] = 1
    G[6, [2, 5, 7, 8]] = 1
    G[7, [1, 6]] = 1
    G[8, [6, 9]] = 1
    G[9, [5, 8, 10, 11]] = 1
    G[10, [4, 9, 11]] = 1
    G[11, [9, 10]] = 1
    G[np.arange(12), np.arange(12)] = [2, 3, 4, 2, 2, 4, 4, 2, 2, 4, 3, 2]
    G = sparse.csr_matrix(G)
    cases.append(G)

    cm = np.array([0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1], dtype=np.int32)
    ICp = np.array([0, 6, 12], dtype=np.int32)
    ICi = np.array([0, 1, 2, 3, 6, 7, 4, 5, 8, 9, 10, 11], dtype=np.int32)
    L = np.array([0, 1, 2, 3, 0, 1, 4, 5, 2, 3, 4, 5], dtype=np.int32)
    cluster_node_incidence_input.append({'num_clusters': 2, 'cm': cm})
    cluster_node_incidence_output.append({'ICp': ICp, 'ICi': ICi, 'L': L})

    cluster_center_input[0] = {
        'a': [0, 1],
        'num_clusters': 2,
        'cm': np.array([0, 1, 1, 0, 0, 1], dtype=np.int32),
        'ICp': np.array([0, 3, 6], dtype=np.int32),
        'ICi': np.array([0, 3, 4, 1, 2, 5], dtype=np.int32),
        'L': np.array([0, 0, 1, 1, 2, 2], dtype=np.int32)}
    cluster_center_output[0] = [0, 1]

    # (2) 16 node undirected, random length (0,2)
    np.random.seed(2244369509)
    G.data[:] = np.random.rand(len(G.data)) * 2
    cases.append(G)

    # (3) 16 node directed, random length
    #
    #    >[1] ---> [7]
    #   /  |        |
    #  /   v        v
    # [0] <--- [2] <--- [6] <--- [8]
    #  |        ^        ^
    #  v        |        |
    # [3] ---> [5] <--- [9] <
    #  |        ^         \
    #  v        |          \
    # [4] ---> [10] ---> [11]
    xy = np.array([[0, 2], [1, 3], [1, 2], [1, 1], [2, 0], [2, 1],
                   [2, 2], [2, 3], [3, 2], [3, 1], [3, 0], [4, 0]])
    del xy
    G = np.zeros((12, 12))
    G[0, [1]] = 1
    G[1, [2, 7]] = 1
    G[2, [0, 3]] = 1
    G[3, [5]] = 1
    G[4, [10]] = 1
    G[5, [4, 6]] = 1
    G[6, [2]] = 1
    G[7, [6]] = 1
    G[8, [6]] = 1
    G[9, [5, 8]] = 1
    G[10, [9, 11]] = 1
    G[11, [9]] = 1
    G = sparse.csr_matrix(G)
    np.random.seed(1664236979)
    G.data[:] = np.random.rand(len(G.data)) * 2
    cases.append(G)

    # (4) 191 node unstructured finite element matrix
    cases.append(load_example('unit_square')['A'])

    self.cases = cases
    self.cluster_node_incidence_input = cluster_node_incidence_input
    self.cluster_node_incidence_output = cluster_node_incidence_output
    self.cluster_center_input = cluster_center_input
    self.cluster_center_output = cluster_center_output
    self.bellman_ford_input = bellman_ford_input
    self.bellman_ford_output = bellman_ford_output
    # self.bellman_ford_balanced_input = bellman_ford_balanced_input
    # self.bellman_ford_balanced_output = bellman_ford_balanced_output
    self.lloyd_cluster_input = lloyd_cluster_input
    self.lloyd_cluster_output = lloyd_cluster_output
    self.lloyd_cluster_exact_input = lloyd_cluster_exact_input
    self.lloyd_cluster_exact_output = lloyd_cluster_exact_output
def test_range(self):
    """Check that P*R=B."""
    warnings.filterwarnings('ignore', category=UserWarning,
                            message='Having less target vectors')
    np.random.seed(18410243)  # make tests repeatable
    cases = []

    # Simple, real-valued diffusion problems
    name = 'airfoil'
    X = load_example('airfoil')
    A = X['A'].tocsr()
    B = X['B']
    cases.append((A, B, ('jacobi', {'filter_entries': True,
                                    'weighting': 'local'}), name))
    cases.append((A, B, ('jacobi', {'filter_entries': True,
                                    'weighting': 'block'}), name))
    cases.append((A, B, ('energy', {'maxiter': 3}), name))
    cases.append((A, B, ('energy', {'krylov': 'cgnr',
                                    'weighting': 'diagonal'}), name))
    cases.append((A, B, ('energy', {'krylov': 'gmres', 'degree': 2}), name))

    name = 'poisson'
    A = poisson((10, 10), format='csr')
    B = np.ones((A.shape[0], 1))
    cases.append((A, B, ('jacobi', {'filter_entries': True,
                                    'weighting': 'diagonal'}), name))
    cases.append((A, B, ('jacobi', {'filter_entries': True,
                                    'weighting': 'local'}), name))
    cases.append((A, B, 'energy', name))
    cases.append((A, B, ('energy', {'degree': 2}), name))
    cases.append((A, B, ('energy', {'krylov': 'cgnr', 'degree': 2,
                                    'weighting': 'diagonal'}), name))
    cases.append((A, B, ('energy', {'krylov': 'gmres'}), name))

    # Simple, imaginary-valued problems
    name = 'random imaginary'
    iA = 1.0j * A
    iB = 1.0 + np.random.rand(iA.shape[0], 2)\
        + 1.0j * (1.0 + np.random.rand(iA.shape[0], 2))
    cases.append((iA, B, ('jacobi', {'filter_entries': True,
                                     'weighting': 'diagonal'}), name))
    cases.append((iA, B, ('jacobi', {'filter_entries': True,
                                     'weighting': 'block'}), name))
    cases.append((iA, iB, ('jacobi', {'filter_entries': True,
                                      'weighting': 'local'}), name))
    cases.append((iA, iB, ('jacobi', {'filter_entries': True,
                                      'weighting': 'block'}), name))
    cases.append((iA.tobsr(blocksize=(5, 5)), B,
                  ('jacobi', {'filter_entries': True,
                              'weighting': 'block'}), name))
    cases.append((iA.tobsr(blocksize=(5, 5)), iB,
                  ('jacobi', {'filter_entries': True,
                              'weighting': 'block'}), name))
    cases.append((iA, B, ('energy', {'krylov': 'cgnr', 'degree': 2,
                                     'weighting': 'diagonal'}), name))
    cases.append((iA, iB, ('energy', {'krylov': 'cgnr',
                                      'weighting': 'diagonal'}), name))
    cases.append((iA.tobsr(blocksize=(5, 5)), B,
                  ('energy', {'krylov': 'cgnr', 'degree': 2, 'maxiter': 3,
                              'weighting': 'diagonal',
                              'postfilter': {'theta': 0.05}}), name))
    cases.append((iA.tobsr(blocksize=(5, 5)), B,
                  ('energy', {'krylov': 'cgnr', 'degree': 2, 'maxiter': 3,
                              'weighting': 'diagonal',
                              'prefilter': {'theta': 0.05}}), name))
    cases.append((iA.tobsr(blocksize=(5, 5)), B,
                  ('energy', {'krylov': 'cgnr', 'degree': 2,
                              'weighting': 'diagonal', 'maxiter': 3}), name))
    cases.append((iA.tobsr(blocksize=(5, 5)), iB,
                  ('energy', {'krylov': 'cgnr',
                              'weighting': 'diagonal'}), name))
    cases.append((iA, B, ('energy', {'krylov': 'gmres'}), name))
    cases.append((iA, iB, ('energy', {'krylov': 'gmres', 'degree': 2}), name))
    cases.append((iA.tobsr(blocksize=(5, 5)), B,
                  ('energy', {'krylov': 'gmres', 'degree': 2,
                              'maxiter': 3}), name))
    cases.append((iA.tobsr(blocksize=(5, 5)), iB,
                  ('energy', {'krylov': 'gmres'}), name))

    # Simple, imaginary-valued problems
    name = 'random imaginary + I'
    iA = A + 1.0j * sparse.eye(A.shape[0], A.shape[1])
    cases.append((iA, B, ('jacobi', {'filter_entries': True,
                                     'weighting': 'local'}), name))
    cases.append((iA, B, ('jacobi', {'filter_entries': True,
                                     'weighting': 'block'}), name))
    cases.append((iA, iB, ('jacobi', {'filter_entries': True,
                                      'weighting': 'diagonal'}), name))
    cases.append((iA, iB, ('jacobi', {'filter_entries': True,
                                      'weighting': 'block'}), name))
    cases.append((iA.tobsr(blocksize=(4, 4)), iB,
                  ('jacobi', {'filter_entries': True,
                              'weighting': 'block'}), name))
    cases.append((iA, B, ('energy', {'krylov': 'cgnr',
                                     'weighting': 'diagonal'}), name))
    cases.append((iA.tobsr(blocksize=(4, 4)), iB,
                  ('energy', {'krylov': 'cgnr',
                              'weighting': 'diagonal'}), name))
    cases.append((iA, B, ('energy', {'krylov': 'gmres'}), name))
    cases.append((iA.tobsr(blocksize=(4, 4)), iB,
                  ('energy', {'krylov': 'gmres', 'degree': 2,
                              'maxiter': 3}), name))
    cases.append((iA.tobsr(blocksize=(4, 4)), iB,
                  ('energy', {'krylov': 'gmres', 'degree': 2, 'maxiter': 3,
                              'postfilter': {'theta': 0.05}}), name))
    cases.append((iA.tobsr(blocksize=(4, 4)), iB,
                  ('energy', {'krylov': 'gmres', 'degree': 2, 'maxiter': 3,
                              'prefilter': {'theta': 0.05}}), name))

    name = 'gauge laplacian'
    A = gauge_laplacian(10, spacing=1.0, beta=0.21)
    B = np.ones((A.shape[0], 1))
    cases.append((A, iB, ('jacobi', {'filter_entries': True,
                                     'weighting': 'diagonal'}), name))
    cases.append((A, iB, ('jacobi', {'filter_entries': True,
                                     'weighting': 'local'}), name))
    cases.append((A, B, ('energy', {'krylov': 'cg'}), name))
    cases.append((A, iB, ('energy', {'krylov': 'cgnr',
                                     'weighting': 'diagonal'}), name))
    cases.append((A, iB, ('energy', {'krylov': 'gmres'}), name))

    name = 'gauge laplacian bsr'
    cases.append((A.tobsr(blocksize=(2, 2)), B,
                  ('energy', {'krylov': 'cgnr', 'degree': 2,
                              'weighting': 'diagonal', 'maxiter': 3,
                              'postfilter': {'theta': 0.05}}), name))
    cases.append((A.tobsr(blocksize=(2, 2)), B,
                  ('energy', {'krylov': 'cgnr', 'degree': 2,
                              'weighting': 'diagonal', 'maxiter': 3,
                              'prefilter': {'theta': 0.05}}), name))
    cases.append((A.tobsr(blocksize=(2, 2)), B,
                  ('energy', {'krylov': 'cgnr', 'degree': 2, 'maxiter': 3,
                              'weighting': 'diagonal'}), name))
    cases.append((A.tobsr(blocksize=(2, 2)), iB,
                  ('energy', {'krylov': 'cg'}), name))
    cases.append((A.tobsr(blocksize=(2, 2)), B,
                  ('energy', {'krylov': 'gmres', 'degree': 2,
                              'maxiter': 3}), name))
    cases.append((A.tobsr(blocksize=(2, 2)), B,
                  ('energy', {'krylov': 'gmres', 'degree': 2, 'maxiter': 3,
                              'postfilter': {'theta': 0.05}}), name))
    cases.append((A.tobsr(blocksize=(2, 2)), B,
                  ('energy', {'krylov': 'gmres', 'degree': 2, 'maxiter': 3,
                              'prefilter': {'theta': 0.05}}), name))

    name = 'linear elasticity'
    A, B = linear_elasticity((10, 10))
    cases.append((A, B, ('jacobi', {'filter_entries': True,
                                    'weighting': 'diagonal'}), name))
    cases.append((A, B, ('jacobi', {'filter_entries': True,
                                    'weighting': 'local'}), name))
    cases.append((A, B, ('jacobi', {'filter_entries': True,
                                    'weighting': 'block'}), name))
    cases.append((A, B, ('energy', {'degree': 2}), name))
    cases.append((A, B, ('energy', {'degree': 3,
                                    'postfilter': {'theta': 0.05}}), name))
    cases.append((A, B, ('energy', {'degree': 3,
                                    'prefilter': {'theta': 0.05}}), name))
    cases.append((A, B, ('energy', {'krylov': 'cgnr',
                                    'weighting': 'diagonal'}), name))
    cases.append((A, B, ('energy', {'krylov': 'gmres', 'degree': 2}), name))

    # Classic SA cases
    for A, B, smooth, _name in cases:
        ml = smoothed_aggregation_solver(A, B=B, max_coarse=1, max_levels=2,
                                         smooth=smooth)
        P = ml.levels[0].P
        B = ml.levels[0].B
        R = ml.levels[1].B
        assert_almost_equal(P * R, B)

    def _get_blocksize(A):
        # Helper Function: return the blocksize of a matrix
        if sparse.isspmatrix_bsr(A):
            return A.blocksize[0]
        return 1

    # Root-node cases
    counter = 0
    for A, B, smooth, _name in cases:
        counter += 1

        if isinstance(smooth, tuple):
            smoother = smooth[0]
        else:
            smoother = smooth

        if smoother == 'energy' and (B.shape[1] >= _get_blocksize(A)):
            ic = [('gauss_seidel_nr', {'sweep': 'symmetric',
                                       'iterations': 4}), None]
            ml = rootnode_solver(A, B=B, max_coarse=1, max_levels=2,
                                 smooth=smooth,
                                 improve_candidates=ic,
                                 keep=True, symmetry='nonsymmetric')
            T = ml.levels[0].T.tocsr()
            Cpts = ml.levels[0].Cpts
            Bf = ml.levels[0].B
            Bf_H = ml.levels[0].BH
            Bc = ml.levels[1].B
            P = ml.levels[0].P.tocsr()
            T.eliminate_zeros()
            P.eliminate_zeros()

            # P should preserve B in its range, wherever P
            # has enough nonzeros
            mask = ((P.indptr[1:] - P.indptr[:-1]) >= B.shape[1])
            assert_almost_equal((P*Bc)[mask, :], Bf[mask, :])
            assert_almost_equal((P*Bc)[mask, :], Bf_H[mask, :])

            # P should be the identity at Cpts
            I1 = sparse.eye(T.shape[1], T.shape[1], format='csr',
                            dtype=T.dtype)
            I2 = P[Cpts, :]
            assert_almost_equal(I1.data, I2.data)
            assert_equal(I1.indptr, I2.indptr)
            assert_equal(I1.indices, I2.indices)

            # T should be the identity at Cpts
            I2 = T[Cpts, :]
            assert_almost_equal(I1.data, I2.data)
            assert_equal(I1.indptr, I2.indptr)
            assert_equal(I1.indices, I2.indices)
# 3D example of viewing aggregates from SA using VTK
from pyamg.aggregation import standard_aggregation
from pyamg.vis import vis_coarse, vtk_writer
from pyamg.gallery import load_example

# retrieve the problem
data = load_example('unit_cube')
A = data['A'].tocsr()
V = data['vertices']
E2V = data['elements']

# perform smoothed aggregation
Agg, rootnodes = standard_aggregation(A)

# create the vtk file of aggregates
vis_coarse.vis_aggregate_groups(Verts=V, E2V=E2V, Agg=Agg,
                                mesh_type='tet', output='vtk',
                                fname='output_aggs.vtu')

# create the vtk file for a mesh
vtk_writer.write_basic_mesh(Verts=V, E2V=E2V,
                            mesh_type='tet',
                            fname='output_mesh.vtu')

# to use Paraview:
# start Paraview: Paraview --data=output_mesh.vtu
# apply
# under display in the object inspector:
#     select wireframe representation
#     select a better solid color
#     selecting surface with edges and low opacity also helps
# The original example script uses numpy and scipy below; their imports are
# added here so the excerpt is self-contained.
import numpy
import scipy
from scipy.linalg import svd
import pylab
from smoothed_aggregation_helmholtz_solver import \
    smoothed_aggregation_helmholtz_solver, planewaves
from pyamg.util.linalg import norm
from pyamg import gallery
from convergence_tools import *
from my_vis import my_vis, shrink_elmts

if __name__ == '__main__':

    # Retrieve 2-D Helmholtz Operator and problem data.
    # This operator was discretized with a local
    # discontinuous Galerkin method.
    data = gallery.load_example('helmholtz_2D')
    A = data['A'].tocsr()
    omega = data['omega']
    h = data['h']
    ppw = data['ppw']
    elements = data['elements']
    vertices = data['vertices']

    print "\nRunning 2D Helmholtz Example"
    print "-- %1.2f Points-per-wavelength" % ppw
    print "-- %1.2e = h, %1.2f = omega" % (h, omega)
    print "-- Discretized with a local discontinuous Galerkin method\n   on annulus-shaped domain"

    # random initial guess for zero right-hand-side
    numpy.random.seed(625)
    x0 = scipy.rand(A.shape[0])
# 2D example of viewing aggregates from SA using VTK
from pyamg.aggregation import standard_aggregation
from pyamg.vis import vis_coarse, vtk_writer
from pyamg.gallery import load_example

# retrieve the problem
data = load_example('unit_square')
A = data['A'].tocsr()
V = data['vertices']
E2V = data['elements']

# perform smoothed aggregation
Agg, rootnodes = standard_aggregation(A)

# create the vtk file of aggregates
vis_coarse.vis_aggregate_groups(Verts=V, E2V=E2V, Agg=Agg,
                                mesh_type='tri', output='vtk',
                                fname='output_aggs.vtu')

# create the vtk file for a mesh
vtk_writer.write_basic_mesh(Verts=V, E2V=E2V,
                            mesh_type='tri',
                            fname='output_mesh.vtu')

# to use Paraview:
# start Paraview: Paraview --data=output_mesh.vtu
# apply
# under display in the object inspector:
#     select wireframe representation
#     select a better solid color
# open file: output_aggs.vtu
# 2D example of viewing aggregates from SA using VTK
from pyamg.aggregation import standard_aggregation
from pyamg.vis import vis_coarse, vtk_writer
from pyamg.gallery import load_example
from pyamg import *
from scipy import *

# retrieve the problem
data = load_example('unit_square')
A = data['A'].tocsr()
V = data['vertices']
E2V = data['elements']

# perform smoothed aggregation
ml = smoothed_aggregation_solver(A, keep=True, max_coarse=10)
b = sin(pi*V[:, 0])*sin(pi*V[:, 1])
x = ml.solve(b)

# create the vtk file of aggregates
vis_coarse.vis_aggregate_groups(Verts=V, E2V=E2V,
                                Agg=ml.levels[0].AggOp, mesh_type='tri',
                                output='vtk', fname='output_aggs.vtu')

# create the vtk file for mesh and solution
vtk_writer.write_basic_mesh(Verts=V, E2V=E2V, pdata=x,
                            mesh_type='tri', fname='output_mesh.vtu')

# to use Paraview:
# start Paraview: Paraview --data=output_mesh.vtu
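# Optional follow-up to the example above (a sketch, not part of the original
# script): ml.solve() can also record the residual history, which makes it
# easy to confirm convergence before visualizing the solution.
residuals = []
x = ml.solve(b, tol=1e-10, residuals=residuals)
print("residual reduction: %g" % (residuals[-1] / residuals[0]))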
# 2D example of viewing aggregates from SA using VTK
from pyamg.aggregation import standard_aggregation
from pyamg.vis import vis_coarse, vtk_writer
from pyamg.gallery import load_example

# retrieve the problem
data = load_example("unit_square")
A = data["A"].tocsr()
V = data["vertices"]
E2V = data["elements"]

# perform smoothed aggregation
Agg, rootnodes = standard_aggregation(A)

# create the vtk file of aggregates
vis_coarse.vis_aggregate_groups(Verts=V, E2V=E2V, Agg=Agg,
                                mesh_type="tri", output="vtk",
                                fname="output_aggs.vtu")

# create the vtk file for a mesh
vtk_writer.write_basic_mesh(Verts=V, E2V=E2V,
                            mesh_type="tri",
                            fname="output_mesh.vtu")

# to use Paraview:
# start Paraview: Paraview --data=output_mesh.vtu
# apply
# under display in the object inspector:
#     select wireframe representation
#     select a better solid color
# open file: output_aggs.vtu
# under display in the object inspector:
#     select surface with edges representation
#     select a better solid color
#     increase line width and point size to see these aggs (if present)
def test_range(self):
    """Check that P*R=B"""
    numpy.random.seed(0)  # make tests repeatable
    cases = []

    ##
    # Simple, real-valued diffusion problems
    X = load_example('airfoil')
    A = X['A'].tocsr()
    B = X['B']
    cases.append((A, B, ('jacobi', {'filter': True, 'weighting': 'local'})))
    cases.append((A, B, ('jacobi', {'filter': True, 'weighting': 'block'})))
    cases.append((A, B, ('energy', {'maxiter': 3})))
    cases.append((A, B, ('energy', {'krylov': 'cgnr'})))
    cases.append((A, B, ('energy', {'krylov': 'gmres', 'degree': 2})))

    A = poisson((10, 10), format='csr')
    B = ones((A.shape[0], 1))
    cases.append((A, B, ('jacobi', {'filter': True, 'weighting': 'diagonal'})))
    cases.append((A, B, ('jacobi', {'filter': True, 'weighting': 'local'})))
    cases.append((A, B, 'energy'))
    cases.append((A, B, ('energy', {'degree': 2})))
    cases.append((A, B, ('energy', {'krylov': 'cgnr', 'degree': 2})))
    cases.append((A, B, ('energy', {'krylov': 'gmres'})))

    ##
    # Simple, imaginary-valued problems
    iA = 1.0j*A
    iB = 1.0 + rand(iA.shape[0], 2) + 1.0j*(1.0 + rand(iA.shape[0], 2))
    cases.append((iA, B, ('jacobi', {'filter': True, 'weighting': 'diagonal'})))
    cases.append((iA, B, ('jacobi', {'filter': True, 'weighting': 'block'})))
    cases.append((iA, iB, ('jacobi', {'filter': True, 'weighting': 'local'})))
    cases.append((iA, iB, ('jacobi', {'filter': True, 'weighting': 'block'})))
    cases.append((iA.tobsr(blocksize=(5, 5)), B,
                  ('jacobi', {'filter': True, 'weighting': 'block'})))
    cases.append((iA.tobsr(blocksize=(5, 5)), iB,
                  ('jacobi', {'filter': True, 'weighting': 'block'})))
    cases.append((iA, B, ('energy', {'krylov': 'cgnr', 'degree': 2})))
    cases.append((iA, iB, ('energy', {'krylov': 'cgnr'})))
    cases.append((iA.tobsr(blocksize=(5, 5)), B,
                  ('energy', {'krylov': 'cgnr', 'degree': 2, 'maxiter': 3})))
    cases.append((iA.tobsr(blocksize=(5, 5)), iB, ('energy', {'krylov': 'cgnr'})))
    cases.append((iA, B, ('energy', {'krylov': 'gmres'})))
    cases.append((iA, iB, ('energy', {'krylov': 'gmres', 'degree': 2})))
    cases.append((iA.tobsr(blocksize=(5, 5)), B,
                  ('energy', {'krylov': 'gmres', 'degree': 2, 'maxiter': 3})))
    cases.append((iA.tobsr(blocksize=(5, 5)), iB, ('energy', {'krylov': 'gmres'})))

    ##
    # Simple, imaginary-valued problems
    iA = A + 1.0j*scipy.sparse.eye(A.shape[0], A.shape[1])
    cases.append((iA, B, ('jacobi', {'filter': True, 'weighting': 'local'})))
    cases.append((iA, B, ('jacobi', {'filter': True, 'weighting': 'block'})))
    cases.append((iA, iB, ('jacobi', {'filter': True, 'weighting': 'diagonal'})))
    cases.append((iA, iB, ('jacobi', {'filter': True, 'weighting': 'block'})))
    cases.append((iA.tobsr(blocksize=(4, 4)), iB,
                  ('jacobi', {'filter': True, 'weighting': 'block'})))
    cases.append((iA, B, ('energy', {'krylov': 'cgnr'})))
    cases.append((iA.tobsr(blocksize=(4, 4)), iB, ('energy', {'krylov': 'cgnr'})))
    cases.append((iA, B, ('energy', {'krylov': 'gmres'})))
    cases.append((iA.tobsr(blocksize=(4, 4)), iB,
                  ('energy', {'krylov': 'gmres', 'degree': 2, 'maxiter': 3})))

    ##
    A = gauge_laplacian(10, spacing=1.0, beta=0.21)
    B = ones((A.shape[0], 1))
    cases.append((A, iB, ('jacobi', {'filter': True, 'weighting': 'diagonal'})))
    cases.append((A, iB, ('jacobi', {'filter': True, 'weighting': 'local'})))
    cases.append((A, B, ('energy', {'krylov': 'cg'})))
    cases.append((A, iB, ('energy', {'krylov': 'cgnr'})))
    cases.append((A, iB, ('energy', {'krylov': 'gmres'})))
    cases.append((A.tobsr(blocksize=(2, 2)), B,
                  ('energy', {'krylov': 'cgnr', 'degree': 2, 'maxiter': 3})))
    cases.append((A.tobsr(blocksize=(2, 2)), iB, ('energy', {'krylov': 'cg'})))
    cases.append((A.tobsr(blocksize=(2, 2)), B,
                  ('energy', {'krylov': 'gmres', 'degree': 2, 'maxiter': 3})))

    ##
    A, B = linear_elasticity((10, 10))
    cases.append((A, B, ('jacobi', {'filter': True, 'weighting': 'diagonal'})))
    cases.append((A, B, ('jacobi', {'filter': True, 'weighting': 'local'})))
    cases.append((A, B, ('jacobi', {'filter': True, 'weighting': 'block'})))
    cases.append((A, B, ('energy', {'degree': 2})))
    cases.append((A, B, ('energy', {'krylov': 'cgnr'})))
    cases.append((A, B, ('energy', {'krylov': 'gmres', 'degree': 2})))

    ##
    # Classic SA cases
    for A, B, smooth in cases:
        ml = smoothed_aggregation_solver(A, B=B, max_coarse=1, max_levels=2,
                                         smooth=smooth)
        P = ml.levels[0].P
        B = ml.levels[0].B
        R = ml.levels[1].B
        assert_almost_equal(P*R, B)

    def blocksize(A):
        # Helper Function: return the blocksize of a matrix
        if isspmatrix_bsr(A):
            return A.blocksize[0]
        else:
            return 1

    ##
    # Root-node cases
    counter = 0
    for A, B, smooth in cases:
        counter += 1

        if isinstance(smooth, tuple):
            smoother = smooth[0]
        else:
            smoother = smooth

        if smoother == 'energy' and (B.shape[1] >= blocksize(A)):
            ml = rootnode_solver(A, B=B, max_coarse=1, max_levels=2,
                                 smooth=smooth,
                                 improve_candidates=[('gauss_seidel_nr',
                                                      {'sweep': 'symmetric',
                                                       'iterations': 4}),
                                                     None],
                                 keep=True, symmetry='nonsymmetric')
            T = ml.levels[0].T.tocsr()
            Cpts = ml.levels[0].Cpts
            Bf = ml.levels[0].B
            Bf_H = ml.levels[0].BH
            Bc = ml.levels[1].B
            P = ml.levels[0].P.tocsr()

            ##
            # P should preserve B in its range, wherever P
            # has enough nonzeros
            mask = ((P.indptr[1:] - P.indptr[:-1]) >= B.shape[1])
            assert_almost_equal((P*Bc)[mask, :], Bf[mask, :])
            assert_almost_equal((P*Bc)[mask, :], Bf_H[mask, :])

            ##
            # P should be the identity at Cpts
            I = eye(T.shape[1], T.shape[1], format='csr', dtype=T.dtype)
            I2 = P[Cpts, :]
            assert_almost_equal(I.data, I2.data)
            assert_equal(I.indptr, I2.indptr)
            assert_equal(I.indices, I2.indices)

            ##
            # T should be the identity at Cpts
            I2 = T[Cpts, :]
            assert_almost_equal(I.data, I2.data)
            assert_equal(I.indptr, I2.indptr)
            assert_equal(I.indices, I2.indices)
# The original demo script uses numpy and scipy below; their imports are
# added here so the excerpt is self-contained.
import numpy
import scipy
from pyamg.gallery import load_example
from pyamg import smoothed_aggregation_solver, rootnode_solver
from convergence_tools import print_cycle_history

if __name__ == '__main__':

    print "Test convergence of a small recirculating flow problem " + \
          "that generates a nonsymmetric matrix "

    choice = input('\n Input Choice:\n' +
                   '1: Run smoothed_aggregation_solver\n' +
                   '2: Run rootnode_solver\n')

    # Recirculating flow, nonsymmetric matrix
    data = load_example('recirc_flow')
    A = data['A'].tocsr()
    B = data['B']
    elements = data['elements']
    vertice = data['vertices']

    numpy.random.seed(625)
    x0 = scipy.rand(A.shape[0])
    b = A*scipy.rand(A.shape[0])

    ##
    # For demonstration, show that a solver constructed for a symmetric
    # operator fails for this matrix.
    smooth = ('energy', {'krylov': 'cg'})
    SA_build_args = {'max_levels': 10, 'max_coarse': 25,
                     'coarse_solver': 'pinv2', 'symmetry': 'hermitian'}
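    # The original script is truncated above.  A hedged sketch of how such a
    # symmetric-parameter solver would typically be built and applied
    # (mirroring the test functions earlier in this section), purely for
    # illustration and not the original continuation:
    #
    #   sa_symmetric = smoothed_aggregation_solver(A, B=B, smooth=smooth,
    #                                              **SA_build_args)
    #   residuals = []
    #   x = sa_symmetric.solve(b, x0=x0, residuals=residuals,
    #                          cycle='V', maxiter=15, tol=1e-8)
    #
    # With the nonsymmetric recirc_flow operator, this symmetric configuration
    # is expected to converge poorly, which is the point the demo goes on to
    # make.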
def test_evolution_strength_of_connection(self):
    # Params:  A, B, epsilon=4.0, k=2, proj_type="l2"
    cases = []

    # Ensure that isotropic diffusion results in isotropic strength stencil
    for N in [3, 5, 7, 10]:
        A = poisson((N, ), format='csr')
        B = np.ones((A.shape[0], 1))
        cases.append({'A': A.copy(), 'B': B.copy(), 'epsilon': 4.0,
                      'k': 2, 'proj': 'l2'})

    # Ensure that anisotropic diffusion results in an anisotropic
    # strength stencil
    for N in [3, 6, 7]:
        u = np.ones(N * N)
        A = spdiags([-u, -0.001 * u, 2.002 * u, -0.001 * u, -u],
                    [-N, -1, 0, 1, N], N * N, N * N, format='csr')
        B = np.ones((A.shape[0], 1))
        cases.append({'A': A.copy(), 'B': B.copy(), 'epsilon': 4.0,
                      'k': 2, 'proj': 'l2'})

    # Ensure that isotropic elasticity results in an isotropic stencil
    for N in [3, 6, 7]:
        (A, B) = linear_elasticity((N, N), format='bsr')
        cases.append({'A': A.copy(), 'B': B.copy(), 'epsilon': 32.0,
                      'k': 8, 'proj': 'D_A'})

    # Run an example with a non-uniform stencil
    ex = load_example('airfoil')
    A = ex['A'].tocsr()
    B = np.ones((A.shape[0], 1))
    cases.append({'A': A.copy(), 'B': B.copy(), 'epsilon': 8.0,
                  'k': 4, 'proj': 'D_A'})
    Absr = A.tobsr(blocksize=(5, 5))
    cases.append({'A': Absr.copy(), 'B': B.copy(), 'epsilon': 8.0,
                  'k': 4, 'proj': 'D_A'})

    # Different B
    B = arange(1, 2 * A.shape[0] + 1, dtype=float).reshape(-1, 2)
    cases.append({'A': A.copy(), 'B': B.copy(), 'epsilon': 4.0,
                  'k': 2, 'proj': 'l2'})
    cases.append({'A': Absr.copy(), 'B': B.copy(), 'epsilon': 4.0,
                  'k': 2, 'proj': 'l2'})

    # Zero row and column
    A.data[A.indptr[4]:A.indptr[5]] = 0.0
    A = A.tocsc()
    A.data[A.indptr[4]:A.indptr[5]] = 0.0
    A.eliminate_zeros()
    A = A.tocsr()
    A.sort_indices()
    cases.append({'A': A.copy(), 'B': B.copy(), 'epsilon': 4.0,
                  'k': 2, 'proj': 'l2'})
    Absr = A.tobsr(blocksize=(5, 5))
    cases.append({'A': Absr.copy(), 'B': B.copy(), 'epsilon': 4.0,
                  'k': 2, 'proj': 'l2'})

    for ca in cases:
        scipy.random.seed(0)  # make results deterministic
        result = evolution_soc(ca['A'], ca['B'], epsilon=ca['epsilon'],
                               k=ca['k'], proj_type=ca['proj'],
                               symmetrize_measure=False)
        scipy.random.seed(0)  # make results deterministic
        expected = reference_evolution_soc(ca['A'], ca['B'],
                                           epsilon=ca['epsilon'],
                                           k=ca['k'], proj_type=ca['proj'])
        assert_array_almost_equal(result.todense(), expected.todense(),
                                  decimal=4)

        scipy.random.seed(0)  # make results deterministic
        result = evolution_soc(ca['A'], ca['B'], epsilon=ca['epsilon'],
                               k=ca['k'], proj_type=ca['proj'],
                               symmetrize_measure=False, weighting='local')
        scipy.random.seed(0)  # make results deterministic
        expected = reference_evolution_soc(ca['A'], ca['B'],
                                           epsilon=ca['epsilon'],
                                           k=ca['k'], proj_type=ca['proj'],
                                           weighting='local')
        assert_array_almost_equal(result.todense(), expected.todense(),
                                  decimal=4)

    # Test Scale Invariance for multiple near nullspace candidates
    (A, B) = linear_elasticity((5, 5), format='bsr')
    scipy.random.seed(0)  # make results deterministic
    result_unscaled = evolution_soc(A, B, epsilon=4.0, k=2,
                                    proj_type="D_A",
                                    symmetrize_measure=False)

    # create scaled A
    D = spdiags([arange(A.shape[0], 2 * A.shape[0], dtype=float)],
                [0], A.shape[0], A.shape[0], format='csr')
    Dinv = spdiags([1.0 / arange(A.shape[0], 2 * A.shape[0], dtype=float)],
                   [0], A.shape[0], A.shape[0], format='csr')
    scipy.random.seed(0)  # make results deterministic
    result_scaled = evolution_soc((D * A * D).tobsr(blocksize=(2, 2)),
                                  Dinv * B, epsilon=4.0, k=2,
                                  proj_type="D_A",
                                  symmetrize_measure=False)
    assert_array_almost_equal(result_scaled.todense(),
                              result_unscaled.todense(), decimal=2)
# The original example script uses numpy and scipy below; their imports are
# added here so the excerpt is self-contained.
import numpy
import scipy
from scipy.linalg import svd
import pylab
from smoothed_aggregation_helmholtz_solver import \
    smoothed_aggregation_helmholtz_solver, planewaves
from pyamg.util.linalg import norm
from pyamg import gallery
from convergence_tools import *
from my_vis import my_vis, shrink_elmts

if __name__ == '__main__':

    # Retrieve 2-D Helmholtz Operator and problem data.
    # This operator was discretized with a local
    # discontinuous Galerkin method.
    data = gallery.load_example('helmholtz_2D')
    A = data['A'].tocsr()
    omega = data['omega']
    h = data['h']
    ppw = data['ppw']
    elements = data['elements']
    vertices = data['vertices']

    print "\nRunning 2D Helmholtz Example"
    print "-- %1.2f Points-per-wavelength"%ppw
    print "-- %1.2e = h, %1.2f = omega"%(h,omega)
    print "-- Discretized with a local discontinuous Galerkin method\n   on annulus-shaped domain"

    # random initial guess for zero right-hand-side
    numpy.random.seed(625)
    x0 = scipy.rand(A.shape[0])
""" import numpy import scipy from pyamg.gallery import load_example from pyamg import smoothed_aggregation_solver from convergence_tools import print_cycle_history if __name__ == '__main__': print "\nDiffusion problem discretized with p=5 and the local\n" + \ "discontinuous Galerkin method." # Discontinuous Galerkin Diffusion Problem data = load_example('local_disc_galerkin_diffusion') A = data['A'].tocsr() B = data['B'] elements = data['elements'] vertices = data['vertices'] numpy.random.seed(625) x0 = scipy.rand(A.shape[0]) b = numpy.zeros_like(x0) ## # For demonstration, show that a naive SA solver # yields unsatisfactory convergence smooth = ('jacobi', {'filter': True}) strength = ('symmetric', {'theta': 0.1}) SA_solve_args = {'cycle': 'W', 'maxiter': 20, 'tol': 1e-8, 'accel': 'cg'} SA_build_args={'max_levels':10, 'max_coarse':25, 'coarse_solver':'pinv2', \
    ##
    # To run the best solver found above, uncomment next two lines
    #from elas_diagnostic import elas_diagnostic
    #elas_diagnostic(A)

if choice == 4:

    ##
    # Try a basic nonsymmetric recirculating flow problem
    # --> Only use V-cycles by specifying cycle_list
    # --> Don't specify symmetry and definiteness and allow for auto-detection
    # --> Specify the maximum coarse size and coarse grid solver with
    #     coarse_size_list
    # --> Try two different Krylov wrappers and set the maximum number of
    #     iterations and halting tolerance with krylov_list.
    A = gallery.load_example('recirc_flow')['A'].tocsr()
    solver_diagnostics(A,
                       fname='recirc_flow_diagnostic',
                       cycle_list=['V'],
                       coarse_size_list=[(15, 'pinv')],
                       krylov_list=[('gmres', {'tol': 1e-12, 'maxiter': 100}),
                                    ('bicgstab', {'tol': 1e-12, 'maxiter': 100})])

    ##
    # To run the best solver found above, uncomment next two lines
    #from recirc_flow_diagnostic import recirc_flow_diagnostic
    #recirc_flow_diagnostic(A)
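# ---------------------------------------------------------------------------
# Illustration (an assumption, not the diagnostic's actual output): the kind
# of nonsymmetric solver that a run like the one above typically suggests for
# recirc_flow, written directly against core pyamg.  Parameter choices echo
# the nonsymmetric tests elsewhere in this document.
import numpy as np
from pyamg import smoothed_aggregation_solver
from pyamg.gallery import load_example

A = load_example('recirc_flow')['A'].tocsr()
b = np.random.rand(A.shape[0])
ml = smoothed_aggregation_solver(A, symmetry='nonsymmetric',
                                 smooth=('energy', {'krylov': 'gmres'}),
                                 strength=('evolution', {'k': 2, 'epsilon': 8.0}),
                                 presmoother=('gauss_seidel_nr', {'sweep': 'symmetric'}),
                                 postsmoother=('gauss_seidel_nr', {'sweep': 'symmetric'}),
                                 max_coarse=15, coarse_solver='pinv')
res = []
x = ml.solve(b, tol=1e-12, maxiter=100, residuals=res, accel='gmres')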
def test_evolution_strength_of_connection(self):
    # Params: A, B, epsilon=4.0, k=2, proj_type="l2"
    cases = []

    # Ensure that isotropic diffusion results in an isotropic strength stencil
    for N in [3, 5, 7, 10]:
        A = poisson((N,), format='csr')
        B = np.ones((A.shape[0], 1))
        cases.append({'A': A.copy(), 'B': B.copy(),
                      'epsilon': 4.0, 'k': 2, 'proj': 'l2'})

    # Ensure that anisotropic diffusion results in an anisotropic
    # strength stencil
    for N in [3, 6, 7]:
        u = np.ones(N * N)
        A = spdiags([-u, -0.001 * u, 2.002 * u, -0.001 * u, -u],
                    [-N, -1, 0, 1, N], N * N, N * N, format='csr')
        B = np.ones((A.shape[0], 1))
        cases.append({'A': A.copy(), 'B': B.copy(),
                      'epsilon': 4.0, 'k': 2, 'proj': 'l2'})

    # Ensure that isotropic elasticity results in an isotropic stencil
    for N in [3, 6, 7]:
        (A, B) = linear_elasticity((N, N), format='bsr')
        cases.append({'A': A.copy(), 'B': B.copy(),
                      'epsilon': 32.0, 'k': 8, 'proj': 'D_A'})

    # Run an example with a non-uniform stencil
    ex = load_example('airfoil')
    A = ex['A'].tocsr()
    B = np.ones((A.shape[0], 1))
    cases.append({'A': A.copy(), 'B': B.copy(),
                  'epsilon': 8.0, 'k': 4, 'proj': 'D_A'})
    Absr = A.tobsr(blocksize=(5, 5))
    cases.append({'A': Absr.copy(), 'B': B.copy(),
                  'epsilon': 8.0, 'k': 4, 'proj': 'D_A'})

    # Different B
    B = arange(1, 2 * A.shape[0] + 1, dtype=float).reshape(-1, 2)
    cases.append({'A': A.copy(), 'B': B.copy(),
                  'epsilon': 4.0, 'k': 2, 'proj': 'l2'})
    cases.append({'A': Absr.copy(), 'B': B.copy(),
                  'epsilon': 4.0, 'k': 2, 'proj': 'l2'})

    # Zero row and column
    A.data[A.indptr[4]:A.indptr[5]] = 0.0
    A = A.tocsc()
    A.data[A.indptr[4]:A.indptr[5]] = 0.0
    A.eliminate_zeros()
    A = A.tocsr()
    A.sort_indices()
    cases.append({'A': A.copy(), 'B': B.copy(),
                  'epsilon': 4.0, 'k': 2, 'proj': 'l2'})
    Absr = A.tobsr(blocksize=(5, 5))
    cases.append({'A': Absr.copy(), 'B': B.copy(),
                  'epsilon': 4.0, 'k': 2, 'proj': 'l2'})

    for ca in cases:
        np.random.seed(0)  # make results deterministic
        result = evolution_soc(ca['A'], ca['B'], epsilon=ca['epsilon'],
                               k=ca['k'], proj_type=ca['proj'],
                               symmetrize_measure=False)
        np.random.seed(0)  # make results deterministic
        expected = reference_evolution_soc(ca['A'], ca['B'],
                                           epsilon=ca['epsilon'],
                                           k=ca['k'], proj_type=ca['proj'])
        assert_array_almost_equal(result.todense(), expected.todense(),
                                  decimal=4)

    # Test scale invariance for multiple near-nullspace candidates
    (A, B) = linear_elasticity((5, 5), format='bsr')
    np.random.seed(0)  # make results deterministic
    result_unscaled = evolution_soc(A, B, epsilon=4.0, k=2,
                                    proj_type="D_A",
                                    symmetrize_measure=False)
    # create scaled A
    D = spdiags([arange(A.shape[0], 2 * A.shape[0], dtype=float)], [0],
                A.shape[0], A.shape[0], format='csr')
    Dinv = spdiags([1.0 / arange(A.shape[0], 2 * A.shape[0], dtype=float)],
                   [0], A.shape[0], A.shape[0], format='csr')
    np.random.seed(0)  # make results deterministic
    result_scaled = evolution_soc((D * A * D).tobsr(blocksize=(2, 2)),
                                  Dinv * B, epsilon=4.0, k=2,
                                  proj_type="D_A", symmetrize_measure=False)
    assert_array_almost_equal(result_scaled.todense(),
                              result_unscaled.todense(), decimal=2)
import numpy

from pyamg.gallery import load_example
from pyamg import smoothed_aggregation_solver, rootnode_solver
from convergence_tools import print_cycle_history

if __name__ == '__main__':

    print("Test convergence of a small recirculating flow problem "
          "that generates a nonsymmetric matrix")

    choice = int(input('\n Input Choice:\n'
                       '1: Run smoothed_aggregation_solver\n'
                       '2: Run rootnode_solver\n'))

    # Recirculating flow, nonsymmetric matrix
    data = load_example('recirc_flow')
    A = data['A'].tocsr()
    B = data['B']
    elements = data['elements']
    vertices = data['vertices']

    numpy.random.seed(625)
    x0 = numpy.random.rand(A.shape[0])
    b = A * numpy.random.rand(A.shape[0])

    ##
    # For demonstration, show that a solver constructed for a symmetric
    # operator fails for this matrix.
    smooth = ('energy', {'krylov': 'cg'})
    SA_build_args = {'max_levels': 10, 'max_coarse': 25,
                     'coarse_solver': 'pinv', 'symmetry': 'hermitian'}
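    # ------------------------------------------------------------------
    # Sketch of how the demonstration can proceed (an assumption about the
    # truncated script, not its verbatim continuation): run the
    # 'hermitian'-configured solver above and contrast it with one that
    # declares symmetry='nonsymmetric'; the nonsymmetric parameters are
    # borrowed from the recirc_flow test elsewhere in this document.
    sa_symm = smoothed_aggregation_solver(A, B=B, smooth=smooth,
                                          **SA_build_args)
    res_symm = []
    sa_symm.solve(b, x0=x0, maxiter=20, tol=1e-8, residuals=res_symm)

    sa_nonsymm = smoothed_aggregation_solver(
        A, B=B, symmetry='nonsymmetric',
        smooth=('energy', {'krylov': 'gmres'}),
        strength=('evolution', {'k': 2, 'epsilon': 8.0}),
        presmoother=('gauss_seidel_nr', {'sweep': 'symmetric'}),
        postsmoother=('gauss_seidel_nr', {'sweep': 'symmetric'}),
        max_levels=10, max_coarse=25, coarse_solver='pinv')
    res_nonsymm = []
    sa_nonsymm.solve(b, x0=x0, maxiter=20, tol=1e-8, residuals=res_nonsymm)

    factor_symm = (res_symm[-1] / res_symm[0]) ** (1.0 / len(res_symm))
    factor_nonsymm = (res_nonsymm[-1] / res_nonsymm[0]) ** (1.0 / len(res_nonsymm))
    print("avg reduction, hermitian settings:    %1.2f" % factor_symm)
    print("avg reduction, nonsymmetric settings: %1.2f" % factor_nonsymm)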
""" import numpy import scipy from pyamg.gallery import load_example from pyamg import smoothed_aggregation_solver from convergence_tools import print_cycle_history if __name__ == '__main__': print "\nDiffusion problem discretized with p=5 and the local\n" + \ "discontinuous Galerkin method." # Discontinuous Galerkin Diffusion Problem data = load_example('local_disc_galerkin_diffusion') A = data['A'].tocsr() B = data['B'] elements = data['elements'] vertices = data['vertices'] numpy.random.seed(625) x0 = scipy.rand(A.shape[0]) b = numpy.zeros_like(x0) ## # For demonstration, show that a naive SA solver # yields unsatisfactory convergence smooth=('jacobi', {'filter' : True}) strength=('symmetric', {'theta' : 0.1}) SA_solve_args={'cycle':'W', 'maxiter':20, 'tol':1e-8, 'accel' : 'cg'} SA_build_args={'max_levels':10, 'max_coarse':25, 'coarse_solver':'pinv2', \
def test_nonsymmetric(self):
    # problem data
    data = load_example("recirc_flow")
    A = data["A"].tocsr()
    B = data["B"]
    numpy.random.seed(625)
    x0 = numpy.random.rand(A.shape[0])
    b = A * numpy.random.rand(A.shape[0])

    # solver parameters
    smooth = ("energy", {"krylov": "gmres"})
    SA_build_args = {"max_coarse": 25, "coarse_solver": "pinv",
                     "symmetry": "nonsymmetric"}
    SA_solve_args = {"cycle": "V", "maxiter": 20, "tol": 1e-8}
    strength = [("evolution", {"k": 2, "epsilon": 8.0})]
    smoother = ("gauss_seidel_nr", {"sweep": "symmetric", "iterations": 1})
    improve_candidates = [("gauss_seidel_nr", {"sweep": "symmetric",
                                               "iterations": 4}), None]

    # Construct solver with nonsymmetric parameters
    sa = rootnode_solver(A, B=B, smooth=smooth,
                         improve_candidates=improve_candidates,
                         strength=strength,
                         presmoother=smoother,
                         postsmoother=smoother, **SA_build_args)
    residuals = []

    # stand-alone solve
    x = sa.solve(b, x0=x0, residuals=residuals, **SA_solve_args)
    residuals = array(residuals)
    avg_convergence_ratio = \
        (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
    # print("Test 1 %1.3e, %1.3e" % (avg_convergence_ratio, 0.7))
    assert avg_convergence_ratio < 0.7

    # accelerated solve
    residuals = []
    x = sa.solve(b, x0=x0, residuals=residuals, accel="gmres",
                 **SA_solve_args)
    del x
    residuals = array(residuals)
    avg_convergence_ratio = \
        (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
    # print("Test 2 %1.3e, %1.3e" % (avg_convergence_ratio, 0.45))
    assert avg_convergence_ratio < 0.45

    # test that nonsymmetric parameters give the same result as symmetric
    # parameters for the Poisson problem
    A = poisson((15, 15), format="csr")
    strength = "symmetric"
    SA_build_args["symmetry"] = "nonsymmetric"
    sa_nonsymm = rootnode_solver(A, B=ones((A.shape[0], 1)), smooth=smooth,
                                 strength=strength, presmoother=smoother,
                                 postsmoother=smoother,
                                 improve_candidates=None, **SA_build_args)
    SA_build_args["symmetry"] = "symmetric"
    sa_symm = rootnode_solver(A, B=ones((A.shape[0], 1)), smooth=smooth,
                              strength=strength, presmoother=smoother,
                              postsmoother=smoother,
                              improve_candidates=None, **SA_build_args)
    for (symm_lvl, nonsymm_lvl) in zip(sa_nonsymm.levels, sa_symm.levels):
        assert_array_almost_equal(symm_lvl.A.todense(),
                                  nonsymm_lvl.A.todense())
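# ---------------------------------------------------------------------------
# Minimal stand-alone sketch of the pattern exercised by the test above
# (assumes a current pyamg; parameters mirror the test rather than introduce
# anything new).
import numpy as np
from pyamg import rootnode_solver
from pyamg.gallery import load_example

data = load_example('recirc_flow')
A = data['A'].tocsr()
b = A * np.random.rand(A.shape[0])      # manufactured right-hand side

ml = rootnode_solver(A, B=data['B'], symmetry='nonsymmetric',
                     smooth=('energy', {'krylov': 'gmres'}),
                     strength=('evolution', {'k': 2, 'epsilon': 8.0}),
                     presmoother=('gauss_seidel_nr', {'sweep': 'symmetric'}),
                     postsmoother=('gauss_seidel_nr', {'sweep': 'symmetric'}),
                     max_coarse=25, coarse_solver='pinv')
res = []
x = ml.solve(b, residuals=res, tol=1e-8, maxiter=20, accel='gmres')
# average residual reduction per cycle, as computed in the test above
print((res[-1] / res[0]) ** (1.0 / len(res)))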