def extend_hierarchy(levels, strength, aggregate, smooth, improve_candidates,
                     diagonal_dominance=False, keep=True):
    """Service routine to implement the strength of connection, aggregation,
    tentative prolongation construction, and prolongation smoothing.  Called
    by smoothed_aggregation_solver.
    """

    def unpack_arg(v):
        if isinstance(v, tuple):
            return v[0], v[1]
        else:
            return v, {}

    A = levels[-1].A
    B = levels[-1].B
    if A.symmetry == "nonsymmetric":
        AH = A.H.asformat(A.format)
        BH = levels[-1].BH

    # Compute the strength-of-connection matrix C, where larger
    # C[i, j] denote stronger couplings between i and j.
    fn, kwargs = unpack_arg(strength[len(levels) - 1])
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        if 'B' in kwargs:
            C = evolution_strength_of_connection(A, **kwargs)
        else:
            C = evolution_strength_of_connection(A, B, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'predefined':
        C = kwargs['C'].tocsr()
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn is None:
        C = A.tocsr()
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))

    # Avoid coarsening diagonally dominant rows
    flag, kwargs = unpack_arg(diagonal_dominance)
    if flag:
        C = eliminate_diag_dom_nodes(A, C, **kwargs)

    # Compute the aggregation matrix AggOp (i.e., the nodal coarsening of A).
    # AggOp is a boolean matrix, where the sparsity pattern for the k-th
    # column denotes the fine-grid nodes agglomerated into the k-th
    # coarse-grid node.
    fn, kwargs = unpack_arg(aggregate[len(levels) - 1])
    if fn == 'standard':
        AggOp = standard_aggregation(C, **kwargs)[0]
    elif fn == 'naive':
        AggOp = naive_aggregation(C, **kwargs)[0]
    elif fn == 'lloyd':
        AggOp = lloyd_aggregation(C, **kwargs)[0]
    elif fn == 'predefined':
        AggOp = kwargs['AggOp'].tocsr()
    else:
        raise ValueError('unrecognized aggregation method %s' % str(fn))

    # Improve near-nullspace candidates by relaxing on A B = 0
    fn, kwargs = unpack_arg(improve_candidates[len(levels) - 1])
    if fn is not None:
        b = np.zeros((A.shape[0], 1), dtype=A.dtype)
        B = relaxation_as_linear_operator((fn, kwargs), A, b) * B
        levels[-1].B = B
        if A.symmetry == "nonsymmetric":
            BH = relaxation_as_linear_operator((fn, kwargs), AH, b) * BH
            levels[-1].BH = BH

    # Compute the tentative prolongator, T, which is a tentative
    # interpolation matrix from the coarse grid to the fine grid.
    # T exactly interpolates B_fine = T B_coarse.
    T, B = fit_candidates(AggOp, B)
    if A.symmetry == "nonsymmetric":
        TH, BH = fit_candidates(AggOp, BH)

    # Smooth the tentative prolongator, so that its accuracy is greatly
    # improved for algebraically smooth error.
    fn, kwargs = unpack_arg(smooth[len(levels) - 1])
    if fn == 'jacobi':
        P = jacobi_prolongation_smoother(A, T, C, B, **kwargs)
    elif fn == 'richardson':
        P = richardson_prolongation_smoother(A, T, **kwargs)
    elif fn == 'energy':
        P = energy_prolongation_smoother(A, T, C, B, None, (False, {}),
                                         **kwargs)
    elif fn is None:
        P = T
    else:
        raise ValueError('unrecognized prolongation smoother method %s' %
                         str(fn))

    # Compute the restriction matrix, R, which interpolates from the
    # fine grid to the coarse grid.  If A is nonsymmetric, then R must be
    # constructed based on A.H.  Otherwise R = P.H or P.T.
    symmetry = A.symmetry
    if symmetry == 'hermitian':
        R = P.H
    elif symmetry == 'symmetric':
        R = P.T
    elif symmetry == 'nonsymmetric':
        fn, kwargs = unpack_arg(smooth[len(levels) - 1])
        if fn == 'jacobi':
            R = jacobi_prolongation_smoother(AH, TH, C, BH, **kwargs).H
        elif fn == 'richardson':
            R = richardson_prolongation_smoother(AH, TH, **kwargs).H
        elif fn == 'energy':
            R = energy_prolongation_smoother(AH, TH, C, BH, None,
                                             (False, {}), **kwargs)
            R = R.H
        elif fn is None:
            R = T.H
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

    if keep:
        levels[-1].C = C              # strength of connection matrix
        levels[-1].AggOp = AggOp      # aggregation operator
        levels[-1].T = T              # tentative prolongator

    levels[-1].P = P                  # smoothed prolongator
    levels[-1].R = R                  # restriction operator

    levels.append(multilevel_solver.level())

    A = R * A * P                     # Galerkin operator
    A.symmetry = symmetry
    levels[-1].A = A
    levels[-1].B = B                  # right near-nullspace candidates

    if A.symmetry == "nonsymmetric":
        levels[-1].BH = BH            # left near-nullspace candidates
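
# A hedged usage sketch (illustrative, assuming PyAMG's public API):
# extend_hierarchy is internal and is driven level-by-level by
# smoothed_aggregation_solver, whose per-level (name, kwargs) tuples are
# exactly what unpack_arg above consumes.
def _demo_extend_hierarchy():
    import numpy as np
    from pyamg import smoothed_aggregation_solver
    from pyamg.gallery import poisson

    A = poisson((100, 100), format='csr')  # 2D Poisson test matrix
    ml = smoothed_aggregation_solver(
        A,
        strength=('symmetric', {'theta': 0.0}),   # strength of connection
        aggregate='standard',                     # standard aggregation
        smooth=('jacobi', {'omega': 4.0 / 3.0}),  # prolongation smoothing
        keep=True)                                # keep C, AggOp, T per level
    b = np.random.rand(A.shape[0])
    return ml.solve(b, tol=1e-8)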
def general_setup_stage(ml, symmetry, candidate_iters, prepostsmoother,
                        smooth, eliminate_local, coarse_solver, work):
    """
    Computes additional candidates and improvements
    following Algorithm 4 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (alphaSA) Multigrid"
       SIAM Review Volume 47, Issue 2 (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """
    def make_bridge(T):
        M, N = T.shape
        K = T.blocksize[0]
        bnnz = T.indptr[-1]
        # the K+1 represents the new dof introduced by the new candidate.
        # the bridge 'T' ignores this new dof and just maps zeros there
        data = np.zeros((bnnz, K + 1, K), dtype=T.dtype)
        data[:, :-1, :] = T.data
        return bsr_matrix((data, T.indices, T.indptr),
                          shape=((K + 1) * (M // K), N))

    def expand_candidates(B_old, nodesize):
        # insert a new dof that is always zero, to create NullDim+1 dofs per
        # node in B
        NullDim = B_old.shape[1]
        nnodes = B_old.shape[0] // nodesize
        Bnew = np.zeros((nnodes, nodesize + 1, NullDim), dtype=B_old.dtype)
        Bnew[:, :-1, :] = B_old.reshape(nnodes, nodesize, NullDim)
        return Bnew.reshape(-1, NullDim)

    levels = ml.levels

    x = np.random.rand(levels[0].A.shape[0], 1)
    if levels[0].A.dtype == complex:
        x = x + 1.0j * np.random.rand(levels[0].A.shape[0], 1)
    b = np.zeros_like(x)

    x = ml.solve(b, x0=x, tol=float(np.finfo(np.float64).tiny),
                 maxiter=candidate_iters)
    work[:] += ml.operator_complexity() * ml.levels[0].A.nnz * \
        candidate_iters * 2

    T0 = levels[0].T.copy()

    # TEST FOR CONVERGENCE HERE

    for i in range(len(ml.levels) - 2):
        # The alpha-SA paper does local elimination here, but after talking
        # to Marian, it's not clear that this helps things
        # fn, kwargs = unpack_arg(eliminate_local)
        # if fn == True:
        #     eliminate_local_candidates(x, levels[i].AggOp, levels[i].A,
        #                                levels[i].T, **kwargs)

        # add candidate to B
        B = np.hstack((levels[i].B, x.reshape(-1, 1)))

        # construct Ptent
        T, R = fit_candidates(levels[i].AggOp, B)

        levels[i].T = T
        x = R[:, -1].reshape(-1, 1)

        # smooth P
        fn, kwargs = unpack_arg(smooth[i])
        if fn == 'jacobi':
            levels[i].P = jacobi_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R,
                                                       **kwargs)
        elif fn == 'richardson':
            levels[i].P = richardson_prolongation_smoother(levels[i].A, T,
                                                           **kwargs)
        elif fn == 'energy':
            levels[i].P = energy_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R, None,
                                                       (False, {}), **kwargs)
            x = R[:, -1].reshape(-1, 1)
        elif fn is None:
            levels[i].P = T
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i].R = levels[i].P.T.asformat(levels[i].P.format)
        elif symmetry == 'hermitian':
            levels[i].R = levels[i].P.H.asformat(levels[i].P.format)

        # construct coarse A
        levels[i + 1].A = levels[i].R * levels[i].A * levels[i].P

        # construct bridging P
        T_bridge = make_bridge(levels[i + 1].T)
        R_bridge = levels[i + 2].B

        # smooth bridging P
        fn, kwargs = unpack_arg(smooth[i + 1])
        if fn == 'jacobi':
            levels[i + 1].P = jacobi_prolongation_smoother(
                levels[i + 1].A, T_bridge, levels[i + 1].C, R_bridge,
                **kwargs)
        elif fn == 'richardson':
            levels[i + 1].P = richardson_prolongation_smoother(
                levels[i + 1].A, T_bridge, **kwargs)
        elif fn == 'energy':
            levels[i + 1].P = energy_prolongation_smoother(
                levels[i + 1].A, T_bridge, levels[i + 1].C, R_bridge, None,
                (False, {}), **kwargs)
        elif fn is None:
            levels[i + 1].P = T_bridge
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct the "bridging" R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i + 1].R = levels[i + 1].P.T.asformat(
                levels[i + 1].P.format)
        elif symmetry == 'hermitian':
            levels[i + 1].R = levels[i + 1].P.H.asformat(
                levels[i + 1].P.format)

        # run solver on candidate
        solver = multilevel_solver(levels[i + 1:],
                                   coarse_solver=coarse_solver)
        change_smoothers(solver, presmoother=prepostsmoother,
                         postsmoother=prepostsmoother)
        x = solver.solve(np.zeros_like(x), x0=x,
                         tol=float(np.finfo(np.float64).tiny),
                         maxiter=candidate_iters)
        work[:] += 2 * solver.operator_complexity() * \
            solver.levels[0].A.nnz * candidate_iters * 2

        # update values on next level
        levels[i + 1].B = R[:, :-1].copy()
        levels[i + 1].T = T_bridge

    # note that we only use the x from the second coarsest level
    fn, kwargs = unpack_arg(prepostsmoother)
    for lvl in reversed(levels[:-2]):
        x = lvl.P * x
        work[:] += lvl.A.nnz * candidate_iters * 2

        if fn == 'gauss_seidel':
            # only relax at nonzeros, so as not to mess up any locally
            # dropped candidates
            indices = np.ravel(x).nonzero()[0]
            gauss_seidel_indexed(lvl.A, x, np.zeros_like(x), indices,
                                 iterations=candidate_iters,
                                 sweep='symmetric')
        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(lvl.A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(lvl.A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'jacobi':
            jacobi(lvl.A, x, np.zeros_like(x), iterations=1,
                   omega=1.0 / rho_D_inv_A(lvl.A))
        elif fn == 'richardson':
            polynomial(lvl.A, x, np.zeros_like(x), iterations=1,
                       coefficients=[1.0 /
                                     approximate_spectral_radius(lvl.A)])
        elif fn == 'gmres':
            x[:] = (gmres(lvl.A, np.zeros_like(x), x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # x will be dense again, so we have to drop locally again
    elim, elim_kwargs = unpack_arg(eliminate_local)
    if elim is True:
        x = x / norm(x, 'inf')
        eliminate_local_candidates(x, levels[0].AggOp, levels[0].A, T0,
                                   **elim_kwargs)

    return x.reshape(-1, 1)
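
# Hedged illustration of the (name, kwargs) argument convention that the
# setup stages rely on; unpack_arg is defined locally in extend_hierarchy
# and assumed available at module level elsewhere.
def _demo_unpack_arg():
    def unpack_arg(v):
        # mirrors the helper above: a tuple is (method_name, kwargs);
        # a bare method name gets an empty kwargs dict
        if isinstance(v, tuple):
            return v[0], v[1]
        return v, {}

    assert unpack_arg(('jacobi', {'omega': 4.0 / 3.0})) == \
        ('jacobi', {'omega': 4.0 / 3.0})
    assert unpack_arg('richardson') == ('richardson', {})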
def initial_setup_stage(A, symmetry, pdef, candidate_iters, epsilon,
                        max_levels, max_coarse, aggregate, prepostsmoother,
                        smooth, strength, work, initial_candidate=None):
    r"""
    Computes a complete aggregation and the first near-nullspace candidate
    following Algorithm 3 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations
    epsilon
        minimum acceptable relaxation convergence factor

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation ($\alpha$SA) Multigrid"
       SIAM Review Volume 47, Issue 2 (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """
    # Define relaxation routine
    def relax(A, x):
        fn, kwargs = unpack_arg(prepostsmoother)
        if fn == 'gauss_seidel':
            gauss_seidel(A, x, np.zeros_like(x),
                         iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'jacobi':
            jacobi(A, x, np.zeros_like(x), iterations=1,
                   omega=1.0 / rho_D_inv_A(A))
        elif fn == 'richardson':
            polynomial(A, x, np.zeros_like(x), iterations=1,
                       coefficients=[1.0 / approximate_spectral_radius(A)])
        elif fn == 'gmres':
            x[:] = (gmres(A, np.zeros_like(x), x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # flag for skipping steps f-i in step 4
    skip_f_to_i = True

    # step 1
    A_l = A
    if initial_candidate is None:
        x = np.random.rand(A_l.shape[0], 1)
        if A_l.dtype == complex:
            x = x + 1.0j * np.random.rand(A_l.shape[0], 1)
    else:
        x = np.array(initial_candidate, dtype=A_l.dtype)

    # step 2
    relax(A_l, x)
    work[:] += A_l.nnz * candidate_iters * 2

    # step 3
    # not advised to stop the iteration here: often the first relaxation
    # pass _is_ good, but the remaining passes are poor
    # if x_A_x / x_A_x_old < epsilon:
    #     # relaxation alone is sufficient
    #     print('relaxation alone works: %g' % (x_A_x / x_A_x_old))
    #     return x, []

    # step 4
    As = [A]
    xs = [x]
    Ps = []
    AggOps = []
    StrengthOps = []

    while A.shape[0] > max_coarse and max_levels > 1:
        # The real check to break from the while loop is below

        # Begin constructing next level
        fn, kwargs = unpack_arg(strength[len(As) - 1])  # step 4b
        if fn == 'symmetric':
            C_l = symmetric_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
        elif fn == 'classical':
            C_l = classical_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
            if isspmatrix_bsr(A_l):
                C_l = amalgamate(C_l, A_l.blocksize[0])
        elif (fn == 'ode') or (fn == 'evolution'):
            C_l = evolution_strength_of_connection(
                A_l, np.ones((A_l.shape[0], 1), dtype=A.dtype), **kwargs)
        elif fn == 'predefined':
            C_l = kwargs['C'].tocsr()
        elif fn is None:
            C_l = A_l.tocsr()
        else:
            raise ValueError('unrecognized strength of connection method: '
                             '%s' % str(fn))

        # In SA, strength represents "distance", so we take the magnitude of
        # complex values
        if C_l.dtype == complex:
            C_l.data = np.abs(C_l.data)

        # Create a unified strength framework so that large values represent
        # strong connections and small values represent weak connections
        if (fn == 'ode') or (fn == 'evolution') or (fn == 'energy_based'):
            C_l.data = 1.0 / C_l.data

        # aggregation
        fn, kwargs = unpack_arg(aggregate[len(As) - 1])
        if fn == 'standard':
            AggOp = standard_aggregation(C_l, **kwargs)[0]
        elif fn == 'lloyd':
            AggOp = lloyd_aggregation(C_l, **kwargs)[0]
        elif fn == 'predefined':
            AggOp = kwargs['AggOp'].tocsr()
        else:
            raise ValueError('unrecognized aggregation method %s' % str(fn))

        T_l, x = fit_candidates(AggOp, x)  # step 4c

        fn, kwargs = unpack_arg(smooth[len(As) - 1])  # step 4d
        if fn == 'jacobi':
            P_l = jacobi_prolongation_smoother(A_l, T_l, C_l, x, **kwargs)
        elif fn == 'richardson':
            P_l = richardson_prolongation_smoother(A_l, T_l, **kwargs)
        elif fn == 'energy':
            P_l = energy_prolongation_smoother(A_l, T_l, C_l, x, None,
                                               (False, {}), **kwargs)
        elif fn is None:
            P_l = T_l
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # R should reflect A's structure  # step 4e
        if symmetry == 'symmetric':
            A_l = P_l.T.asformat(P_l.format) * A_l * P_l
        elif symmetry == 'hermitian':
            A_l = P_l.H.asformat(P_l.format) * A_l * P_l

        StrengthOps.append(C_l)
        AggOps.append(AggOp)
        Ps.append(P_l)
        As.append(A_l)

        # skip to step 5 as in step 4e
        if (A_l.shape[0] <= max_coarse) or (len(AggOps) + 1 >= max_levels):
            break

        if not skip_f_to_i:
            x_hat = x.copy()  # step 4g
            relax(A_l, x)  # step 4h
            work[:] += A_l.nnz * candidate_iters * 2
            if pdef is True:
                x_A_x = np.dot(np.conjugate(x).T, A_l * x)
                xhat_A_xhat = np.dot(np.conjugate(x_hat).T, A_l * x_hat)
                err_ratio = (x_A_x / xhat_A_xhat)**(1.0 / candidate_iters)
            else:
                # use A.H A inner-product
                Ax = A_l * x
                # Axhat = A_l * x_hat
                x_A_x = np.dot(np.conjugate(Ax).T, Ax)
                xhat_A_xhat = np.dot(np.conjugate(x_hat).T, A_l * x_hat)
                err_ratio = (x_A_x / xhat_A_xhat)**(1.0 / candidate_iters)

            if err_ratio < epsilon:  # step 4i
                # print("sufficient convergence, skipping")
                skip_f_to_i = True
                if x_A_x == 0:
                    x = x_hat  # need to restore x
        else:
            # just carry out relaxation, don't check for convergence
            relax(A_l, x)  # step 4h
            work[:] += 2 * A_l.nnz * candidate_iters

        # store xs for diagnostic use and for use in step 5
        xs.append(x)

    # step 5
    # Extend coarse-level candidate to the finest level
    # --> note that we start with the x from the second coarsest level
    x = xs[-1]
    # make sure that xs[-1] has been relaxed by step 4h,
    # i.e. relax(As[-2], x)
    for lev in range(len(Ps) - 2, -1, -1):  # lev = coarsest ... finest-1
        P = Ps[lev]   # I: lev --> lev+1
        A = As[lev]   # A on lev+1
        x = P * x

        relax(A, x)
        work[:] += A.nnz * candidate_iters * 2

    # Set predefined strength of connection and aggregation
    if len(AggOps) > 1:
        aggregate = [('predefined', {'AggOp': AggOps[i]})
                     for i in range(len(AggOps))]
        strength = [('predefined', {'C': StrengthOps[i]})
                    for i in range(len(StrengthOps))]

    return x, aggregate, strength  # first candidate
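
# Hedged usage sketch: in PyAMG, initial_setup_stage and general_setup_stage
# are driven by the public adaptive_sa_solver (name assumed per
# pyamg.aggregation.adaptive), which returns a solver and a work estimate.
def _demo_adaptive_sa_solver():
    import numpy as np
    from pyamg.aggregation import adaptive_sa_solver
    from pyamg.gallery import poisson

    A = poisson((75, 75), format='csr')
    # one extra candidate computed by general_setup_stage;
    # candidate_iters test relaxations per setup stage
    ml, work = adaptive_sa_solver(A, num_candidates=2, candidate_iters=5)
    residuals = []
    x = ml.solve(np.zeros(A.shape[0]), x0=np.random.rand(A.shape[0]),
                 tol=1e-8, residuals=residuals)
    return x, residuals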