def residualCheck(A, B, U, d):
    """
    Test the l2 norm of the residual:
    r[:,i] = d[i] B U[:,i] - A U[:,i]
    """
    u = Vector()
    Au = Vector()
    Bu = Vector()
    Binv_r = Vector()
    A.init_vector(u, 1)
    A.init_vector(Au, 0)
    B.init_vector(Bu, 0)
    B.init_vector(Binv_r, 0)

    nvec = d.shape[0]

    print("lambda", "||Au - lambdaBu||")
    for i in range(0, nvec):
        u.set_local(U[:, i])
        A.mult(u, Au)
        B.mult(u, Bu)
        Au.axpy(-d[i], Bu)
        print(d[i], Au.norm("l2"))
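# A minimal usage sketch for residualCheck (a serial, legacy FEniCS/dolfin run is
# assumed). The mesh, the bilinear forms and the fake eigenpair arrays below are
# illustrative only; in practice U and d would come from a generalized eigensolver
# for A u = lambda B u.
import numpy as np
from dolfin import (UnitSquareMesh, FunctionSpace, TrialFunction, TestFunction,
                    inner, grad, dx, assemble)

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)
u_, v_ = TrialFunction(V), TestFunction(V)
A = assemble(inner(grad(u_), grad(v_)) * dx)   # plays the role of A (stiffness)
B = assemble(inner(u_, v_) * dx)               # plays the role of B (mass)

n = V.dim()
U = np.random.rand(n, 2)   # two fake "eigenvectors", just to exercise the check
d = np.ones(2)             # two fake "eigenvalues"

residualCheck(A, B, U, d)  # prints lambda and ||A u - lambda B u|| per column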
class CGSampler:
    """
    This class implements the CG sampler algorithm to generate samples from N(0, A^-1).

    REFERENCE:
        Albert Parker and Colin Fox
        Sampling Gaussian Distributions in Krylov Spaces with Conjugate Gradient
        SIAM J SCI COMPUT, Vol 34, No. 3, pp. B312-B334
    """

    def __init__(self):
        """
        Construct the solver with default parameters
        tolerance = 1e-4
        print_level = 0
        verbose = 0
        """
        self.parameters = {}
        self.parameters["tolerance"] = 1e-4
        self.parameters["print_level"] = 0
        self.parameters["verbose"] = 0

        self.A = None
        self.converged = False
        self.iter = 0

        self.b = Vector()
        self.r = Vector()
        self.p = Vector()
        self.Ap = Vector()

    def set_operator(self, A):
        """
        Set the operator A, such that x ~ N(0, A^-1).

        Note: A is any object that provides the methods init_vector and mult.
        """
        self.A = A
        self.A.init_vector(self.r, 0)
        self.A.init_vector(self.p, 0)
        self.A.init_vector(self.Ap, 0)

        self.A.init_vector(self.b, 0)
        self.b.set_local(np.random.randn(self.b.array().shape[0]))

    def sample(self, noise, s):
        """
        Generate a sample s ~ N(0, A^-1).

        noise is a numpy.array of i.i.d. normal variables used as input.
        For a fixed realization of noise the algorithm is fully deterministic.
        The size of noise determines the maximum number of CG iterations.
        """
        s.zero()

        self.iter = 0
        self.converged = False

        # r0 = b
        self.r.zero()
        self.r.axpy(1., self.b)

        # p0 = r0
        self.p.zero()
        self.p.axpy(1., self.r)

        self.A.mult(self.p, self.Ap)

        d = self.p.inner(self.Ap)

        tol2 = self.parameters["tolerance"] * self.parameters["tolerance"]

        rnorm2_old = self.r.inner(self.r)

        if self.parameters["verbose"] > 0:
            print("initial residual = ", math.sqrt(rnorm2_old))

        while (not self.converged) and (self.iter < noise.shape[0]):
            gamma = rnorm2_old / d
            s.axpy(noise[self.iter] / math.sqrt(d), self.p)
            self.r.axpy(-gamma, self.Ap)
            rnorm2 = self.r.inner(self.r)
            beta = rnorm2 / rnorm2_old
            # p_new = r + beta p
            self.p *= beta
            self.p.axpy(1., self.r)
            self.A.mult(self.p, self.Ap)
            d = self.p.inner(self.Ap)
            rnorm2_old = rnorm2
            if rnorm2 < tol2:
                self.converged = True
            else:
                rnorm2_old = rnorm2
                self.iter = self.iter + 1

        if self.parameters["verbose"] > 0:
            print("Final residual {0} after {1} iterations".format(math.sqrt(rnorm2_old), self.iter))
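# Minimal usage sketch for CGSampler (legacy dolfin assumed; the mesh and forms
# below are illustrative, not part of the sampler). Any SPD operator exposing
# init_vector and mult works; here an assembled reaction-diffusion matrix is used.
import numpy as np
from dolfin import (UnitSquareMesh, FunctionSpace, TrialFunction, TestFunction,
                    inner, grad, dx, assemble, Vector)

mesh = UnitSquareMesh(16, 16)
V = FunctionSpace(mesh, "CG", 1)
u_, v_ = TrialFunction(V), TestFunction(V)
A = assemble(inner(grad(u_), grad(v_)) * dx + inner(u_, v_) * dx)  # SPD operator

sampler = CGSampler()
sampler.parameters["verbose"] = 1
sampler.set_operator(A)

s = Vector()
A.init_vector(s, 0)
noise = np.random.randn(200)   # length caps the number of CG iterations at 200
sampler.sample(noise, s)       # s is (approximately) a draw from N(0, A^-1)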
class OasisFunction(Function):
    """Function with more or less efficient projection methods
    of associated linear form.

    The matvec option is provided for letting the right hand side
    be computed through a fast matrix vector product. Both the matrix
    and the Coefficient of the required vector must be provided.

      method = "default"
        Solve projection with regular linear algebra using solver_type
        and preconditioner_type

      method = "lumping"
        Solve through lumping of mass matrix
    """

    def __init__(self, form, Space, bcs=[], name="x",
                 matvec=[None, None], method="default",
                 solver_type="cg", preconditioner_type="default"):

        Function.__init__(self, Space, name=name)
        self.form = form
        self.method = method
        self.bcs = bcs
        self.matvec = matvec
        self.trial = trial = TrialFunction(Space)
        self.test = test = TestFunction(Space)
        Mass = inner(trial, test) * dx()
        self.bf = inner(form, test) * dx()
        self.rhs = Vector(self.vector())

        if method.lower() == "default":
            self.A = A_cache[(Mass, tuple(bcs))]
            self.sol = Solver_cache[(Mass, tuple(bcs), solver_type, preconditioner_type)]

        elif method.lower() == "lumping":
            assert Space.ufl_element().degree() < 2
            self.A = A_cache[(Mass, tuple(bcs))]
            ones = Function(Space)
            ones.vector()[:] = 1.
            self.ML = self.A * ones.vector()
            self.ML.set_local(1. / self.ML.array())

    def assemble_rhs(self):
        """
        Assemble right hand side (form*test*dx) in projection
        """
        if self.matvec[0] is not None:
            mat, func = self.matvec
            self.rhs.zero()
            self.rhs.axpy(1.0, mat * func.vector())
        else:
            assemble(self.bf, tensor=self.rhs)

    def __call__(self, assemb_rhs=True):
        """
        Compute the projection
        """
        timer = Timer("Projecting {}".format(self.name()))

        if assemb_rhs:
            self.assemble_rhs()

        for bc in self.bcs:
            bc.apply(self.rhs)

        if self.method.lower() == "default":
            self.sol.solve(self.A, self.vector(), self.rhs)
        else:
            self.vector().zero()
            self.vector().axpy(1.0, self.rhs * self.ML)
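# Sketch of the "lumping" branch used by OasisFunction above (legacy dolfin
# assumed). The Oasis cache objects (A_cache, Solver_cache) are not needed here;
# the point is only to illustrate how the inverse lumped mass ML is built and
# applied with a plain elementwise vector product, as in the class above.
from dolfin import (UnitSquareMesh, FunctionSpace, TrialFunction, TestFunction,
                    Function, inner, dx, assemble)

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)        # degree < 2, as the assert requires
u_, v_ = TrialFunction(V), TestFunction(V)
M = assemble(inner(u_, v_) * dx)        # consistent mass matrix

ones = Function(V)
ones.vector()[:] = 1.
ML = M * ones.vector()                  # row sums of M = lumped mass
ML.set_local(1. / ML.array())           # store the inverse lumped mass

b = assemble(inner(ones, v_) * dx)      # some right-hand side
x = b * ML                              # "solve" M x = b with the lumped mass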
class CGSolverSteihaug:
    """
    Solve the linear system A x = b using preconditioned conjugate gradient (B preconditioner)
    and the Steihaug stopping criterion:
    - reason of termination 0: we reached the maximum number of iterations (no convergence)
    - reason of termination 1: we reduced the residual up to the given tolerance (convergence)
    - reason of termination 2: we reached a negative direction (premature termination due to not spd matrix)

    The stopping criterion is based on either
    - the absolute preconditioned residual norm check: || r^* ||_{B^{-1}} < atol
    - the relative preconditioned residual norm check: || r^* ||_{B^{-1}} / || r^0 ||_{B^{-1}} < rtol,
    where r^* = b - A x^* is the residual at convergence and r^0 = b - A x^0 is the initial residual.

    The operator A is set using the method set_operator(A).
    A must provide the following two methods:
    - A.mult(x, y): y = A*x
    - A.init_vector(x, dim): initialize the vector x so that it is compatible with the range (dim = 0)
      or the domain (dim = 1) of A.

    The preconditioner B is set using the method set_preconditioner(B).
    B must provide the following method:
    - B.solve(z, r): z is the action of the preconditioner B on the vector r

    To solve the linear system A*x = b call self.solve(x, b).
    Here x and b are assumed to be FEniCS::Vector objects.

    The parameters attribute allows one to set:
    - rel_tolerance      : the relative tolerance for the stopping criterion
    - abs_tolerance      : the absolute tolerance for the stopping criterion
    - max_iter           : the maximum number of iterations
    - zero_initial_guess : if True we start with a zero initial guess;
                           if False we use x as the initial guess
    - print_level        : verbosity level:
                           -1 --> no output on screen
                            0 --> only final residual at convergence or reason for not converging
    """

    reason = ["Maximum Number of Iterations Reached",
              "Relative/Absolute residual less than tol",
              "Reached a negative direction"
              ]

    def __init__(self):
        self.parameters = {}
        self.parameters["rel_tolerance"] = 1e-9
        self.parameters["abs_tolerance"] = 1e-12
        self.parameters["max_iter"] = 1000
        self.parameters["zero_initial_guess"] = True
        self.parameters["print_level"] = 0

        self.A = None
        self.B = None
        self.converged = False
        self.iter = 0
        self.reasonid = 0
        self.final_norm = 0

        self.r = Vector()
        self.z = Vector()
        self.d = Vector()

    def set_operator(self, A):
        """
        Set the operator A.
        """
        self.A = A
        self.A.init_vector(self.r, 0)
        self.A.init_vector(self.z, 0)
        self.A.init_vector(self.d, 0)

    def set_preconditioner(self, B):
        """
        Set the preconditioner B.
        """
        self.B = B

    def solve(self, x, b):
        """
        Solve the linear system Ax = b
        """
        self.iter = 0
        self.converged = False
        self.reasonid = 0

        betanom = 0.0
        alpha = 0.0
        beta = 0.0

        if self.parameters["zero_initial_guess"]:
            self.r.zero()
            self.r.axpy(1.0, b)
            x.zero()
        else:
            self.A.mult(x, self.r)
            self.r *= -1.0
            self.r.axpy(1.0, b)

        self.z.zero()
        self.B.solve(self.z, self.r)  # z = B^-1 r

        self.d.zero()
        self.d.axpy(1., self.z)  # d = z

        nom0 = self.d.inner(self.r)
        nom = nom0

        if self.parameters["print_level"] == 1:
            print(" Iteration : ", 0, " (B r, r) = ", nom)

        rtol2 = nom * self.parameters["rel_tolerance"] * self.parameters["rel_tolerance"]
        atol2 = self.parameters["abs_tolerance"] * self.parameters["abs_tolerance"]
        r0 = max(rtol2, atol2)

        if nom <= r0:
            self.converged = True
            self.reasonid = 1
            self.final_norm = math.sqrt(nom)
            if self.parameters["print_level"] >= 0:
                print(self.reason[self.reasonid])
                print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
            return

        self.A.mult(self.d, self.z)  # z = A d
        den = self.z.inner(self.d)

        if den <= 0.0:
            self.converged = True
            self.reasonid = 2
            self.final_norm = math.sqrt(nom)
            if self.parameters["print_level"] >= 0:
                print(self.reason[self.reasonid])
                print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
            return

        # start iteration
        self.iter = 1
        while True:
            alpha = nom / den
            x.axpy(alpha, self.d)        # x = x + alpha d
            self.r.axpy(-alpha, self.z)  # r = r - alpha A d

            self.B.solve(self.z, self.r)  # z = B^-1 r
            betanom = self.r.inner(self.z)

            if self.parameters["print_level"] == 1:
                print(" Iteration : ", self.iter, " (B r, r) = ", betanom)

            if betanom < r0:
                self.converged = True
                self.reasonid = 1
                self.final_norm = math.sqrt(betanom)
                if self.parameters["print_level"] >= 0:
                    print(self.reason[self.reasonid])
                    print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
                break

            self.iter += 1
            if self.iter > self.parameters["max_iter"]:
                self.converged = False
                self.reasonid = 0
                self.final_norm = math.sqrt(betanom)
                if self.parameters["print_level"] >= 0:
                    print(self.reason[self.reasonid])
                    print("Not Converged. Final residual norm ", self.final_norm)
                break

            beta = betanom / nom
            self.d *= beta
            self.d.axpy(1., self.z)  # d = z + beta d

            self.A.mult(self.d, self.z)  # z = A d
            den = self.d.inner(self.z)

            if den <= 0.0:
                self.converged = True
                self.reasonid = 2
                self.final_norm = math.sqrt(nom)
                if self.parameters["print_level"] >= 0:
                    print(self.reason[self.reasonid])
                    print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
                break

            nom = betanom
class CGSolverSteihaug:
    """
    Solve the linear system A x = b using preconditioned conjugate gradient (B preconditioner)
    and the Steihaug stopping criterion:
    - reason of termination 0: we reached the maximum number of iterations (no convergence)
    - reason of termination 1: we reduced the residual up to the given tolerance (convergence)
    - reason of termination 2: we reached a negative direction (premature termination due to not spd matrix)

    The operator A is set using the method set_operator(A).
    A must provide the following two methods:
    - A.mult(x, y): y = A*x
    - A.init_vector(x, dim): initialize the vector x so that it is compatible with the range (dim = 0)
      or the domain (dim = 1) of A.

    The preconditioner B is set using the method set_preconditioner(B).
    B must provide the following method:
    - B.solve(z, r): z is the action of the preconditioner B on the vector r

    To solve the linear system A*x = b call solve(x, b).
    Here x and b are assumed to be FEniCS::Vector objects.

    The maximum number of iterations, tolerances, verbosity level, etc. can be set
    using the parameters attribute.
    """

    reason = ["Maximum Number of Iterations Reached",
              "Relative/Absolute residual less than tol",
              "Reached a negative direction"
              ]

    def __init__(self):
        self.parameters = {}
        self.parameters["rel_tolerance"] = 1e-9
        self.parameters["abs_tolerance"] = 1e-12
        self.parameters["max_iter"] = 1000
        self.parameters["zero_initial_guess"] = True
        self.parameters["print_level"] = 0

        self.A = None
        self.B = None
        self.converged = False
        self.iter = 0
        self.reasonid = 0
        self.final_norm = 0

        self.r = Vector()
        self.z = Vector()
        self.d = Vector()

        if dolfin.__version__[2] == '3':
            self.vrs130 = True
        else:
            self.vrs130 = False

    def set_operator(self, A):
        self.A = A
        if self.vrs130:
            self.r = self.A.init_vector130()
            self.z = self.A.init_vector130()
            self.d = self.A.init_vector130()
        else:
            self.A.init_vector(self.r, 0)
            self.A.init_vector(self.z, 0)
            self.A.init_vector(self.d, 0)

    def set_preconditioner(self, B):
        self.B = B

    def solve(self, x, b):
        self.iter = 0
        self.converged = False
        self.reasonid = 0

        betanom = 0.0
        alpha = 0.0
        beta = 0.0

        if self.parameters["zero_initial_guess"]:
            self.r.zero()
            self.r.axpy(1.0, b)
            x.zero()
        else:
            self.A.mult(x, self.r)
            self.r *= -1.0
            self.r.axpy(1.0, b)

        self.z.zero()
        self.B.solve(self.z, self.r)  # z = B^-1 r

        self.d.zero()
        self.d.axpy(1., self.z)  # d = z

        nom0 = self.d.inner(self.r)
        nom = nom0

        if self.parameters["print_level"] == 1:
            print(" Iteration : ", 0, " (B r, r) = ", nom)

        rtol2 = nom * self.parameters["rel_tolerance"] * self.parameters["rel_tolerance"]
        atol2 = self.parameters["abs_tolerance"] * self.parameters["abs_tolerance"]
        r0 = max(rtol2, atol2)

        if nom <= r0:
            self.converged = True
            self.reasonid = 1
            self.final_norm = math.sqrt(nom)
            if self.parameters["print_level"] >= 0:
                print(self.reason[self.reasonid])
                print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
            return

        self.A.mult(self.d, self.z)  # z = A d
        den = self.z.inner(self.d)

        if den <= 0.0:
            self.converged = True
            self.reasonid = 2
            self.final_norm = math.sqrt(nom)
            if self.parameters["print_level"] >= 0:
                print(self.reason[self.reasonid])
                print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
            return

        # start iteration
        self.iter = 1
        while True:
            alpha = nom / den
            x.axpy(alpha, self.d)        # x = x + alpha d
            self.r.axpy(-alpha, self.z)  # r = r - alpha A d

            self.B.solve(self.z, self.r)  # z = B^-1 r
            betanom = self.r.inner(self.z)

            if self.parameters["print_level"] == 1:
                print(" Iteration : ", self.iter, " (B r, r) = ", betanom)

            if betanom < r0:
                self.converged = True
                self.reasonid = 1
                self.final_norm = math.sqrt(betanom)
                if self.parameters["print_level"] >= 0:
                    print(self.reason[self.reasonid])
                    print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
                break

            self.iter += 1
            if self.iter > self.parameters["max_iter"]:
                self.converged = False
                self.reasonid = 0
                self.final_norm = math.sqrt(betanom)
                if self.parameters["print_level"] >= 0:
                    print(self.reason[self.reasonid])
                    print("Not Converged. Final residual norm ", self.final_norm)
                break

            beta = betanom / nom
            self.d *= beta
            self.d.axpy(1., self.z)  # d = z + beta d

            self.A.mult(self.d, self.z)  # z = A d
            den = self.d.inner(self.z)

            if den <= 0.0:
                self.converged = True
                self.reasonid = 2
                self.final_norm = math.sqrt(nom)
                if self.parameters["print_level"] >= 0:
                    print(self.reason[self.reasonid])
                    print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
                break

            nom = betanom
class CGSampler:
    """
    This class implements the CG sampler algorithm to generate samples from
    :math:`\mathcal{N}(0, A^{-1})`.

    Reference:
        `Albert Parker and Colin Fox
        Sampling Gaussian Distributions in Krylov Spaces with Conjugate Gradient
        SIAM J SCI COMPUT, Vol 34, No. 3, pp. B312-B334`
    """

    def __init__(self):
        """
        Construct the solver with default parameters
        :code:`tolerance = 1e-4`
        :code:`print_level = 0`
        :code:`verbose = 0`
        """
        self.parameters = {}
        self.parameters["tolerance"] = 1e-4
        self.parameters["print_level"] = 0
        self.parameters["verbose"] = 0

        self.A = None
        self.converged = False
        self.iter = 0

        self.b = Vector()
        self.r = Vector()
        self.p = Vector()
        self.Ap = Vector()

    def set_operator(self, A):
        """
        Set the operator :code:`A`, such that :math:`x \sim \mathcal{N}(0, A^{-1})`.

        .. note:: :code:`A` is any object that provides the methods :code:`init_vector()` and :code:`mult()`
        """
        self.A = A
        self.A.init_vector(self.r, 0)
        self.A.init_vector(self.p, 0)
        self.A.init_vector(self.Ap, 0)

        self.A.init_vector(self.b, 0)
        parRandom.normal(1., self.b)

    def sample(self, noise, s):
        """
        Generate a sample :math:`s \sim \mathcal{N}(0, A^{-1})`.

        :code:`noise` is a :code:`numpy.array` of i.i.d. normal variables used as input.
        For a fixed realization of :code:`noise` the algorithm is fully deterministic.
        The size of :code:`noise` determines the maximum number of CG iterations.
        """
        s.zero()

        self.iter = 0
        self.converged = False

        # r0 = b
        self.r.zero()
        self.r.axpy(1., self.b)

        # p0 = r0
        self.p.zero()
        self.p.axpy(1., self.r)

        self.A.mult(self.p, self.Ap)

        d = self.p.inner(self.Ap)

        tol2 = self.parameters["tolerance"] * self.parameters["tolerance"]

        rnorm2_old = self.r.inner(self.r)

        if self.parameters["verbose"] > 0:
            print("initial residual = {0:g}".format(math.sqrt(rnorm2_old)))

        while (not self.converged) and (self.iter < noise.shape[0]):
            gamma = rnorm2_old / d
            s.axpy(noise[self.iter] / math.sqrt(d), self.p)
            self.r.axpy(-gamma, self.Ap)
            rnorm2 = self.r.inner(self.r)
            beta = rnorm2 / rnorm2_old
            # p_new = r + beta p
            self.p *= beta
            self.p.axpy(1., self.r)
            self.A.mult(self.p, self.Ap)
            d = self.p.inner(self.Ap)
            rnorm2_old = rnorm2
            if rnorm2 < tol2:
                self.converged = True
            else:
                rnorm2_old = rnorm2
                self.iter = self.iter + 1

        if self.parameters["verbose"] > 0:
            print("Final residual {0} after {1} iterations".format(math.sqrt(rnorm2_old), self.iter))
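# Usage sketch for this variant (an hIPPYlib + legacy dolfin environment is
# assumed, since set_operator() uses parRandom; the mesh and forms below are
# illustrative). The internal vector b is drawn once per set_operator() call,
# and each sample() call consumes a fresh noise array whose length caps the
# number of CG iterations.
import numpy as np
import dolfin as dl

mesh = dl.UnitSquareMesh(16, 16)
V = dl.FunctionSpace(mesh, "CG", 1)
uh, vh = dl.TrialFunction(V), dl.TestFunction(V)
A = dl.assemble(dl.inner(dl.grad(uh), dl.grad(vh)) * dl.dx + uh * vh * dl.dx)

sampler = CGSampler()
sampler.set_operator(A)

s = dl.Vector()
A.init_vector(s, 0)
for k in range(3):
    noise = np.random.randn(300)   # at most 300 CG iterations per sample
    sampler.sample(noise, s)       # s is an (approximate) draw from N(0, A^{-1})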
class CGSolverSteihaug:
    """
    Solve the linear system A x = b using preconditioned conjugate gradient (B preconditioner)
    and the Steihaug stopping criterion:
    - reason of termination 0: we reached the maximum number of iterations (no convergence)
    - reason of termination 1: we reduced the residual up to the given tolerance (convergence)
    - reason of termination 2: we reached a negative direction (premature termination due to not spd matrix)
    - reason of termination 3: we reached the boundary of the trust region

    The stopping criterion is based on either
    - the absolute preconditioned residual norm check: || r^* ||_{B^{-1}} < atol
    - the relative preconditioned residual norm check: || r^* ||_{B^{-1}} / || r^0 ||_{B^{-1}} < rtol,
    where r^* = b - A x^* is the residual at convergence and r^0 = b - A x^0 is the initial residual.

    The operator A is set using the method set_operator(A).
    A must provide the following two methods:
    - A.mult(x, y): y = A*x
    - A.init_vector(x, dim): initialize the vector x so that it is compatible with the range (dim = 0)
      or the domain (dim = 1) of A.

    The preconditioner B is set using the method set_preconditioner(B).
    B must provide the following method:
    - B.solve(z, r): z is the action of the preconditioner B on the vector r

    To solve the linear system A*x = b call self.solve(x, b).
    Here x and b are assumed to be FEniCS::Vector objects.

    Type: CGSolverSteihaug_ParameterList().showMe() for default parameters and their descriptions.
    """

    reason = ["Maximum Number of Iterations Reached",
              "Relative/Absolute residual less than tol",
              "Reached a negative direction",
              "Reached trust region boundary"
              ]

    def __init__(self, parameters=CGSolverSteihaug_ParameterList()):
        self.parameters = parameters

        self.A = None
        self.B_solver = None
        self.B_op = None
        self.converged = False
        self.iter = 0
        self.reasonid = 0
        self.final_norm = 0

        self.TR_radius_2 = None

        self.update_x = self.update_x_without_TR

        self.r = Vector()
        self.z = Vector()
        self.d = Vector()
        self.Bx = Vector()

    def set_operator(self, A):
        """
        Set the operator A.
        """
        self.A = A
        self.A.init_vector(self.r, 0)
        self.A.init_vector(self.z, 0)
        self.A.init_vector(self.d, 0)

    def set_preconditioner(self, B_solver):
        """
        Set the preconditioner B.
        """
        self.B_solver = B_solver

    def set_TR(self, radius, B_op):
        assert self.parameters["zero_initial_guess"]
        self.TR_radius_2 = radius * radius
        self.update_x = self.update_x_with_TR
        self.B_op = B_op
        self.B_op.init_vector(self.Bx, 0)

    def update_x_without_TR(self, x, alpha, d):
        x.axpy(alpha, d)
        return False

    def update_x_with_TR(self, x, alpha, d):
        x_bk = x.copy()
        x.axpy(alpha, d)
        self.Bx.zero()
        self.B_op.mult(x, self.Bx)
        x_Bnorm2 = self.Bx.inner(x)

        if x_Bnorm2 < self.TR_radius_2:
            return False
        else:
            # Move the point to the boundary of the trust region
            self.Bx.zero()
            self.B_op.mult(x_bk, self.Bx)
            x_Bnorm2 = self.Bx.inner(x_bk)
            Bd = Vector()
            self.B_op.init_vector(Bd, 0)
            Bd.zero()
            self.B_op.mult(self.d, Bd)
            d_Bnorm2 = Bd.inner(d)
            d_Bx = Bd.inner(x_bk)
            a_tau = alpha * alpha * d_Bnorm2
            b_tau_half = alpha * d_Bx
            c_tau = x_Bnorm2 - self.TR_radius_2
            # Solve the quadratic for tau
            tau = (-b_tau_half + math.sqrt(b_tau_half * b_tau_half - a_tau * c_tau)) / a_tau
            x.zero()
            x.axpy(1, x_bk)
            x.axpy(tau * alpha, d)
            return True

    def solve(self, x, b):
        """
        Solve the linear system Ax = b
        """
        self.iter = 0
        self.converged = False
        self.reasonid = 0

        betanom = 0.0
        alpha = 0.0
        beta = 0.0

        if self.parameters["zero_initial_guess"]:
            self.r.zero()
            self.r.axpy(1.0, b)
            x.zero()
        else:
            assert self.TR_radius_2 is None
            self.A.mult(x, self.r)
            self.r *= -1.0
            self.r.axpy(1.0, b)

        self.z.zero()
        self.B_solver.solve(self.z, self.r)  # z = B^-1 r

        self.d.zero()
        self.d.axpy(1., self.z)  # d = z

        nom0 = self.d.inner(self.r)
        nom = nom0

        if self.parameters["print_level"] == 1:
            print(" Iteration : ", 0, " (B r, r) = ", nom)

        rtol2 = nom * self.parameters["rel_tolerance"] * self.parameters["rel_tolerance"]
        atol2 = self.parameters["abs_tolerance"] * self.parameters["abs_tolerance"]
        r0 = max(rtol2, atol2)

        if nom <= r0:
            self.converged = True
            self.reasonid = 1
            self.final_norm = math.sqrt(nom)
            if self.parameters["print_level"] >= 0:
                print(self.reason[self.reasonid])
                print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
            return

        self.A.mult(self.d, self.z)  # z = A d
        den = self.z.inner(self.d)

        if den <= 0.0:
            self.converged = True
            self.reasonid = 2
            x.axpy(1., self.d)
            self.r.axpy(-1., self.z)
            self.B_solver.solve(self.z, self.r)
            nom = self.r.inner(self.z)
            self.final_norm = math.sqrt(nom)
            if self.parameters["print_level"] >= 0:
                print(self.reason[self.reasonid])
                print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
            return

        # start iteration
        self.iter = 1
        while True:
            alpha = nom / den
            TrustBool = self.update_x(x, alpha, self.d)  # x = x + alpha d
            if TrustBool:
                self.converged = True
                self.reasonid = 3
                self.final_norm = math.sqrt(betanom)
                if self.parameters["print_level"] >= 0:
                    print(self.reason[self.reasonid])
                    print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
                break

            self.r.axpy(-alpha, self.z)  # r = r - alpha A d

            self.B_solver.solve(self.z, self.r)  # z = B^-1 r
            betanom = self.r.inner(self.z)

            if self.parameters["print_level"] == 1:
                print(" Iteration : ", self.iter, " (B r, r) = ", betanom)

            if betanom < r0:
                self.converged = True
                self.reasonid = 1
                self.final_norm = math.sqrt(betanom)
                if self.parameters["print_level"] >= 0:
                    print(self.reason[self.reasonid])
                    print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
                break

            self.iter += 1
            if self.iter > self.parameters["max_iter"]:
                self.converged = False
                self.reasonid = 0
                self.final_norm = math.sqrt(betanom)
                if self.parameters["print_level"] >= 0:
                    print(self.reason[self.reasonid])
                    print("Not Converged. Final residual norm ", self.final_norm)
                break

            beta = betanom / nom
            self.d *= beta
            self.d.axpy(1., self.z)  # d = z + beta d

            self.A.mult(self.d, self.z)  # z = A d
            den = self.d.inner(self.z)

            if den <= 0.0:
                self.converged = True
                self.reasonid = 2
                self.final_norm = math.sqrt(nom)
                if self.parameters["print_level"] >= 0:
                    print(self.reason[self.reasonid])
                    print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
                break

            nom = betanom
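# Sketch of the trust-region usage (an hIPPYlib-style environment is assumed so
# that CGSolverSteihaug_ParameterList() is available; the mesh, forms and radius
# below are illustrative). B_op defines the norm in which the trust-region
# radius is measured and only needs to expose mult and init_vector.
import dolfin as dl

mesh = dl.UnitSquareMesh(16, 16)
V = dl.FunctionSpace(mesh, "CG", 1)
uh, vh = dl.TrialFunction(V), dl.TestFunction(V)
A = dl.assemble(dl.inner(dl.grad(uh), dl.grad(vh)) * dl.dx + uh * vh * dl.dx)  # "Hessian"
M = dl.assemble(uh * vh * dl.dx)                                               # trust-region metric

class IdentityPreconditioner:
    # Trivial wrapper satisfying the B_solver.solve(z, r) interface.
    def solve(self, z, r):
        z.zero()
        z.axpy(1.0, r)

solver = CGSolverSteihaug()                    # uses the default parameter list
solver.set_operator(A)
solver.set_preconditioner(IdentityPreconditioner())
solver.set_TR(1.0, M)                          # requires zero_initial_guess == True

x, b = dl.Vector(), dl.Vector()
A.init_vector(x, 1)
A.init_vector(b, 0)
b[:] = 1.0
solver.solve(x, b)
print(solver.reason[solver.reasonid])          # reports the trust-region exit if the boundary was hit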
class CGSolverSteihaug:
    """
    Solve the linear system A x = b using preconditioned conjugate gradient (B preconditioner)
    and the Steihaug stopping criterion:
    - reason of termination 0: we reached the maximum number of iterations (no convergence)
    - reason of termination 1: we reduced the residual up to the given tolerance (convergence)
    - reason of termination 2: we reached a negative direction (premature termination due to not spd matrix)

    The operator A is set using the method set_operator(A).
    A must provide the following two methods:
    - A.mult(x, y): y = A*x
    - A.init_vector(x, dim): initialize the vector x so that it is compatible with the range (dim = 0)
      or the domain (dim = 1) of A.

    The preconditioner B is set using the method set_preconditioner(B).
    B must provide the following method:
    - B.solve(z, r): z is the action of the preconditioner B on the vector r

    To solve the linear system A*x = b call solve(x, b).
    Here x and b are assumed to be FEniCS::Vector objects.

    The maximum number of iterations, tolerances, verbosity level, etc. can be set
    using the parameters attribute.
    """

    reason = ["Maximum Number of Iterations Reached",
              "Relative/Absolute residual less than tol",
              "Reached a negative direction"
              ]

    def __init__(self):
        self.parameters = {}
        self.parameters["rel_tolerance"] = 1e-9
        self.parameters["abs_tolerance"] = 1e-12
        self.parameters["max_iter"] = 1000
        self.parameters["zero_initial_guess"] = True
        self.parameters["print_level"] = 0

        self.A = None
        self.B = None
        self.converged = False
        self.iter = 0
        self.reasonid = 0
        self.final_norm = 0

        self.r = Vector()
        self.z = Vector()
        self.d = Vector()

        if dolfin.__version__[2] == '5':
            self.vrs150 = True
        else:
            self.vrs150 = False

    def set_operator(self, A):
        self.A = A
        if self.vrs150:
            self.A.init_vector(self.r, 0)
            self.A.init_vector(self.z, 0)
            self.A.init_vector(self.d, 0)
        else:
            self.r = self.A.init_vector130()
            self.z = self.A.init_vector130()
            self.d = self.A.init_vector130()

    def set_preconditioner(self, B):
        self.B = B

    def solve(self, x, b):
        self.iter = 0
        self.converged = False
        self.reasonid = 0

        betanom = 0.0
        alpha = 0.0
        beta = 0.0

        if self.parameters["zero_initial_guess"]:
            self.r.zero()
            self.r.axpy(1.0, b)
            x.zero()
        else:
            self.A.mult(x, self.r)
            self.r *= -1.0
            self.r.axpy(1.0, b)

        self.z.zero()
        self.B.solve(self.z, self.r)  # z = B^-1 r

        self.d.zero()
        self.d.axpy(1., self.z)  # d = z

        nom0 = self.d.inner(self.r)
        nom = nom0

        if self.parameters["print_level"] == 1:
            print(" Iteration : ", 0, " (B r, r) = ", nom)

        rtol2 = nom * self.parameters["rel_tolerance"] * self.parameters["rel_tolerance"]
        atol2 = self.parameters["abs_tolerance"] * self.parameters["abs_tolerance"]
        r0 = max(rtol2, atol2)

        if nom <= r0:
            self.converged = True
            self.reasonid = 1
            self.final_norm = math.sqrt(nom)
            if self.parameters["print_level"] >= 0:
                print(self.reason[self.reasonid])
                print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
            return

        self.A.mult(self.d, self.z)  # z = A d
        den = self.z.inner(self.d)

        if den <= 0.0:
            self.converged = True
            self.reasonid = 2
            self.final_norm = math.sqrt(nom)
            if self.parameters["print_level"] >= 0:
                print(self.reason[self.reasonid])
                print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
            return

        # start iteration
        self.iter = 1
        while True:
            alpha = nom / den
            x.axpy(alpha, self.d)        # x = x + alpha d
            self.r.axpy(-alpha, self.z)  # r = r - alpha A d

            self.B.solve(self.z, self.r)  # z = B^-1 r
            betanom = self.r.inner(self.z)

            if self.parameters["print_level"] == 1:
                print(" Iteration : ", self.iter, " (B r, r) = ", betanom)

            if betanom < r0:
                self.converged = True
                self.reasonid = 1
                self.final_norm = math.sqrt(betanom)
                if self.parameters["print_level"] >= 0:
                    print(self.reason[self.reasonid])
                    print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
                break

            self.iter += 1
            if self.iter > self.parameters["max_iter"]:
                self.converged = False
                self.reasonid = 0
                self.final_norm = math.sqrt(betanom)
                if self.parameters["print_level"] >= 0:
                    print(self.reason[self.reasonid])
                    print("Not Converged. Final residual norm ", self.final_norm)
                break

            beta = betanom / nom
            self.d *= beta
            self.d.axpy(1., self.z)  # d = z + beta d

            self.A.mult(self.d, self.z)  # z = A d
            den = self.d.inner(self.z)

            if den <= 0.0:
                self.converged = True
                self.reasonid = 2
                self.final_norm = math.sqrt(nom)
                if self.parameters["print_level"] >= 0:
                    print(self.reason[self.reasonid])
                    print("Converged in ", self.iter, " iterations with final norm ", self.final_norm)
                break

            nom = betanom