# The routines below use NGSolve's linear-algebra classes together with NumPy/SciPy.
# A plausible import block for running them stand-alone (an assumption about the
# surrounding module, stated here for completeness):
from math import sqrt
from numpy import random
import numpy as np
import scipy.linalg
from ngsolve import Matrix, Vector, MultiVector, InnerProduct, Norm


def PINVIT(mata, matm, pre, num=1, maxit=20, printrates=True, GramSchmidt=False):
    """preconditioned inverse iteration"""
    r = mata.CreateRowVector()
    Av = mata.CreateRowVector()
    Mv = mata.CreateRowVector()

    uvecs = []
    for i in range(num):
        uvecs.append(mata.CreateRowVector())

    vecs = []
    for i in range(2 * num):
        vecs.append(mata.CreateRowVector())

    for v in uvecs:
        r.FV().NumPy()[:] = random.rand(len(r.FV()))
        v.data = pre * r

    asmall = Matrix(2 * num, 2 * num)
    msmall = Matrix(2 * num, 2 * num)
    lams = num * [1]

    for i in range(maxit):
        for j in range(num):
            vecs[j].data = uvecs[j]
            r.data = mata * vecs[j] - lams[j] * matm * vecs[j]
            vecs[num + j].data = pre * r

        if GramSchmidt:
            Orthogonalize(vecs, matm)

        for j in range(2 * num):
            Av.data = mata * vecs[j]
            Mv.data = matm * vecs[j]
            for k in range(2 * num):
                asmall[j, k] = InnerProduct(Av, vecs[k])
                msmall[j, k] = InnerProduct(Mv, vecs[k])

        ev, evec = scipy.linalg.eigh(a=asmall, b=msmall)
        lams[:] = ev[0:num]
        if printrates:
            print(i, ":", lams)

        for j in range(num):
            uvecs[j][:] = 0.0
            for k in range(2 * num):
                uvecs[j].data += float(evec[k, j]) * vecs[k]

    return lams, uvecs
def Orthogonalize(vecs, mat):
    mv = []
    for i in range(len(vecs)):
        for j in range(i):
            vecs[i] -= InnerProduct(vecs[i], mv[j]) * vecs[j]
        hv = mat.CreateRowVector()
        hv.data = mat * vecs[i]
        norm = sqrt(InnerProduct(vecs[i], hv))
        vecs[i] *= 1 / norm
        hv *= 1 / norm
        mv.append(hv)
def pcg(A, B, b, x=None, tol=1.e-16, maxits=100, saveitfn=None):
    """Preconditioned Conjugate Gradient iterations for solving the
    B-preconditioned A-linear system  B A x = B b  in the inv(B)-inner product.
    """
    if x is None:
        x = b.CreateVector()    # if not given initial guess,
        x[:] = 0.0              # set initial solution vector = 0

    r = b.CreateVector()        # r = residual
    p = b.CreateVector()        # p = search direction
    Br = b.CreateVector()       # for storing B*r and A*p
    Ap = Br                     # (aliased on purpose: Br and Ap share one vector)

    r.data = b - A * x
    Br.data = B * r
    p.data = Br
    rBr = [0, InnerProduct(r, Br)]    # 2 consecutive residual B-norms

    for it in range(maxits):
        rBr[0] = rBr[1]               # update residual's B-norm
        Ap.data = A * p
        pAp = InnerProduct(p, Ap)
        alpha = rBr[0] / pAp          # alpha = <r, B*r> / <p, A*p>
        x.data += alpha * p           # x = x + alpha * p
        r.data += (-alpha) * Ap       # r1 = r0 - alpha * A*p
        Br.data = B * r
        rBr[1] = InnerProduct(r, Br)
        beta = rBr[1] / rBr[0]        # beta = <r1, B*r1> / <r0, B*r0>
        p.data = beta * p
        p.data += Br                  # p = B*r1 + beta * p

        if saveitfn is not None:
            saveitfn(x, it)

        if abs(rBr[1].imag) > 1.e-8 or abs(pAp.imag) > 1.e-8:
            print('\n*** rBr=', rBr, '\n*** pAp=', pAp)
            print('*** System not Hermitian!')
        else:
            if rBr[0].real * rBr[1].real < 0:
                print('\n*** Preconditioner indefinite!')

        print('PCG%6d' % it, ': pAp=%12.9g %12.9g' % (pAp.real, rBr[1].real))
        if abs(pAp) < tol or abs(rBr[1]) < tol:
            break

    return x
# Method of a Newton solver class (uses self.a, self.u, self.w, self.r, self.uh,
# self.inv and self.rhs set up elsewhere in the class).
def Solve(self, maxit=100, maxerr=1e-11, dampfactor=1, printing=False, callback=None,
          linesearch=False, printenergy=False, print_wrong_direction=False):
    numit = 0
    err = 1.
    a, u, w, r, uh = self.a, self.u, self.w, self.r, self.uh

    for it in range(maxit):
        numit += 1
        if printing:
            print("Newton iteration ", it)
            if printenergy:
                print("Energy: ", a.Energy(u.vec))

        a.AssembleLinearization(u.vec)
        a.Apply(u.vec, r)

        self._UpdateInverse()
        if self.rhs is not None:
            r.data -= self.rhs.vec
        if a.condense:
            r.data += a.harmonic_extension_trans * r
            w.data = self.inv * r
            w.data += a.harmonic_extension * w
            w.data += a.inner_solve * r
        else:
            w.data = self.inv * r

        err2 = InnerProduct(w, r)
        if err2 < 0:
            if print_wrong_direction:
                print("wrong direction")
        err = sqrt(abs(err2))
        if printing:
            print("err = ", err)

        tau = min(1, numit * dampfactor)

        if linesearch:
            uh.data = u.vec - tau * w
            energy = a.Energy(u.vec)
            while a.Energy(uh) > energy + (max(1e-14 * abs(energy), maxerr)) and tau > 1e-10:
                tau *= 0.5
                uh.data = u.vec - tau * w
                if printing:
                    print("tau = ", tau)
                    print("energy uh = ", a.Energy(uh))
            u.vec.data = uh
        else:
            u.vec.data -= tau * w

        if callback is not None:
            callback(it, err)
        if abs(err) < maxerr:
            break
    else:
        print("Warning: Newton might not converge! Error = ", err)
        return (-1, numit)
    return (0, numit)
def PINVIT(mata, matm, pre, num=1, maxit=20, printrates=True, GramSchmidt=False):
    """preconditioned inverse iteration"""
    r = mata.CreateRowVector()

    uvecs = MultiVector(r, num)
    vecs = MultiVector(r, 2 * num)
    # hv = MultiVector(r, 2*num)

    for v in vecs[0:num]:
        v.SetRandom()
    uvecs[:] = pre * vecs[0:num]
    lams = Vector(num * [1])

    for i in range(maxit):
        vecs[0:num] = mata * uvecs - (matm * uvecs).Scale(lams)
        vecs[num:2 * num] = pre * vecs[0:num]
        vecs[0:num] = uvecs

        vecs.Orthogonalize()    # matm)

        # hv[:] = mata * vecs
        # asmall = InnerProduct (vecs, hv)
        # hv[:] = matm * vecs
        # msmall = InnerProduct (vecs, hv)
        asmall = InnerProduct(vecs, mata * vecs)
        msmall = InnerProduct(vecs, matm * vecs)

        ev, evec = scipy.linalg.eigh(a=asmall, b=msmall)
        lams = Vector(ev[0:num])
        if printrates:
            print(i, ":", list(lams))

        uvecs[:] = vecs * Matrix(evec[:, 0:num])

    return lams, uvecs
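# --- Usage sketch (illustration only, not part of the routines above) ---
# Approximates the smallest Laplace/Dirichlet eigenvalues on the unit square with
# the PINVIT routine. All names below (unit_square, H1, Preconditioner, ...) are
# standard NGSolve/Netgen objects assumed to be available; mesh size and orders
# are arbitrary choices for the sketch.
from ngsolve import Mesh, H1, BilinearForm, Preconditioner, grad, dx
from netgen.geom2d import unit_square

mesh = Mesh(unit_square.GenerateMesh(maxh=0.2))
fes = H1(mesh, order=3, dirichlet=".*")
u, v = fes.TnT()

a = BilinearForm(fes)
a += grad(u) * grad(v) * dx
pre = Preconditioner(a, "multigrid")    # register preconditioner before assembling
m = BilinearForm(fes)
m += u * v * dx

a.Assemble()
m.Assemble()

lams, uvecs = PINVIT(a.mat, m.mat, pre.mat, num=5, maxit=20)
print("approximate eigenvalues:", list(lams))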
def CG(mat, rhs, pre=None, sol=None, tol=1e-12, maxsteps=100, printrates=True,
       initialize=True, conjugate=False):
    """preconditioned conjugate gradient method"""
    u = sol if sol else rhs.CreateVector()
    d = rhs.CreateVector()
    w = rhs.CreateVector()
    s = rhs.CreateVector()

    if initialize:
        u[:] = 0.0
    d.data = rhs - mat * u
    w.data = pre * d if pre else d
    err0 = sqrt(abs(InnerProduct(d, w)))
    s.data = w
    # wdn = InnerProduct (w,d)
    wdn = w.InnerProduct(d, conjugate=conjugate)

    if wdn == 0:
        return u

    for it in range(maxsteps):
        w.data = mat * s
        wd = wdn
        # as_s = InnerProduct (s, w)
        as_s = s.InnerProduct(w, conjugate=conjugate)
        alpha = wd / as_s
        u.data += alpha * s
        d.data += (-alpha) * w

        w.data = pre * d if pre else d

        # wdn = InnerProduct (w, d)
        wdn = w.InnerProduct(d, conjugate=conjugate)
        beta = wdn / wd

        s *= beta
        s.data += w

        err = sqrt(abs(wd))
        if err < tol * err0:
            break

        if printrates:
            print("it = ", it, " err = ", err)

    return u
def CG(mat, rhs, pre=None, sol=None, maxsteps=100, printrates=True, initialize=True):
    """preconditioned conjugate gradient method"""
    u = sol if sol else rhs.CreateVector()
    d = rhs.CreateVector()
    w = rhs.CreateVector()
    s = rhs.CreateVector()

    if initialize:
        u[:] = 0.0
    d.data = rhs - mat * u
    w.data = pre * d if pre else d
    s.data = w
    wdn = InnerProduct(w, d)

    for it in range(maxsteps):
        w.data = mat * s
        wd = wdn
        as_s = InnerProduct(s, w)
        alpha = wd / as_s
        u.data += alpha * s
        d.data += (-alpha) * w

        w.data = pre * d if pre else d

        wdn = InnerProduct(w, d)
        beta = wdn / wd

        s *= beta
        s.data += w

        err = sqrt(wd)
        if err < 1e-16:
            break

        if printrates:
            print("it = ", it, " err = ", err)

    return u
def pcg(mat, pre, rhs, maxits=100):
    """preconditioned conjugate gradient method"""
    u = rhs.CreateVector()
    d = rhs.CreateVector()
    w = rhs.CreateVector()
    s = rhs.CreateVector()

    u[:] = 0.0
    d.data = rhs - mat * u
    w.data = pre * d
    s.data = w
    wdn = InnerProduct(w, d)
    print("|u0| = ", wdn)

    for it in range(maxits):
        w.data = mat * s
        wd = wdn
        Ass = InnerProduct(s, w)
        alpha = wd / Ass
        u.data += alpha * s
        d.data += (-alpha) * w

        w.data = pre * d

        wdn = InnerProduct(w, d)
        beta = wdn / wd

        s *= beta
        s.data += w

        err = sqrt(wd)
        if err < 1e-16:
            break

        print("it = ", it, " err = ", err)

    return u
def Arnoldi(mat, tol=1e-10, maxiter=200):
    H = Matrix(maxiter, maxiter, complex=mat.is_complex)
    H[:, :] = 0
    v = mat.CreateVector(colvector=False)
    abv = MultiVector(v, 0)
    v.SetRandom()
    v /= Norm(v)

    for i in range(maxiter):
        abv.Append(v)
        v = (mat * v).Evaluate()
        for j in range(i + 1):
            H[j, i] = InnerProduct(v, abv[j])
            v -= H[j, i] * abv[j]
        if i + 1 < maxiter:
            H[i + 1, i] = Norm(v)
        v = 1 / Norm(v) * v

    lam, ev = scipy.linalg.eig(H)
    return Vector(lam), (abv * Matrix(ev)).Evaluate()
def QMR(mat, rhs, fdofs, pre1=None, pre2=None, sol=None, maxsteps=100, printrates=True,
        initialize=True, ep=1.0, tol=1e-7):
    u = sol if sol else rhs.CreateVector()

    r = rhs.CreateVector()
    v = rhs.CreateVector()
    v_tld = rhs.CreateVector()
    w = rhs.CreateVector()
    w_tld = rhs.CreateVector()
    y = rhs.CreateVector()
    y_tld = rhs.CreateVector()
    z = rhs.CreateVector()
    z_tld = rhs.CreateVector()
    p = rhs.CreateVector()
    p_tld = rhs.CreateVector()
    q = rhs.CreateVector()
    d = rhs.CreateVector()
    s = rhs.CreateVector()

    if (initialize):
        u[:] = 0.0

    r.data = rhs - mat * u
    v_tld.data = r
    y.data = pre1 * v_tld if pre1 else v_tld

    rho = InnerProduct(y, y)
    rho = sqrt(rho)

    w_tld.data = r
    z.data = pre2.T * w_tld if pre2 else w_tld

    xi = InnerProduct(z, z)
    xi = sqrt(xi)

    gamma = 1.0
    eta = -1.0
    theta = 0.0

    for i in range(1, maxsteps + 1):
        if (rho == 0.0):
            print('Breakdown in rho')
            return
        if (xi == 0.0):
            print('Breakdown in xi')
            return

        v.data = (1.0 / rho) * v_tld
        y.data = (1.0 / rho) * y

        w.data = (1.0 / xi) * w_tld
        z.data = (1.0 / xi) * z

        delta = InnerProduct(z, y)
        if (delta == 0.0):
            print('Breakdown in delta')
            return

        y_tld.data = pre2 * y if pre2 else y
        z_tld.data = pre1.T * z if pre1 else z

        if (i > 1):
            p.data = (-xi * delta / ep) * p
            p.data += y_tld
            q.data = (-rho * delta / ep) * q
            q.data += z_tld
        else:
            p.data = y_tld
            q.data = z_tld

        p_tld.data = mat * p
        ep = InnerProduct(q, p_tld)
        if (ep == 0.0):
            print('Breakdown in epsilon')
            return

        beta = ep / delta
        if (beta == 0.0):
            print('Breakdown in beta')
            return

        v_tld.data = p_tld - beta * v
        y.data = pre1 * v_tld if pre1 else v_tld

        rho_1 = rho
        rho = InnerProduct(y, y)
        rho = sqrt(rho)

        w_tld.data = mat.T * q
        w_tld.data -= beta * w
        z.data = pre2.T * w_tld if pre2 else w_tld

        xi = InnerProduct(z, z)
        xi = sqrt(xi)

        gamma_1 = gamma
        theta_1 = theta

        theta = rho / (gamma_1 * abs(beta))
        gamma = 1.0 / sqrt(1.0 + theta * theta)
        if (gamma == 0.0):
            print('Breakdown in gamma')
            return

        eta = -eta * rho_1 * gamma * gamma / (beta * gamma_1 * gamma_1)

        if (i > 1):
            d.data = (theta_1 * theta_1 * gamma * gamma) * d
            d.data += eta * p
            s.data = (theta_1 * theta_1 * gamma * gamma) * s
            s.data += eta * p_tld
        else:
            d.data = eta * p
            s.data = eta * p_tld

        u.data += d
        r.data -= s

        # Projected residuum: Better terminating condition necessary?
        ResNorm = sqrt(np.dot(r.FV().NumPy()[fdofs], r.FV().NumPy()[fdofs]))
        # ResNorm = sqrt(InnerProduct(r,r))

        if (ResNorm <= tol):
            break
        if (printrates):
            print("it = ", i, " err = ", ResNorm)

    return u
def MinRes(mat, rhs, pre=None, sol=None, maxsteps=100, printrates=True, initialize=True, tol=1e-7):
    u = sol if sol else rhs.CreateVector()

    v_new = rhs.CreateVector()
    v = rhs.CreateVector()
    v_old = rhs.CreateVector()
    w_new = rhs.CreateVector()
    w = rhs.CreateVector()
    w_old = rhs.CreateVector()
    z_new = rhs.CreateVector()
    z = rhs.CreateVector()
    mz = rhs.CreateVector()

    if (initialize):
        u[:] = 0.0
        v.data = rhs
    else:
        v.data = rhs - mat * u

    z.data = pre * v if pre else v

    # First step
    gamma = sqrt(InnerProduct(z, v))
    gamma_new = 0
    z.data = 1 / gamma * z
    v.data = 1 / gamma * v

    ResNorm = gamma
    ResNorm_old = gamma

    if (printrates):
        print("it = ", 0, " err = ", ResNorm)

    eta_old = gamma
    c_old = 1
    c = 1
    s_new = 0
    s = 0
    s_old = 0

    v_old[:] = 0.0
    w_old[:] = 0.0
    w[:] = 0.0

    k = 1
    while (k < maxsteps + 1 and ResNorm > tol):
        mz.data = mat * z
        delta = InnerProduct(mz, z)
        v_new.data = mz - delta * v - gamma * v_old

        z_new.data = pre * v_new if pre else v_new

        gamma_new = sqrt(InnerProduct(z_new, v_new))
        z_new *= 1 / gamma_new
        v_new *= 1 / gamma_new

        alpha0 = c * delta - c_old * s * gamma
        alpha1 = sqrt(alpha0 * alpha0 + gamma_new * gamma_new)    # **
        alpha2 = s * delta + c_old * c * gamma
        alpha3 = s_old * gamma

        c_new = alpha0 / alpha1
        s_new = gamma_new / alpha1

        w_new.data = z - alpha3 * w_old - alpha2 * w
        w_new.data = 1 / alpha1 * w_new

        u.data += c_new * eta_old * w_new
        eta = -s_new * eta_old

        # update of residuum
        ResNorm = abs(s_new) * ResNorm_old
        if (printrates):
            print("it = ", k, " err = ", ResNorm)
        k += 1

        # shift vectors by renaming
        v_old, v, v_new = v, v_new, v_old
        w_old, w, w_new = w, w_new, w_old
        z, z_new = z_new, z

        eta_old = eta
        s_old = s
        s = s_new
        c_old = c
        c = c_new
        gamma = gamma_new
        ResNorm_old = ResNorm

    return u
def NewtonMinimization(a, u, freedofs=None, maxit=100, maxerr=1e-11, inverse="umfpack",
                       el_int=False, dampfactor=1, linesearch=False, printing=True):
    """
    Newton's method for solving non-linear problems of the form A(u)=0 involving energy integrators.

    Parameters
    ----------
    a : BilinearForm
      The BilinearForm of the non-linear variational problem. It does not have to be assembled.

    u : GridFunction
      The GridFunction where the solution is saved. The values are used as initial guess for
      Newton's method.

    freedofs : BitArray
      The FreeDofs on which the assembled matrix is inverted. If the argument is 'None' then the
      FreeDofs of the underlying FESpace is used.

    maxit : int
      Maximal number of Newton iterations. If the maximal number is reached before the maximal
      error is reached, Newton might not converge and a warning is displayed.

    maxerr : float
      The maximal error which Newton should reach before it stops. The error is computed as the
      square root of the inner product of the residuum and the correction.

    inverse : string
      A string naming the sparse direct solver which should be used for inverting the assembled
      Newton matrix.

    el_int : bool
      Flag whether the eliminate-internal option is used. If this is set to True then Newton uses
      static condensation to invert the matrix. If freedofs is not None, the user has to take care
      that the local dofs are set to zero in the freedofs array.

    dampfactor : float
      Sets the damping factor for Newton's method. If dampfactor is 1 then no damping is done.
      If the value is < 1 then the damping is done by the formula 'min(1, dampfactor*numit)' for
      the correction, where 'numit' denotes the Newton iteration.

    linesearch : bool
      If True then a linesearch is used to guarantee that the energy decreases in every Newton
      iteration.

    printing : bool
      Set if Newton's method should print information about the current iteration, like the error.

    Returns
    -------
    (int, int)
      Tuple of two integers. The first one is 0 if Newton's method did converge, -1 otherwise.
      The second one gives the number of Newton iterations needed.
    """
    w = u.vec.CreateVector()
    r = u.vec.CreateVector()
    uh = u.vec.CreateVector()

    err = 1
    numit = 0
    inv = None

    for it in range(maxit):
        numit += 1
        if printing:
            print("Newton iteration ", it)
            print("energy = ", a.Energy(u.vec))

        a.Apply(u.vec, r)
        a.AssembleLinearization(u.vec)

        inv = a.mat.Inverse(freedofs if freedofs else u.space.FreeDofs(el_int), inverse=inverse)

        if el_int:
            r.data += a.harmonic_extension_trans * r
            w.data = inv * r
            w.data += a.harmonic_extension * w
            w.data += a.inner_solve * r
        else:
            w.data = inv * r

        err = sqrt(abs(InnerProduct(w, r)))
        if printing:
            print("err = ", err)

        energy = a.Energy(u.vec)
        uh.data = u.vec - min(1, numit * dampfactor) * w

        tau = min(1, numit * dampfactor)
        if linesearch:
            while a.Energy(uh) > energy + 1e-15:
                tau *= 0.5
                uh.data = u.vec - tau * w
                if printing:
                    print("tau = ", tau)
                    print("energy uh = ", a.Energy(uh))

        u.vec.data = uh

        if abs(err) < maxerr:
            break
    else:
        print("Warning: Newton might not converge!")
        return (-1, numit)
    return (0, numit)
def QMR(mat, rhs, fdofs, pre1=None, pre2=None, sol=None, maxsteps=100, printrates=True,
        initialize=True, ep=1.0, tol=1e-7):
    """Quasi-Minimal Residual (QMR) method

    Parameters
    ----------
    mat : Matrix
      The left hand side of the equation to solve.

    rhs : Vector
      The right hand side of the equation.

    fdofs : BitArray
      BitArray of free degrees of freedom.

    pre1 : Preconditioner
      First preconditioner, if provided.

    pre2 : Preconditioner
      Second preconditioner, if provided.

    sol : Vector
      Start vector for the QMR method if initialize is set to False. Gets overwritten by the
      solution vector. If sol = None then a new vector is created.

    maxsteps : int
      Maximal number of steps for QMR. If the maximal number is reached before the tolerance is
      reached, QMR stops.

    printrates : bool
      If set to True then the error of the iterations is displayed.

    initialize : bool
      If set to True then the initial guess for the QMR method is set to zero. Otherwise the
      values of the vector sol, if provided, are used.

    ep : double
      Start epsilon.

    tol : double
      Tolerance of the residuum. QMR stops if the tolerance is reached.

    Returns
    -------
    (vector)
      Solution vector of the QMR method.
    """
    u = sol if sol else rhs.CreateVector()

    r = rhs.CreateVector()
    v = rhs.CreateVector()
    v_tld = rhs.CreateVector()
    w = rhs.CreateVector()
    w_tld = rhs.CreateVector()
    y = rhs.CreateVector()
    y_tld = rhs.CreateVector()
    z = rhs.CreateVector()
    z_tld = rhs.CreateVector()
    p = rhs.CreateVector()
    p_tld = rhs.CreateVector()
    q = rhs.CreateVector()
    d = rhs.CreateVector()
    s = rhs.CreateVector()

    if (initialize):
        u[:] = 0.0

    r.data = rhs - mat * u
    v_tld.data = r
    y.data = pre1 * v_tld if pre1 else v_tld

    rho = InnerProduct(y, y)
    rho = sqrt(rho)

    w_tld.data = r
    z.data = pre2.T * w_tld if pre2 else w_tld

    xi = InnerProduct(z, z)
    xi = sqrt(xi)

    gamma = 1.0
    eta = -1.0
    theta = 0.0

    for i in range(1, maxsteps + 1):
        if (rho == 0.0):
            print('Breakdown in rho')
            return
        if (xi == 0.0):
            print('Breakdown in xi')
            return

        v.data = (1.0 / rho) * v_tld
        y.data = (1.0 / rho) * y

        w.data = (1.0 / xi) * w_tld
        z.data = (1.0 / xi) * z

        delta = InnerProduct(z, y)
        if (delta == 0.0):
            print('Breakdown in delta')
            return

        y_tld.data = pre2 * y if pre2 else y
        z_tld.data = pre1.T * z if pre1 else z

        if (i > 1):
            p.data = (-xi * delta / ep) * p
            p.data += y_tld
            q.data = (-rho * delta / ep) * q
            q.data += z_tld
        else:
            p.data = y_tld
            q.data = z_tld

        p_tld.data = mat * p
        ep = InnerProduct(q, p_tld)
        if (ep == 0.0):
            print('Breakdown in epsilon')
            return

        beta = ep / delta
        if (beta == 0.0):
            print('Breakdown in beta')
            return

        v_tld.data = p_tld - beta * v
        y.data = pre1 * v_tld if pre1 else v_tld

        rho_1 = rho
        rho = InnerProduct(y, y)
        rho = sqrt(rho)

        w_tld.data = mat.T * q
        w_tld.data -= beta * w
        z.data = pre2.T * w_tld if pre2 else w_tld

        xi = InnerProduct(z, z)
        xi = sqrt(xi)

        gamma_1 = gamma
        theta_1 = theta

        theta = rho / (gamma_1 * abs(beta))
        gamma = 1.0 / sqrt(1.0 + theta * theta)
        if (gamma == 0.0):
            print('Breakdown in gamma')
            return

        eta = -eta * rho_1 * gamma * gamma / (beta * gamma_1 * gamma_1)

        if (i > 1):
            d.data = (theta_1 * theta_1 * gamma * gamma) * d
            d.data += eta * p
            s.data = (theta_1 * theta_1 * gamma * gamma) * s
            s.data += eta * p_tld
        else:
            d.data = eta * p
            s.data = eta * p_tld

        u.data += d
        r.data -= s

        # Projected residuum: Better terminating condition necessary?
        ResNorm = sqrt(np.dot(r.FV().NumPy()[fdofs], r.FV().NumPy()[fdofs]))
        # ResNorm = sqrt(InnerProduct(r,r))

        if (printrates):
            print("it = ", i, " err = ", ResNorm)
        if (ResNorm <= tol):
            break
    else:
        print("Warning: QMR did not converge to TOL")

    return u
def CG(mat, rhs, pre=None, sol=None, tol=1e-12, maxsteps=100, printrates=True,
       initialize=True, conjugate=False):
    """preconditioned conjugate gradient method

    Parameters
    ----------
    mat : Matrix
      The left hand side of the equation to solve. The matrix has to be SPD or Hermitian.

    rhs : Vector
      The right hand side of the equation.

    pre : Preconditioner
      If provided, the preconditioner is used.

    sol : Vector
      Start vector for the CG method if initialize is set to False. Gets overwritten by the
      solution vector. If sol = None then a new vector is created.

    tol : double
      Tolerance of the residuum. CG stops if the tolerance is reached.

    maxsteps : int
      Maximal number of steps for CG. If the maximal number is reached before the tolerance is
      reached, CG stops.

    printrates : bool
      If set to True then the error of the iterations is displayed.

    initialize : bool
      If set to True then the initial guess for the CG method is set to zero. Otherwise the
      values of the vector sol, if provided, are used.

    conjugate : bool
      If set to True, then the complex inner product is used.

    Returns
    -------
    (vector)
      Solution vector of the CG method.
    """
    u = sol if sol else rhs.CreateVector()
    d = rhs.CreateVector()
    w = rhs.CreateVector()
    s = rhs.CreateVector()

    if initialize:
        u[:] = 0.0
    d.data = rhs - mat * u
    w.data = pre * d if pre else d
    err0 = sqrt(abs(InnerProduct(d, w)))
    s.data = w
    # wdn = InnerProduct (w,d)
    wdn = w.InnerProduct(d, conjugate=conjugate)

    if wdn == 0:
        return u

    for it in range(maxsteps):
        w.data = mat * s
        wd = wdn
        # as_s = InnerProduct (s, w)
        as_s = s.InnerProduct(w, conjugate=conjugate)
        alpha = wd / as_s
        u.data += alpha * s
        d.data += (-alpha) * w

        w.data = pre * d if pre else d

        # wdn = InnerProduct (w, d)
        wdn = w.InnerProduct(d, conjugate=conjugate)
        beta = wdn / wd

        s *= beta
        s.data += w

        err = sqrt(abs(wd))
        if printrates:
            print("it = ", it, " err = ", err)
        if err < tol * err0:
            break
    else:
        print("Warning: CG did not converge to TOL")

    return u
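# --- Usage sketch (illustration only, not part of the routine above) ---
# Poisson problem on the unit square, solved with the CG routine above and a
# pointwise Jacobi preconditioner. The NGSolve/Netgen names, mesh size and
# polynomial order are assumptions made for this sketch.
from ngsolve import Mesh, H1, BilinearForm, LinearForm, GridFunction, grad, dx
from netgen.geom2d import unit_square

mesh = Mesh(unit_square.GenerateMesh(maxh=0.1))
fes = H1(mesh, order=2, dirichlet=".*")
u, v = fes.TnT()

a = BilinearForm(fes)
a += grad(u) * grad(v) * dx
a.Assemble()

f = LinearForm(fes)
f += v * dx
f.Assemble()

gfu = GridFunction(fes)
jacobi = a.mat.CreateSmoother(fes.FreeDofs())    # also keeps the iterates in the free dofs
gfu.vec.data = CG(a.mat, f.vec, pre=jacobi, maxsteps=500, printrates=False)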
def MinRes(mat, rhs, pre=None, sol=None, maxsteps=100, printrates=True, initialize=True, tol=1e-7):
    """Minimal Residual (MinRes) method

    Parameters
    ----------
    mat : Matrix
      The left hand side of the equation to solve.

    rhs : Vector
      The right hand side of the equation.

    pre : Preconditioner
      If provided, the preconditioner is used.

    sol : Vector
      Start vector for the MinRes method if initialize is set to False. Gets overwritten by the
      solution vector. If sol = None then a new vector is created.

    maxsteps : int
      Maximal number of steps for MinRes. If the maximal number is reached before the tolerance
      is reached, MinRes stops.

    printrates : bool
      If set to True then the error of the iterations is displayed.

    initialize : bool
      If set to True then the initial guess for the MinRes method is set to zero. Otherwise the
      values of the vector sol, if provided, are used.

    tol : double
      Tolerance of the residuum. MinRes stops if the tolerance is reached.

    Returns
    -------
    (vector)
      Solution vector of the MinRes method.
    """
    u = sol if sol else rhs.CreateVector()

    v_new = rhs.CreateVector()
    v = rhs.CreateVector()
    v_old = rhs.CreateVector()
    w_new = rhs.CreateVector()
    w = rhs.CreateVector()
    w_old = rhs.CreateVector()
    z_new = rhs.CreateVector()
    z = rhs.CreateVector()
    mz = rhs.CreateVector()

    if (initialize):
        u[:] = 0.0
        v.data = rhs
    else:
        v.data = rhs - mat * u

    z.data = pre * v if pre else v

    # First step
    gamma = sqrt(InnerProduct(z, v))
    gamma_new = 0
    z.data = 1 / gamma * z
    v.data = 1 / gamma * v

    ResNorm = gamma
    ResNorm_old = gamma

    if (printrates):
        print("it = ", 0, " err = ", ResNorm)

    eta_old = gamma
    c_old = 1
    c = 1
    s_new = 0
    s = 0
    s_old = 0

    v_old[:] = 0.0
    w_old[:] = 0.0
    w[:] = 0.0

    k = 1
    while (k < maxsteps + 1 and ResNorm > tol):
        mz.data = mat * z
        delta = InnerProduct(mz, z)
        v_new.data = mz - delta * v - gamma * v_old

        z_new.data = pre * v_new if pre else v_new

        gamma_new = sqrt(InnerProduct(z_new, v_new))
        z_new *= 1 / gamma_new
        v_new *= 1 / gamma_new

        alpha0 = c * delta - c_old * s * gamma
        alpha1 = sqrt(alpha0 * alpha0 + gamma_new * gamma_new)    # **
        alpha2 = s * delta + c_old * c * gamma
        alpha3 = s_old * gamma

        c_new = alpha0 / alpha1
        s_new = gamma_new / alpha1

        w_new.data = z - alpha3 * w_old - alpha2 * w
        w_new.data = 1 / alpha1 * w_new

        u.data += c_new * eta_old * w_new
        eta = -s_new * eta_old

        # update of residuum
        ResNorm = abs(s_new) * ResNorm_old
        if (printrates):
            print("it = ", k, " err = ", ResNorm)
        k += 1

        # shift vectors by renaming
        v_old, v, v_new = v, v_new, v_old
        w_old, w, w_new = w, w_new, w_old
        z, z_new = z_new, z

        eta_old = eta
        s_old = s
        s = s_new
        c_old = c
        c = c_new
        gamma = gamma_new
        ResNorm_old = ResNorm

    if ResNorm > tol:
        print("Warning: MinRes did not converge to TOL")
    return u
def Newton(a, u, freedofs=None, maxit=100, maxerr=1e-11, inverse="umfpack", dampfactor=1,
           printing=True):
    """
    Newton's method for solving non-linear problems of the form A(u)=0.

    Parameters
    ----------
    a : BilinearForm
      The BilinearForm of the non-linear variational problem. It does not have to be assembled.

    u : GridFunction
      The GridFunction where the solution is saved. The values are used as initial guess for
      Newton's method.

    freedofs : BitArray
      The FreeDofs on which the assembled matrix is inverted. If the argument is 'None' then the
      FreeDofs of the underlying FESpace is used.

    maxit : int
      Maximal number of Newton iterations. If the maximal number is reached before the maximal
      error is reached, Newton might not converge and a warning is displayed.

    maxerr : float
      The maximal error which Newton should reach before it stops. The error is computed as the
      square root of the inner product of the residuum and the correction.

    inverse : string
      A string naming the sparse direct solver which should be used for inverting the assembled
      Newton matrix.

    dampfactor : float
      Sets the damping factor for Newton's method. If dampfactor is 1 then no damping is done.
      If the value is < 1 then the damping is done by the formula 'min(1, dampfactor*numit)' for
      the correction, where 'numit' denotes the Newton iteration.

    printing : bool
      Set if Newton's method should print information about the current iteration, like the error.

    Returns
    -------
    (int, int)
      Tuple of two integers. The first one is 0 if Newton's method did converge, -1 otherwise.
      The second one gives the number of Newton iterations needed.
    """
    w = u.vec.CreateVector()
    r = u.vec.CreateVector()

    err = 1
    numit = 0
    inv = None

    for it in range(maxit):
        numit += 1
        if printing:
            print("Newton iteration ", it)

        a.Apply(u.vec, r)
        a.AssembleLinearization(u.vec)

        inv = a.mat.Inverse(freedofs if freedofs else u.space.FreeDofs(a.condense),
                            inverse=inverse)

        if a.condense:
            r.data += a.harmonic_extension_trans * r
            w.data = inv * r
            w.data += a.harmonic_extension * w
            w.data += a.inner_solve * r
        else:
            w.data = inv * r

        err = sqrt(abs(InnerProduct(w, r)))
        if printing:
            print("err = ", err)

        u.vec.data -= min(1, numit * dampfactor) * w

        if abs(err) < maxerr:
            break
    else:
        print("Warning: Newton might not converge!")
        return (-1, numit)
    return (0, numit)
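# --- Usage sketch (illustration only, not part of the routine above) ---
# A simple semilinear problem A(u) = 0 with A(u) = -Delta u + u^3 - 1, discretized
# with NGSolve and solved with the Newton routine above. All names and parameters
# below are assumptions for this sketch; it also assumes the default sparse direct
# solver ("umfpack") is available in the NGSolve build.
from ngsolve import Mesh, H1, BilinearForm, GridFunction, grad, dx
from netgen.geom2d import unit_square

mesh = Mesh(unit_square.GenerateMesh(maxh=0.2))
fes = H1(mesh, order=3, dirichlet=".*")
u, v = fes.TnT()

a = BilinearForm(fes)
a += (grad(u) * grad(v) + u * u * u * v - v) * dx    # semilinear form, not assembled

gfu = GridFunction(fes)    # zero initial guess
converged, numit = Newton(a, gfu, maxit=20, printing=True)
print("converged" if converged == 0 else "not converged", "after", numit, "iterations")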