def incradj(self, uin):
    """Compute the observation part of the incremental adjoint
    equation, i.e., return B^T.B.uin

    uin = Function(self.V)
    Returns self.BtBu.vector() after overwriting it in place.
    """
    isFunction(uin)
    # Apply B (local rows only), then B^T, caching the result in BtBu.
    Bu = self.Bdotlocal(uin)
    self.BtBu.vector()[:] = self.BTdot(Bu)
    return self.BtBu.vector()
def Bdotlocal(self, uin):
    """Compute B.uin as a np.array, using only local info.

    uin must be a Function(self.V)
    Returns a np.array of length self.nbPts.
    """
    isFunction(uin)
    Bu = np.zeros(self.nbPts)
    # Hoist the dof-vector-to-array conversion out of the loop:
    # it is invariant over the rows of B and may be expensive.
    uarr = uin.vector().array()
    for ii, bb in enumerate(self.B):
        Bu[ii] = np.dot(bb.array(), uarr)  # Note: local inner-product
    return Bu
def Bdot(self, uin):
    """Compute B.uin as a np.array, using global info.

    uin must be a Function(self.V)
    Returns a np.array of length self.nbPts.
    """
    isFunction(uin)
    Bu = np.zeros(self.nbPts)
    # Hoist the loop-invariant dof-vector lookup out of the loop.
    uvec = uin.vector()
    for ii, bb in enumerate(self.B):
        Bu[ii] = bb.inner(uvec)  # Note: global inner-product
    return Bu
def assemble_rhsadj(self, uin, udin, outp, bc):
    """Compute rhs term for adjoint equation and store it in outp,
    i.e., outp = - B^T( uin - udin), where uin = obs(fwd solution)

    uin & udin = np.arrays
    outp = Function(self.V)
    bc = fenics' boundary conditons
    """
    arearrays(uin, udin)
    isFunction(outp)
    # Negated back-projection of the data misfit, then enforce the BCs.
    outp.vector()[:] = -1.0 * self.BTdot(uin - udin)
    bc.apply(outp.vector())
def _assemble(self):
    """Assemble the regularization matrices R (stiffness) and M (mass),
    the operator K = gamma*R + beta*M, its preconditioner, and LU
    solvers for M^{-1} and K^{-1}.

    Reads gamma, Vm and (optionally) beta, m0 from self.Parameters.
    """
    # Get input:
    self.gamma = self.Parameters['gamma']
    # dict.has_key was removed in Python 3; .get / 'in' work in Py2 and Py3.
    self.beta = self.Parameters.get('beta', 0.0)
    self.Vm = self.Parameters['Vm']
    if 'm0' in self.Parameters:
        self.m0 = self.Parameters['m0'].copy(deepcopy=True)
        isFunction(self.m0)
    else:
        self.m0 = Function(self.Vm)
    self.mtrial = TrialFunction(self.Vm)
    self.mtest = TestFunction(self.Vm)
    self.mysample = Function(self.Vm)
    self.draw = Function(self.Vm)
    # Assemble:
    self.R = assemble(inner(nabla_grad(self.mtrial),
                            nabla_grad(self.mtest))*dx)
    self.M = PETScMatrix()
    assemble(inner(self.mtrial, self.mtest)*dx, tensor=self.M)
    # preconditioner is Gamma^{-1}:
    if self.beta > 1e-16:
        self.precond = self.gamma*self.R + self.beta*self.M
    else:
        # tiny mass-matrix shift keeps the preconditioner invertible
        self.precond = self.gamma*self.R + (1e-14)*self.M
    # Discrete operator K:
    self.K = self.gamma*self.R + self.beta*self.M
    # Get eigenvalues for M:
    self.eigsolM = SLEPcEigenSolver(self.M)
    self.eigsolM.solve()
    # Solver for M^{-1}:
    self.solverM = LUSolver()
    self.solverM.parameters['reuse_factorization'] = True
    self.solverM.parameters['symmetric'] = True
    self.solverM.set_operator(self.M)
    # Solver for K^{-1}:
    self.solverK = LUSolver()
    self.solverK.parameters['reuse_factorization'] = True
    self.solverK.parameters['symmetric'] = True
    self.solverK.set_operator(self.K)
def _assemble(self):
    """Assemble the regularization matrices R (stiffness) and M (mass),
    the preconditioner, and Minvprior = gamma*R + beta*M.

    Reads gamma, Vm and (optionally) beta, m0 from self.Parameters.
    """
    # Get input:
    self.gamma = self.Parameters['gamma']
    # dict.has_key was removed in Python 3; .get / 'in' work in Py2 and Py3.
    self.beta = self.Parameters.get('beta', 0.0)
    self.Vm = self.Parameters['Vm']
    if 'm0' in self.Parameters:
        self.m0 = self.Parameters['m0'].copy(deepcopy=True)
        isFunction(self.m0)
    else:
        self.m0 = Function(self.Vm)
    self.mtrial = TrialFunction(self.Vm)
    self.mtest = TestFunction(self.Vm)
    self.mysample = Function(self.Vm)
    self.draw = Function(self.Vm)
    # Assemble:
    self.R = assemble(inner(nabla_grad(self.mtrial),
                            nabla_grad(self.mtest))*dx)
    self.M = assemble(inner(self.mtrial, self.mtest)*dx)
    # preconditioner is Gamma^{-1}:
    if self.beta > 1e-16:
        self.precond = self.gamma*self.R + self.beta*self.M
    else:
        # tiny mass-matrix shift keeps the preconditioner invertible
        self.precond = self.gamma*self.R + (1e-14)*self.M
    # Minvprior is M.A^2 (if you use M inner-product):
    self.Minvprior = self.gamma*self.R + self.beta*self.M
def _assemble(self):
    """Assemble the regularization matrices R (stiffness) and M (mass),
    the operator K = gamma*R + beta*M, its preconditioner, and LU
    solvers for M^{-1} and K^{-1}.

    Reads gamma, Vm and (optionally) beta, m0 from self.Parameters.
    """
    # Get input:
    self.gamma = self.Parameters['gamma']
    # dict.has_key was removed in Python 3; .get / 'in' work in Py2 and Py3.
    self.beta = self.Parameters.get('beta', 0.0)
    self.Vm = self.Parameters['Vm']
    if 'm0' in self.Parameters:
        self.m0 = self.Parameters['m0'].copy(deepcopy=True)
        isFunction(self.m0)
    else:
        self.m0 = Function(self.Vm)
    self.mtrial = TrialFunction(self.Vm)
    self.mtest = TestFunction(self.Vm)
    self.mysample = Function(self.Vm)
    self.draw = Function(self.Vm)
    # Assemble:
    self.R = assemble(inner(nabla_grad(self.mtrial),
                            nabla_grad(self.mtest))*dx)
    self.M = PETScMatrix()
    assemble(inner(self.mtrial, self.mtest)*dx, tensor=self.M)
    # preconditioner is Gamma^{-1}:
    if self.beta > 1e-16:
        self.precond = self.gamma*self.R + self.beta*self.M
    else:
        # tiny mass-matrix shift keeps the preconditioner invertible
        self.precond = self.gamma*self.R + (1e-14)*self.M
    # Discrete operator K:
    self.K = self.gamma*self.R + self.beta*self.M
    # Get eigenvalues for M:
    self.eigsolM = SLEPcEigenSolver(self.M)
    self.eigsolM.solve()
    # Solver for M^{-1}:
    self.solverM = LUSolver()
    self.solverM.parameters['reuse_factorization'] = True
    self.solverM.parameters['symmetric'] = True
    self.solverM.set_operator(self.M)
    # Solver for K^{-1}:
    self.solverK = LUSolver()
    self.solverK.parameters['reuse_factorization'] = True
    self.solverK.parameters['symmetric'] = True
    self.solverK.set_operator(self.K)
def grad(self, m_in):
    """Return Minvpriordot applied to (m_in - m0).

    m_in = Function (same space as self.m0).
    """
    isFunction(m_in)
    # Inline the difference vector; no need for a named temporary.
    return self.Minvpriordot(m_in.vector() - self.m0.vector())
def obs(self, uin):
    """ return result from pointwise observation w/o time-filtering """
    isFunction(uin)
    # Delegate to the pointwise-observation operator (global inner products).
    return self.PtwiseObs.Bdot(uin)
def grad(self, uin, udin):
    """Return W applied to the misfit (uin - udin).

    uin, udin = Functions; the difference is cached in self.diff.
    """
    isFunction(uin)
    isFunction(udin)
    residual = uin.vector() - udin.vector()
    setfct(self.diff, residual)
    return self.W * self.diffv
def costfct_F(self, uin, udin):
    """Return the misfit cost 0.5*(uin-udin)^T.W.(uin-udin).

    uin, udin = Functions; the difference is cached in self.diff.
    """
    isFunction(uin)
    isFunction(udin)
    setfct(self.diff, uin.vector() - udin.vector())
    Wdiff = self.W * self.diffv
    return 0.5 * Wdiff.inner(self.diffv)
def incradj(self, uin):
    # Incremental adjoint term: apply the Hessian to uin's dof vector.
    isFunction(uin)
    return self.hessian(uin.vector())
def assemble_rhsadj(self, uin, udin, outp, bc):
    """Store -W.(uin - udin) in outp, then apply boundary conditions.

    uin & udin = np.arrays
    outp = Function
    bc = fenics' boundary conditions
    """
    arearrays(uin, udin)
    isFunction(outp)
    self.diffv[:] = uin - udin
    rhs = self.W * self.diffv
    outp.vector()[:] = -rhs.array()
    bc.apply(outp.vector())
def obs(self, uin):
    """Return (observation array, noise value).

    Without noise, the observation is uin's dof array and the second
    element is 0.0; otherwise apply_noise produces both.
    """
    isFunction(uin)
    uarr = uin.vector().array()
    if self.noise:
        return self.apply_noise(uarr)
    return uarr, 0.0
def cost(self, m_in):
    """Return the prior cost 0.5*(m-m0)^T.Minvprior.(m-m0).

    m_in = Function (same space as self.m0).
    """
    isFunction(m_in)
    dm = m_in.vector() - self.m0.vector()
    Rdm = self.Minvpriordot(dm)
    return 0.5 * Rdm.inner(dm)