예제 #1
0
 def incradj(self, uin):
     """Observation part of the incremental adjoint equation.

     Returns B^T.B.uin as a dolfin vector (stored in self.BtBu).
     uin must be a Function(self.V).
     """
     isFunction(uin)
     Bu = self.Bdotlocal(uin)
     self.BtBu.vector()[:] = self.BTdot(Bu)
     return self.BtBu.vector()
예제 #2
0
 def Bdotlocal(self, uin):
     """Compute B.uin as a np.array, using only process-local info.

     uin must be a Function(self.V). Entry ii is the local inner
     product of row ii of B with the local dof array of uin.
     """
     isFunction(uin)
     # Hoist the dof-array extraction out of the loop: .array() copies
     # the whole local vector on every call, so doing it per-row was
     # O(nbPts * ndofs) extra copying.
     uarr = uin.vector().array()
     Bu = np.zeros(self.nbPts)
     for ii, bb in enumerate(self.B):
         Bu[ii] = np.dot(bb.array(), uarr)   # Note: local inner-product
     return Bu
예제 #3
0
 def Bdot(self, uin):
     """Compute B.uin as a np.array, using global info.

     uin must be a Function(self.V); entry ii is the global inner
     product of row ii of B with uin's dof vector.
     """
     isFunction(uin)
     uvec = uin.vector()
     out = np.zeros(self.nbPts)
     for idx, row in enumerate(self.B):
         out[idx] = row.inner(uvec) # Note: global inner-product
     return out
예제 #4
0
 def assemble_rhsadj(self, uin, udin, outp, bc):
     """Assemble the rhs of the adjoint equation into outp.

     outp = -B^T.(uin - udin), where uin = obs(fwd solution).
     uin, udin: np.arrays; outp: Function(self.V);
     bc: fenics' boundary conditions.
     """
     arearrays(uin, udin)
     isFunction(outp)
     misfit = uin - udin
     outp.vector()[:] = -1.0 * self.BTdot(misfit)
     bc.apply(outp.vector())
예제 #5
0
 def _assemble(self):
     """Assemble prior matrices, preconditioner, eigensolver, LU solvers.

     Reads 'gamma', optional 'beta' (default 0.0), 'Vm' and optional
     'm0' from self.Parameters; builds stiffness R, mass M, the
     preconditioner, operator K, an eigensolver for M, and LU solvers
     for M^{-1} and K^{-1}.
     """
     # Get input:
     self.gamma = self.Parameters['gamma']
     # dict.has_key() was removed in Python 3 -- use .get()/'in' instead
     # (equivalent behavior on Python 2 as well).
     self.beta = self.Parameters.get('beta', 0.0)
     self.Vm = self.Parameters['Vm']
     if 'm0' in self.Parameters:
         self.m0 = self.Parameters['m0'].copy(deepcopy=True)
         isFunction(self.m0)
     else:
         self.m0 = Function(self.Vm)
     self.mtrial = TrialFunction(self.Vm)
     self.mtest = TestFunction(self.Vm)
     self.mysample = Function(self.Vm)
     self.draw = Function(self.Vm)
     # Assemble stiffness (R) and mass (M) matrices:
     self.R = assemble(inner(nabla_grad(self.mtrial), \
     nabla_grad(self.mtest))*dx)
     self.M = PETScMatrix()
     assemble(inner(self.mtrial, self.mtest) * dx, tensor=self.M)
     # preconditioner is Gamma^{-1}; add a tiny mass term when beta is
     # numerically zero so the preconditioner stays invertible:
     if self.beta > 1e-16:
         self.precond = self.gamma * self.R + self.beta * self.M
     else:
         self.precond = self.gamma * self.R + (1e-14) * self.M
     # Discrete operator K:
     self.K = self.gamma * self.R + self.beta * self.M
     # Get eigenvalues for M:
     self.eigsolM = SLEPcEigenSolver(self.M)
     self.eigsolM.solve()
     # Solver for M^{-1}:
     self.solverM = LUSolver()
     self.solverM.parameters['reuse_factorization'] = True
     self.solverM.parameters['symmetric'] = True
     self.solverM.set_operator(self.M)
     # Solver for K^{-1}:
     self.solverK = LUSolver()
     self.solverK.parameters['reuse_factorization'] = True
     self.solverK.parameters['symmetric'] = True
     self.solverK.set_operator(self.K)
예제 #6
0
 def _assemble(self):
     """Assemble prior matrices and the regularization operator.

     Reads 'gamma', optional 'beta' (default 0.0), 'Vm' and optional
     'm0' from self.Parameters; builds stiffness R, mass M, the
     preconditioner and Minvprior.
     """
     # Get input:
     self.gamma = self.Parameters['gamma']
     # dict.has_key() was removed in Python 3 -- use .get()/'in' instead
     # (equivalent behavior on Python 2 as well).
     self.beta = self.Parameters.get('beta', 0.0)
     self.Vm = self.Parameters['Vm']
     if 'm0' in self.Parameters:
         self.m0 = self.Parameters['m0'].copy(deepcopy=True)
         isFunction(self.m0)
     else:
         self.m0 = Function(self.Vm)
     self.mtrial = TrialFunction(self.Vm)
     self.mtest = TestFunction(self.Vm)
     self.mysample = Function(self.Vm)
     self.draw = Function(self.Vm)
     # Assemble stiffness (R) and mass (M) matrices:
     self.R = assemble(inner(nabla_grad(self.mtrial), \
     nabla_grad(self.mtest))*dx)
     self.M = assemble(inner(self.mtrial, self.mtest)*dx)
     # preconditioner is Gamma^{-1}; add a tiny mass term when beta is
     # numerically zero so the preconditioner stays invertible:
     if self.beta > 1e-16:
         self.precond = self.gamma*self.R + self.beta*self.M
     else:
         self.precond = self.gamma*self.R + (1e-14)*self.M
     # Minvprior is M.A^2 (if you use M inner-product):
     self.Minvprior = self.gamma*self.R + self.beta*self.M
예제 #7
0
 def _assemble(self):
     """Assemble prior matrices, preconditioner, eigensolver, LU solvers.

     Reads 'gamma', optional 'beta' (default 0.0), 'Vm' and optional
     'm0' from self.Parameters; builds stiffness R, mass M, the
     preconditioner, operator K, an eigensolver for M, and LU solvers
     for M^{-1} and K^{-1}.
     """
     # Get input:
     self.gamma = self.Parameters['gamma']
     # dict.has_key() was removed in Python 3 -- use .get()/'in' instead
     # (equivalent behavior on Python 2 as well).
     self.beta = self.Parameters.get('beta', 0.0)
     self.Vm = self.Parameters['Vm']
     if 'm0' in self.Parameters:
         self.m0 = self.Parameters['m0'].copy(deepcopy=True)
         isFunction(self.m0)
     else:
         self.m0 = Function(self.Vm)
     self.mtrial = TrialFunction(self.Vm)
     self.mtest = TestFunction(self.Vm)
     self.mysample = Function(self.Vm)
     self.draw = Function(self.Vm)
     # Assemble stiffness (R) and mass (M) matrices:
     self.R = assemble(inner(nabla_grad(self.mtrial), \
     nabla_grad(self.mtest))*dx)
     self.M = PETScMatrix()
     assemble(inner(self.mtrial, self.mtest)*dx, tensor=self.M)
     # preconditioner is Gamma^{-1}; add a tiny mass term when beta is
     # numerically zero so the preconditioner stays invertible:
     if self.beta > 1e-16:
         self.precond = self.gamma*self.R + self.beta*self.M
     else:
         self.precond = self.gamma*self.R + (1e-14)*self.M
     # Discrete operator K:
     self.K = self.gamma*self.R + self.beta*self.M
     # Get eigenvalues for M:
     self.eigsolM = SLEPcEigenSolver(self.M)
     self.eigsolM.solve()
     # Solver for M^{-1}:
     self.solverM = LUSolver()
     self.solverM.parameters['reuse_factorization'] = True
     self.solverM.parameters['symmetric'] = True
     self.solverM.set_operator(self.M)
     # Solver for K^{-1}:
     self.solverK = LUSolver()
     self.solverK.parameters['reuse_factorization'] = True
     self.solverK.parameters['symmetric'] = True
     self.solverK.set_operator(self.K)
예제 #8
0
 def grad(self, m_in):
     """Gradient of the prior term at m_in: Minvprior.(m_in - m0)."""
     isFunction(m_in)
     return self.Minvpriordot(m_in.vector() - self.m0.vector())
예제 #9
0
 def obs(self, uin):
     """Return the pointwise observation of uin, without time-filtering."""
     isFunction(uin)
     observe = self.PtwiseObs.Bdot
     return observe(uin)
예제 #10
0
 def grad(self, uin, udin):
     """Gradient of the misfit term: W.(uin - udin)."""
     isFunction(uin)
     isFunction(udin)
     residual = uin.vector() - udin.vector()
     setfct(self.diff, residual)
     # NOTE(review): assumes self.diffv aliases self.diff's dof vector
     # (set elsewhere in the class) -- confirm.
     return self.W * self.diffv
예제 #11
0
 def costfct_F(self, uin, udin):
     """Misfit cost 0.5 * <W.d, d> with d = uin - udin."""
     isFunction(uin)
     isFunction(udin)
     setfct(self.diff, uin.vector() - udin.vector())
     Wd = self.W * self.diffv
     return 0.5 * Wd.inner(self.diffv)
예제 #12
0
 def incradj(self, uin):
     """Apply the Hessian to uin's dof vector (incremental adjoint)."""
     isFunction(uin)
     uvec = uin.vector()
     return self.hessian(uvec)
예제 #13
0
 def assemble_rhsadj(self, uin, udin, outp, bc):
     """Store -W.(uin - udin) into outp and apply boundary conditions.

     uin, udin: np.arrays; outp: Function; bc: fenics' BCs.
     """
     arearrays(uin, udin)
     isFunction(outp)
     self.diffv[:] = uin - udin
     weighted = self.W * self.diffv
     outp.vector()[:] = -weighted.array()
     bc.apply(outp.vector())
예제 #14
0
 def obs(self, uin):
     """Observe uin; return (data, noise-level) pair.

     When self.noise is falsy the noise level is 0.0; otherwise the
     pair comes from self.apply_noise.
     """
     isFunction(uin)
     data = uin.vector().array()
     if self.noise:
         return self.apply_noise(data)
     return data, 0.0
예제 #15
0
 def grad(self, m_in):
     """Prior gradient: Minvprior applied to (m_in - m0)."""
     isFunction(m_in)
     shift = m_in.vector() - self.m0.vector()
     return self.Minvpriordot(shift)
예제 #16
0
 def cost(self, m_in):
     """Prior cost 0.5 * <Minvprior.d, d> with d = m_in - m0."""
     isFunction(m_in)
     d = m_in.vector() - self.m0.vector()
     Md = self.Minvpriordot(d)
     return 0.5 * Md.inner(d)