def L_BFGS(sigma, mu):
    '''
    Limited-memory BFGS quasi-Newton method used to solve the minimization
    problem arising from the least squares reconstruction method. For more
    information on the limited-memory BFGS method, refer to Numerical
    Optimization by Nocedal and Wright.
    '''
    def cost_sigma(s_vec):
        s = Function(Vs)
        s.vector().set_local(s_vec.array()[:])
        return cost(Gamma, s, mu, u, H_data, k)

    def grad_sigma(s_vec):
        s = Function(Vs)
        s.vector().set_local(s_vec.array()[:])
        return gradient(gamma, s, mu, Gamma, u, v)[0]

    def cost_mu(m_vec):
        m = Function(Vs)
        m.vector().set_local(m_vec.array()[:])
        return cost(Gamma, sigma, m, u, H_data, k)

    def grad_mu(m_vec):
        m = Function(Vs)
        m.vector().set_local(m_vec.array()[:])
        return gradient(gamma, sigma, m, Gamma, u, v)[1]

    k = 0
    m_MAX = 7  # number of stored (s, y) correction pairs
    m = 1
    s_sigma, s_mu = deque(), deque()
    y_sigma, y_mu = deque(), deque()
    rho_sigma, rho_mu = deque(), deque()
    alpha_sigma = [0] * m_MAX
    alpha_mu = [0] * m_MAX
    converged = False

    # Take a single steepest descent step to build the first (s, y) pair.
    # The directional derivative along -q is -q.inner(q).
    q_sigma, q_mu = gradient(gamma, sigma, mu, Gamma, u, v)
    c = cost(Gamma, sigma, mu, u, H_data, k)
    a_sigma = linesearch(cost_sigma, grad_sigma, sigma.vector(), -q_sigma, c,
                         -q_sigma.inner(q_sigma))
    a_mu = linesearch(cost_mu, grad_mu, mu.vector(), -q_mu, c,
                      -q_mu.inner(q_mu))
    sigma.vector()[:] = sigma.vector()[:] - a_sigma * q_sigma
    mu.vector()[:] = mu.vector()[:] - a_mu * q_mu
    s_sigma.append(-a_sigma * q_sigma)
    s_mu.append(-a_mu * q_mu)
    q_sigma_prev, q_mu_prev = q_sigma, q_mu
    q_sigma, q_mu = gradient(gamma, sigma, mu, Gamma, u, v)
    y_sigma.append(q_sigma - q_sigma_prev)
    y_mu.append(q_mu - q_mu_prev)
    rho_sigma.append(1.0 / y_sigma[-1].inner(s_sigma[-1]))
    rho_mu.append(1.0 / y_mu[-1].inner(s_mu[-1]))

    while not converged:
        # TODO: better conditions for convergence
        c = cost(Gamma, sigma, mu, u, H_data, k)
        print("cost: {}".format(c))
        if c < 1e-5 or k == 20:
            if c < 1e-5:
                print("Converged")
            else:
                print("Maximum iterations reached")
            converged = True
            break

        plt.figure(figsize=(15, 5))
        nb.plot(sigma, subplot_loc=121, mytitle="sigma", show_axis='on')
        nb.plot(mu, subplot_loc=122, mytitle="mu")
        plt.show()

        # Two-loop recursion (Nocedal & Wright, Algorithm 7.4). The newest
        # pair sits at index m-1 of each deque, so the first loop runs
        # newest-to-oldest and the second oldest-to-newest. The recursion
        # overwrites q, so keep a copy of the gradient for later use.
        g_sigma, g_mu = q_sigma.copy(), q_mu.copy()
        for i in range(m - 1, -1, -1):
            alpha_sigma[i] = rho_sigma[i] * s_sigma[i].inner(q_sigma)
            q_sigma = q_sigma - alpha_sigma[i] * y_sigma[i]
            alpha_mu[i] = rho_mu[i] * s_mu[i].inner(q_mu)
            q_mu = q_mu - alpha_mu[i] * y_mu[i]

        # Initial Hessian approximation H_0 = gamma_k * I
        gamma_k_sigma = s_sigma[-1].inner(y_sigma[-1]) / y_sigma[-1].inner(y_sigma[-1])
        r_sigma = gamma_k_sigma * q_sigma
        gamma_k_mu = s_mu[-1].inner(y_mu[-1]) / y_mu[-1].inner(y_mu[-1])
        r_mu = gamma_k_mu * q_mu

        for i in range(0, m):
            beta_sigma = rho_sigma[i] * y_sigma[i].inner(r_sigma)
            r_sigma = r_sigma + s_sigma[i] * (alpha_sigma[i] - beta_sigma)
            beta_mu = rho_mu[i] * y_mu[i].inner(r_mu)
            r_mu = r_mu + s_mu[i] * (alpha_mu[i] - beta_mu)

        p_sigma_k = -r_sigma
        p_mu_k = -r_mu
        a_sigma = linesearch(cost_sigma, grad_sigma, sigma.vector(), p_sigma_k,
                             c, g_sigma.inner(p_sigma_k))
        a_mu = linesearch(cost_mu, grad_mu, mu.vector(), p_mu_k,
                          c, g_mu.inner(p_mu_k))
        sigma.vector()[:] = sigma.vector()[:] + a_sigma * p_sigma_k
        mu.vector()[:] = mu.vector()[:] + a_mu * p_mu_k

        k += 1
        if m < m_MAX:
            m += 1
        s_sigma.append(a_sigma * p_sigma_k)
        s_mu.append(a_mu * p_mu_k)
        q_sigma_prev, q_mu_prev = g_sigma, g_mu
        q_sigma, q_mu = gradient(gamma, sigma, mu, Gamma, u, v)
        print("obj_d_sigma norm: {}".format(norm(q_sigma)))
        print("obj_d_mu norm: {}".format(norm(q_mu)))
        y_sigma.append(q_sigma - q_sigma_prev)
        y_mu.append(q_mu - q_mu_prev)
        rho_sigma.append(1.0 / y_sigma[-1].inner(s_sigma[-1]))
        rho_mu.append(1.0 / y_mu[-1].inner(s_mu[-1]))

        # Discard the oldest correction pair once the memory is full
        if k >= m_MAX:
            s_sigma.popleft()
            s_mu.popleft()
            y_sigma.popleft()
            y_mu.popleft()
            rho_sigma.popleft()
            rho_mu.popleft()
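# The function above calls linesearch(cost_f, grad_f, x, p, c, slope), which is
# not defined in this section. A minimal sketch of a compatible helper under
# that assumed signature (cost_f/grad_f act on vectors, c is the current cost,
# slope is the directional derivative g.p); this is an illustrative
# backtracking Armijo search, not the original implementation.
def linesearch(cost_f, grad_f, x, p, c, slope, alpha=1.0, tau=0.5, c1=1e-4):
    # Shrink alpha until the Armijo sufficient-decrease condition holds:
    #   cost(x + alpha*p) <= c + c1 * alpha * slope
    # grad_f is unused here but kept for a Wolfe-condition variant.
    while cost_f(x + alpha * p) > c + c1 * alpha * slope:
        alpha *= tau
        if alpha < 1e-12:  # step has become effectively zero; give up
            break
    return alpha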
import logging

import matplotlib.pyplot as plt
from dolfin import *

import nb

logging.getLogger('FFC').setLevel(logging.WARNING)
logging.getLogger('UFL').setLevel(logging.WARNING)
set_log_active(False)

# Define the mesh and finite element spaces
nx = 32  # (unused: the mesh is read from file)
ny = 32
mesh = Mesh("circle.xml")
Vh = FunctionSpace(mesh, "CG", 1)
uh = Function(Vh)
u_hat = TestFunction(Vh)
u_tilde = TrialFunction(Vh)
nb.plot(mesh)
print("dim(Vh) = ", Vh.dim())

# Define the energy functional
Pi = sqrt(Constant(1.0) + inner(nabla_grad(uh), nabla_grad(uh)))*dx

def Dirichlet_boundary(x, on_boundary):
    return x[0]*x[0] + x[1]*x[1] > 0.99

u_0 = Expression("pow(x[0], 4) - pow(x[1], 2)")
bc = DirichletBC(Vh, u_0, Dirichlet_boundary)
u_zero = Constant(0.)
bc_zero = DirichletBC(Vh, u_zero, Dirichlet_boundary)
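# The energy functional Pi and the boundary conditions above set up the
# minimal surface problem but stop short of the solve. A minimal sketch of
# the remaining step, assuming the standard FEniCS variational approach
# (symbolic derivative of Pi, Newton solve of the optimality condition):
F = derivative(Pi, uh, u_hat)    # first variation: residual form
J = derivative(F, uh, u_tilde)   # second variation: Jacobian form
solve(F == 0, uh, bc, J=J)
nb.plot(uh, mytitle="Minimal surface solution")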
import numpy as np
import matplotlib.pyplot as plt

import nb

# Six bodies of increasing mass placed along a line
# (com=False presumably disables the centre-of-mass correction)
sim = nb.Simulation(com=False)
p0 = sim.add(m=1., x=-3.)
p1 = sim.add(m=2., x=-2.)
p2 = sim.add(m=3., x=-1.)
p3 = sim.add(m=4., x= 0.)
p4 = sim.add(m=5., x= 2.)
p5 = sim.add(m=6., x= 4.)

# Heuristic timestep scaled by total mass and the virial imbalance |T - U|
h = 1e-4 * sim.m**2.5 / (sim.N * np.sqrt(abs(sim.T - sim.U)))
sim.run(5., h)

fig = nb.plot(sim)
fig.savefig("6b1d.png")
import logging

import numpy as np
import matplotlib.pyplot as plt
from dolfin import *

import nb

logging.getLogger('FFC').setLevel(logging.WARNING)
logging.getLogger('UFL').setLevel(logging.WARNING)
set_log_active(False)

# 2. Define the mesh and the finite element space
n = 16
degree = 1
mesh = RectangleMesh(0, 0, 1, 1, n, n)
nb.plot(mesh)
Vh = FunctionSpace(mesh, 'Lagrange', degree)
print("dim(Vh) = ", Vh.dim())

# 3. Define boundary labels
class TopBoundary(SubDomain):
    def inside(self, x, on_boundary):
        return on_boundary and abs(x[1] - 1) < DOLFIN_EPS

class BottomBoundary(SubDomain):
    def inside(self, x, on_boundary):
        return on_boundary and abs(x[1]) < DOLFIN_EPS
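# The SubDomain classes above only describe the top and bottom edges; to use
# them in boundary integrals they still have to be attached to the mesh. A
# minimal sketch, assuming the classic DOLFIN facet-marker pattern; the marker
# ids 1 and 2 are arbitrary choices, not taken from the original:
boundary_parts = FacetFunction("size_t", mesh)
boundary_parts.set_all(0)
TopBoundary().mark(boundary_parts, 1)
BottomBoundary().mark(boundary_parts, 2)
ds = Measure("ds", domain=mesh, subdomain_data=boundary_parts)  # ds(1), ds(2)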
# define functions for state and adjoint
u = Function(Vu)
p = Function(Vu)

# define Trial and Test Functions
u_trial, p_trial, a_trial = TrialFunction(Vu), TrialFunction(Vu), TrialFunction(Va)
u_test, p_test, a_test = TestFunction(Vu), TestFunction(Vu), TestFunction(Va)

# initialize input functions
f = Constant(1.0)
u0 = Constant(0.0)

# plot
plt.figure(figsize=(15, 5))
nb.plot(mesh, subplot_loc=121, mytitle="Mesh", show_axis='on')
nb.plot(atrue, subplot_loc=122, mytitle="True parameter field")

# set up Dirichlet boundary conditions
def boundary(x, on_boundary):
    return on_boundary

bc_state = DirichletBC(Vu, u0, boundary)
bc_adj = DirichletBC(Vu, Constant(0.), boundary)

# 3. The cost functional evaluation:

# Regularization parameter
gamma = 1e-10

# weak forms for setting up the misfit and regularization components of the cost
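# The cell breaks off before the misfit and regularization forms are written
# out. A minimal sketch of the missing cost evaluation, assuming a
# Tikhonov-regularized least squares functional; the observed state ud is a
# hypothetical name not defined above:
W = assemble(inner(u_trial, u_test) * dx)  # mass matrix weighting the misfit
R = assemble(Constant(gamma) *
             inner(nabla_grad(a_trial), nabla_grad(a_test)) * dx)  # regularization

def cost(u, ud, a, W, R):
    # Return [total, misfit, regularization] for the LS cost functional
    diff = u.vector() - ud.vector()
    misfit = 0.5 * diff.inner(W * diff)
    reg = 0.5 * a.vector().inner(R * a.vector())
    return [misfit + reg, misfit, reg]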