def run_L2tracking_optimization(write_output=False):
    """ Test template for the L2 tracking optimization problem."""

    # setup problem
    mesh = fd.UnitSquareMesh(30, 30)
    Q = fs.FeControlSpace(mesh)
    inner = fs.ElasticityInnerProduct(Q)
    q = fs.ControlVector(Q, inner)

    # tool for developing new tests, allows storing shape iterates
    # (defined after Q so that the callback can reference Q.mesh_m)
    if write_output:
        out = fd.File("domain.pvd")

        def cb(*args):
            out.write(Q.mesh_m.coordinates)

        cb()
    else:
        cb = None

    # setup PDE constraint
    mesh_m = Q.mesh_m
    e = PoissonSolver(mesh_m)

    # create PDE-constrained objective functional
    J_ = L2trackingObjective(e, Q, cb=cb)
    J = fs.ReducedObjective(J_, e)

    # ROL parameters
    params_dict = {
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 10
            }
        },
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            },
        },
        'Status Test': {
            'Gradient Tolerance': 1e-4,
            'Step Tolerance': 1e-5,
            'Iteration Limit': 15
        }
    }

    # assemble and solve ROL optimization problem
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    state = solver.getAlgorithmState()
    assert state.gnorm < 1e-4

def solve(self):
    """Solve the optimization problem and return the optimized parameters."""
    bnd = self.bounds
    econs = self.constraints[0][0]
    emuls = self.constraints[0][1]
    icons = self.constraints[1][0]
    imuls = self.constraints[1][1]
    if len(icons) > 0:
        zeros = [i.clone() for i in imuls]
        ibnds = [ROL.Bounds(z, isLower=True) for z in zeros]
    else:
        ibnds = []

    rolproblem = ROL.OptimizationProblem(self.rolobjective,
                                         self.rolvector,
                                         bnd=bnd,
                                         econs=econs,
                                         emuls=emuls,
                                         icons=icons,
                                         imuls=imuls,
                                         ibnds=ibnds)
    x = self.rolvector
    params = ROL.ParameterList(self.params_dict, "Parameters")
    self.solver = ROL.OptimizationSolver(rolproblem, params)
    self.solver.solve()
    return self.problem.reduced_functional.controls.delist(x.dat)

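# Context sketch: ``solve`` above is a method and reads several attributes that
# its class must prepare. The skeleton below is a hypothetical illustration of
# that layout, not any library's actual API: the attribute names come from the
# method body, but the constructor signature and the grouping of constraints
# into ((econs, emuls), (icons, imuls)) are assumptions.
class SketchROLSolver:
    def __init__(self, rolobjective, rolvector, params_dict, problem,
                 bounds=None, econs=None, emuls=None, icons=None, imuls=None):
        self.rolobjective = rolobjective  # ROL.Objective wrapping the functional
        self.rolvector = rolvector        # ROL vector holding the control values
        self.params_dict = params_dict    # nested dict of ROL solver parameters
        self.problem = problem            # exposes reduced_functional.controls
        self.bounds = bounds              # optional ROL.Bounds on the controls
        # equality constraints/multipliers first, inequality ones second,
        # matching self.constraints[0] and self.constraints[1] in solve()
        self.constraints = ((econs or [], emuls or []),
                            (icons or [], imuls or []))

    solve = solve  # reuse the method defined above
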
def test_std_vector_run():
    params_dict = {
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 5
            }
        },
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Method'
                }
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-15,
            'Relative Gradient Tolerance': 1e-10,
            'Step Tolerance': 1e-16,
            'Relative Step Tolerance': 1e-10,
            'Iteration Limit': 10
        }
    }
    params = ROL.ParameterList(params_dict, "Parameters")
    algo = ROL.Algorithm("Line Search", params)
    x = ROL.StdVector(2)
    x[0] = -1.0
    x[1] = 2.0

    algo.run(x, obj)

    # check answer to 8 decimal places
    assert round(x[0] - 1.0, 8) == 0.0
    assert round(x[1], 8) == 0.0

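# ``obj`` in test_std_vector_run is a module-level fixture that is not part of
# this excerpt. The asserts only require a minimizer at (1, 0); a hypothetical
# stand-in (an assumption, not the original fixture) could be:
class StandInObj(ROL.Objective):
    """Convex quadratic (x0 - 1)^2 + x1^2 with minimizer (1, 0)."""

    def __init__(self):
        ROL.Objective.__init__(self)

    def value(self, x, tol):
        return (x[0] - 1.0)**2 + x[1]**2

    def gradient(self, g, x, tol):
        g[0] = 2.0 * (x[0] - 1.0)
        g[1] = 2.0 * x[1]
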
def run_HS13(Vec, params_dict):
    obj = HS13_Obj()
    x = Vec(2)
    d = Vec(2)
    HS13_initial_guess(x)
    l = Vec(1)
    l[0] = 1.0
    icon = HS13_Icon()
    ilower = Vec(1)
    HS13_Ibnds(ilower)
    ibnd = ROL.Bounds(ilower, isLower=True)
    lower = Vec(2)
    HS13_Bnd(lower)
    bnd = ROL.Bounds(lower, isLower=True)
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(obj, x, bnd=bnd,
                                      icons=[icon], imuls=[l], ibnds=[ibnd])
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    print(x[0], x[1])
    assert HS13_minimum(x)

def test_equality_constraint(pytestconfig):
    mesh = fs.DiskMesh(0.05, radius=2.)
    Q = fs.FeControlSpace(mesh)
    inner = fs.ElasticityInnerProduct(Q, direct_solve=True)
    mesh_m = Q.mesh_m
    (x, y) = fd.SpatialCoordinate(mesh_m)

    q = fs.ControlVector(Q, inner)
    if pytestconfig.getoption("verbose"):
        out = fd.File("domain.pvd")

        def cb(*args):
            out.write(Q.mesh_m.coordinates)
    else:
        cb = None

    f = (pow(2 * x, 2)) + pow(y - 0.1, 2) - 1.2
    J = fsz.LevelsetFunctional(f, Q, cb=cb)

    vol = fsz.LevelsetFunctional(fd.Constant(1.0), Q)
    e = fs.EqualityConstraint([vol])
    emul = ROL.StdVector(1)

    params_dict = {
        'Step': {
            'Type': 'Augmented Lagrangian',
            'Augmented Lagrangian': {
                'Subproblem Step Type': 'Line Search',
                'Penalty Parameter Growth Factor': 2.,
                'Initial Penalty Parameter': 1.,
                'Subproblem Iteration Limit': 20,
            },
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            },
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 5
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-4,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 10
        }
    }

    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q, econ=e, emul=emul)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    state = solver.getAlgorithmState()
    assert state.gnorm < 1e-4
    assert state.cnorm < 1e-6

def run_U(algo):
    obj = MyObj()
    paramsDict["Step"]["Type"] = algo
    params = ROL.ParameterList(paramsDict, "Parameters")
    x = NumpyVector(2)
    optimProblem = ROL.OptimizationProblem(obj, x)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve()
    print(x.data)
    assert round(x[0] - 1.0, 6) == 0.0
    assert round(x[1], 6) == 0.0

def test_create_bounds_seperately():
    obj = MyObj()
    paramsDict["Step"]["Type"] = "Trust Region"
    params = ROL.ParameterList(paramsDict, "Parameters")
    x = NumpyVector(2)
    bnd = createBounds()
    bnd.test()
    optimProblem = ROL.OptimizationProblem(obj, x, bnd=bnd)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve()
    assert round(x[0] - 0.7, 6) == 0.0
    assert round(x[1], 6) == 0.0

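# ``createBounds`` is another fixture defined outside this excerpt. Given the
# asserted optimum (0.7, 0) here and the explicit box in run_B below, a
# plausible, hypothetical version (an assumption, not the original) is:
def createBoundsSketch():
    # box [-1, 0.7]^2, built the same way run_B constructs its bounds
    x_lo = NumpyVector(2)
    x_lo[0] = -1
    x_lo[1] = -1
    x_up = NumpyVector(2)
    x_up[0] = 0.7
    x_up[1] = 0.7
    return ROL.Bounds(x_lo, x_up, 1.0)
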
def run_HS4(Vec, params_dict):
    obj = HS4_Obj()
    x = Vec(2)
    HS4_initial_guess(x)
    lower = Vec(2)
    upper = Vec(2)
    HS4_Bnd(lower, upper)
    bnd = ROL.Bounds(lower, upper, 1.0)
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(obj, x, bnd=bnd)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    print(x[0], x[1])
    assert HS4_minimum(x)

def run_E(algo):
    obj = MyObj2()
    paramsDict["Step"]["Type"] = algo
    params = ROL.ParameterList(paramsDict, "Parameters")
    x = NumpyVector(2)
    x[0] = 0.5 * 0.5**2
    x[1] = 0.5 * 0.5**2
    l = NumpyVector(1)
    con = EqConstraint()
    optimProblem = ROL.OptimizationProblem(obj, x, econ=con, emul=l)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve()
    assert round(x[0] - 0.707106, 5) == 0.0
    assert round(x[1] - 0.707106, 5) == 0.0

def run_HS28(Vec, params_dict):
    obj = HS28_Obj()
    x = Vec(3)
    HS28_initial_guess(x)
    # obj.checkGradient(x)
    # obj.checkHessVec(x)
    # re-initialize in case the derivative checks above are enabled
    HS28_initial_guess(x)
    l = Vec(1)
    l[0] = 0.0
    con = HS28_Econ()
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(obj, x, econs=[con], emuls=[l])
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    print(x[0], x[1], x[2])
    assert HS28_minimum(x)

def run_B(algo):
    obj = MyObj()
    paramsDict["Step"]["Type"] = algo
    params = ROL.ParameterList(paramsDict, "Parameters")
    x = NumpyVector(2)
    x_lo = NumpyVector(2)
    x_lo[0] = -1
    x_lo[1] = -1
    x_up = NumpyVector(2)
    x_up[0] = +0.7
    x_up[1] = +0.7
    bnd = ROL.Bounds(x_lo, x_up, 1.0)
    optimProblem = ROL.OptimizationProblem(obj, x, bnd=bnd)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve()
    assert round(x[0] - 0.7, 6) == 0.0
    assert round(x[1], 6) == 0.0

def test_TimeTracking():
    """ Main test."""

    # setup problem
    mesh = fd.UnitSquareMesh(20, 20)
    Q = fs.FeControlSpace(mesh)
    inner = fs.LaplaceInnerProduct(Q, fixed_bids=[1, 2, 3, 4])
    q = fs.ControlVector(Q, inner)

    # create PDE-constrained objective functional
    J = TimeTracking(Q)

    # ROL parameters
    params_dict = {
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 25
            }
        },
        'Step': {
            'Type': 'Trust Region'
        },
        'Status Test': {
            'Gradient Tolerance': 1e-3,
            'Step Tolerance': 1e-8,
            'Iteration Limit': 20
        }
    }

    # assemble and solve ROL optimization problem
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    state = solver.getAlgorithmState()
    assert state.gnorm < 1e-3

def run_HS29(Vec, params_dict):
    params = ROL.ParameterList(params_dict, "Parameters")
    obj = HS29_Obj()
    x = Vec(3)
    x[0] = 1.
    x[1] = 2.
    x[2] = .1
    d = Vec(3)
    d[0] = 1
    d[1] = -1
    d[2] = 1.
    v = Vec(3)
    v[0] = 1
    v[1] = -1
    v[2] = 1.
    # obj.checkGradient(x)
    # obj.checkHessVec(x, d, 4, 1)
    HS29_initial_guess(x)
    l = Vec(1)
    l[0] = 0.0
    con = HS29_Icon()
    jv = Vec(1)
    jv[0] = 1.
    # con.checkApplyJacobian(x, d, jv, 4, 1)
    # con.checkAdjointConsistencyJacobian(jv, d, x)
    # con.checkApplyAdjointHessian(x, jv, d, v, 5, 1)
    ilower = Vec(1)
    HS29_Ibnds(ilower)
    ibnd = ROL.Bounds(ilower, isLower=True)
    problem = ROL.OptimizationProblem(obj, x, icons=[con],
                                      imuls=[l], ibnds=[ibnd])
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    print(x[0], x[1], x[2])
    assert HS29_minimum(x)

def test_levelset(dim, inner_t, controlspace_t, use_extension, pytestconfig):
    """ Test template for fsz.LevelsetFunctional with a volume constraint."""
    verbose = pytestconfig.getoption("verbose")

    clscale = 0.1 if dim == 2 else 0.2

    # make the mesh a bit coarser if we are using a multigrid control space,
    # as we are refining anyway
    if controlspace_t == fs.FeMultiGridControlSpace:
        clscale *= 4

    if dim == 2:
        mesh = fs.DiskMesh(clscale)
    elif dim == 3:
        mesh = fs.SphereMesh(clscale)
    else:
        raise NotImplementedError

    if controlspace_t == fs.BsplineControlSpace:
        if dim == 2:
            bbox = [(-2, 2), (-2, 2)]
            orders = [2, 2]
            levels = [4, 4]
        else:
            bbox = [(-3, 3), (-3, 3), (-3, 3)]
            orders = [2, 2, 2]
            levels = [3, 3, 3]
        Q = fs.BsplineControlSpace(mesh, bbox, orders, levels)
    elif controlspace_t == fs.FeMultiGridControlSpace:
        Q = fs.FeMultiGridControlSpace(mesh, refinements=1, order=2)
    else:
        Q = controlspace_t(mesh)

    inner = inner_t(Q)

    # if running with -v or --verbose, then export the shapes
    if verbose:
        out = fd.File("domain.pvd")

        def cb(*args):
            out.write(Q.mesh_m.coordinates)

        cb()
    else:
        cb = None

    # levelset test case
    if dim == 2:
        (x, y) = fd.SpatialCoordinate(Q.mesh_m)
        f = (pow(x, 2)) + pow(1.3 * y, 2) - 1.
    elif dim == 3:
        (x, y, z) = fd.SpatialCoordinate(Q.mesh_m)
        f = (pow(x, 2)) + pow(0.8 * y, 2) + pow(1.3 * z, 2) - 1.
    else:
        raise NotImplementedError

    J = fsz.LevelsetFunctional(f, Q, cb=cb, scale=0.1)

    if use_extension == "w_ext":
        ext = fs.ElasticityExtension(Q.V_r)
    elif use_extension == "w_ext_fixed_dim":
        ext = fs.ElasticityExtension(Q.V_r, fixed_dims=[0])
    else:
        ext = None

    q = fs.ControlVector(Q, inner, boundary_extension=ext)

    # These tolerances are not very stringent, but solutions are correct with
    # tighter tolerances. The combination
    # FeMultiGridControlSpace-ElasticityInnerProduct fails because the mesh
    # self-intersects (one should probably be more careful with the opt
    # params).
    grad_tol = 1e-1
    itlim = 15
    itlimsub = 15

    # volume constraint
    vol = fsz.LevelsetFunctional(fd.Constant(1.0), Q, scale=1)
    initial_vol = vol.value(q, None)
    econ = fs.EqualityConstraint([vol], target_value=[initial_vol])
    emul = ROL.StdVector(1)

    # ROL parameters
    params_dict = {
        'Step': {
            'Type': 'Augmented Lagrangian',
            'Augmented Lagrangian': {
                'Subproblem Step Type': 'Line Search',
                'Penalty Parameter Growth Factor': 1.05,
                'Print Intermediate Optimization History': True,
                'Subproblem Iteration Limit': itlimsub
            },
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            },
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 50
            }
        },
        'Status Test': {
            'Gradient Tolerance': grad_tol,
            'Step Tolerance': 1e-10,
            'Iteration Limit': itlim
        }
    }

    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q, econ=econ, emul=emul)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    # and that the volume has not changed too much
    state = solver.getAlgorithmState()
    assert state.gnorm < grad_tol
    assert abs(vol.value(q, None) - initial_vol) < 1e-2

def get_rol_parameters(method, use_bfgs, options):
    # if a parameter-list file is supplied, it takes precedence
    paramlist_filename = options.get("paramlist_filename", None)
    if paramlist_filename is not None:
        paramlist = ROL.ParameterList(paramlist_filename)
        return paramlist

    assert method in [
        "Augmented Lagrangian", "Fletcher", "Moreau-Yosida Penalty"
    ]

    paramsDict = {}
    paramsDict["Step"] = {"Type": method}
    # paramsDict["Step"]["Fletcher"] = {'Penalty Parameter': 1e8}

    paramsDict["Step"]["Trust Region"] = {}
    paramsDict["Step"]["Trust Region"]["Subproblem Solver"] = "Truncated CG"
    paramsDict["Step"]["Trust Region"]["Subproblem Model"] = "Kelley Sachs"
    paramsDict["Step"]["Trust Region"]["Initial Radius"] = 10
    # alternatives: the "Coleman-Li" model, or the "Lin-More" solver and model

    paramsDict["Step"]["Augmented Lagrangian"] = {
        # 'Initial Optimality Tolerance': 1e-1,
        # 'Initial Feasibility Tolerance': 1e-1,
        'Use Default Problem Scaling': False,
        'Print Intermediate Optimization History':
            options.get('verbose', 0) > 2,
        'Use Default Initial Penalty Parameter': False,
        'Initial Penalty Parameter': 1e3,
        'Maximum Penalty Parameter': 1e8,
        'Penalty Parameter Growth Factor': 2,
        # 'Subproblem Iteration Limit': 200
    }
    # paramsDict["Step"]["Moreau-Yosida Penalty"] = {
    #     'Subproblem': {'Iteration Limit': 20},
    #     'Initial Penalty Parameter': 1e-2,
    #     'Penalty Parameter Growth Factor': 2,
    #     'Update Penalty': True}

    paramsDict["General"] = {
        'Print Verbosity': int(options.get('verbose', 0) > 3)
    }
    paramsDict["General"]["Secant"] = {"Use as Hessian": False}
    if use_bfgs:
        paramsDict["General"]["Secant"]["Use as Hessian"] = True

    paramsDict["Step"]["Line Search"] = {}
    paramsDict["Step"]["Line Search"]["Descent Method"] = {}
    paramsDict["Step"]["Line Search"]["Descent Method"]["Type"] = \
        "Quasi-Newton Method"

    paramsDict["Status Test"] = {
        "Gradient Tolerance": options.get('gtol', 1e-8),
        "Step Tolerance": options.get('xtol', 1e-14),
        "Constraint Tolerance": options.get('ctol', 1e-8),
        "Iteration Limit": options.get("maxiter", 100)
    }

    paramlist = ROL.ParameterList(paramsDict, "Parameters")
    return paramlist

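# Minimal usage sketch for get_rol_parameters. The option keys follow the
# options.get calls above; ``problem`` stands for any previously assembled
# ROL.OptimizationProblem and is hypothetical here.
def run_with_augmented_lagrangian(problem):
    params = get_rol_parameters("Augmented Lagrangian", use_bfgs=True,
                                options={"gtol": 1e-6, "maxiter": 50})
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    return solver.getAlgorithmState()
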
def test_periodic(dim, inner_t, use_extension, pytestconfig):
    """ Test template for PeriodicControlSpace."""
    verbose = pytestconfig.getoption("verbose")

    if dim == 2:
        mesh = fd.PeriodicUnitSquareMesh(30, 30)
    elif dim == 3:
        mesh = fd.PeriodicUnitCubeMesh(20, 20, 20)
    else:
        raise NotImplementedError

    Q = fs.FeControlSpace(mesh)
    inner = inner_t(Q)

    # levelset test case
    V = fd.FunctionSpace(Q.mesh_m, "DG", 0)
    sigma = fd.Function(V)
    if dim == 2:
        x, y = fd.SpatialCoordinate(Q.mesh_m)
        g = fd.sin(y * np.pi)  # truncate at bdry
        f = fd.cos(2 * np.pi * x) * g
        perturbation = 0.05 * fd.sin(x * np.pi) * g**2
        sigma.interpolate(g * fd.cos(2 * np.pi * x * (1 + perturbation)))
    elif dim == 3:
        x, y, z = fd.SpatialCoordinate(Q.mesh_m)
        g = fd.sin(y * np.pi) * fd.sin(z * np.pi)  # truncate at bdry
        f = fd.cos(2 * np.pi * x) * g
        perturbation = 0.05 * fd.sin(x * np.pi) * g**2
        sigma.interpolate(g * fd.cos(2 * np.pi * x * (1 + perturbation)))
    else:
        raise NotImplementedError

    class LevelsetFct(fs.ShapeObjective):
        def __init__(self, sigma, f, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.sigma = sigma  # initial
            self.f = f          # target
            Vdet = fd.FunctionSpace(Q.mesh_r, "DG", 0)
            self.detDT = fd.Function(Vdet)

        def value_form(self):
            # volume integral; return NaN if the mesh degenerates
            self.detDT.interpolate(fd.det(fd.grad(self.Q.T)))
            if min(self.detDT.vector()) > 0.05:
                integrand = (self.sigma - self.f)**2
            else:
                integrand = np.nan * (self.sigma - self.f)**2
            return integrand * fd.dx(metadata={"quadrature_degree": 1})

    # if running with -v or --verbose, then export the shapes
    if verbose:
        out = fd.File("sigma.pvd")

        def cb(*args):
            out.write(sigma)
    else:
        cb = None

    J = LevelsetFct(sigma, f, Q, cb=cb)

    if use_extension == "w_ext":
        ext = fs.ElasticityExtension(Q.V_r)
    elif use_extension == "w_ext_fixed_dim":
        ext = fs.ElasticityExtension(Q.V_r, fixed_dims=[0])
    else:
        ext = None

    q = fs.ControlVector(Q, inner, boundary_extension=ext)

    # move the mesh a bit to check that we are not doing the Taylor test
    # in T=id
    g = q.clone()
    J.gradient(g, q, None)
    q.plus(g)
    J.update(q, None, 1)

    # start Taylor test
    J.gradient(g, q, None)
    res = J.checkGradient(q, g, 5, 1)
    errors = [l[-1] for l in res]
    assert errors[-1] < 0.11 * errors[-2]
    q.scale(0)
    # end Taylor test

    # ROL parameters
    grad_tol = 1e-4
    params_dict = {
        'Step': {
            'Type': 'Trust Region'
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 25
            }
        },
        'Status Test': {
            'Gradient Tolerance': grad_tol,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 40
        }
    }

    # assemble and solve ROL optimization problem
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    state = solver.getAlgorithmState()
    assert state.gnorm < grad_tol

def test_create_problem_seperately():
    paramsDict["Step"]["Type"] = "Trust Region"
    params = ROL.ParameterList(paramsDict, "Parameters")
    problem = get_problem()
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

                'Feasibility Tolerance Update Exponent': 0.1,
                'Feasibility Tolerance Decrease Exponent': 0.9,
                'Print Intermediate Optimization History': True,
                'Subproblem Step Type': 'Line Search',
                'Subproblem Iteration Limit': 3
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-15,
            'Relative Gradient Tolerance': 1e-10,
            'Step Tolerance': 1e-16,
            'Relative Step Tolerance': 1e-10,
            'Iteration Limit': 30
        }
    }
    params = ROL.ParameterList(paramsDict, "Parameters")
    bound_constraint = ROL.Bounds(lower, upper, 1.0)
    optimProblem = ROL.OptimizationProblem(obj, x, bnd=bound_constraint,
                                           econ=volConstr,
                                           emul=l_initializacao)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve()
    print("changed here")  # debug output
    q.assign(0.1)
    interm = obj.resposta()
    del x

def test_box_constraint(pytestconfig):
    n = 5
    mesh = fd.UnitSquareMesh(n, n)
    T = mesh.coordinates.copy(deepcopy=True)
    (x, y) = fd.SpatialCoordinate(mesh)
    T.interpolate(T + fd.Constant((1, 0)) * x * y)
    mesh = fd.Mesh(T)

    Q = fs.FeControlSpace(mesh)
    inner = fs.LaplaceInnerProduct(Q, fixed_bids=[1])
    mesh_m = Q.mesh_m
    q = fs.ControlVector(Q, inner)

    if pytestconfig.getoption("verbose"):
        out = fd.File("domain.pvd")

        def cb():
            out.write(mesh_m.coordinates)
    else:
        def cb():
            pass

    lower_bound = Q.T.copy(deepcopy=True)
    lower_bound.interpolate(fd.Constant((-0.0, -0.0)))
    upper_bound = Q.T.copy(deepcopy=True)
    upper_bound.interpolate(fd.Constant((+1.3, +0.9)))

    J = fsz.MoYoBoxConstraint(1, [2], Q, lower_bound=lower_bound,
                              upper_bound=upper_bound, cb=cb,
                              quadrature_degree=100)
    g = q.clone()
    J.gradient(g, q, None)
    taylor_result = J.checkGradient(q, g, 9, 1)

    for i in range(len(taylor_result) - 1):
        if taylor_result[i][3] > 1e-7:
            assert taylor_result[i + 1][3] <= taylor_result[i][3] * 0.11

    params_dict = {
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            }
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 2
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-10,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 150
        }
    }

    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    Tvec = Q.T.vector()
    nodes = fd.DirichletBC(Q.V_r, fd.Constant((0.0, 0.0)), [2]).nodes
    assert np.all(Tvec[nodes, 0] <= 1.3 + 1e-4)
    assert np.all(Tvec[nodes, 1] <= 0.9 + 1e-4)

def test_objective_plus_box_constraint(pytestconfig):
    n = 10
    mesh = fd.UnitSquareMesh(n, n)
    T = mesh.coordinates.copy(deepcopy=True)
    (x, y) = fd.SpatialCoordinate(mesh)
    T.interpolate(T + fd.Constant((0, 0)))
    mesh = fd.Mesh(T)

    Q = fs.FeControlSpace(mesh)
    inner = fs.LaplaceInnerProduct(Q)
    mesh_m = Q.mesh_m
    q = fs.ControlVector(Q, inner)

    if pytestconfig.getoption("verbose"):
        out = fd.File("domain.pvd")

        def cb():
            out.write(mesh_m.coordinates)
    else:
        def cb():
            pass

    lower_bound = Q.T.copy(deepcopy=True)
    lower_bound.interpolate(fd.Constant((-0.2, -0.2)))
    upper_bound = Q.T.copy(deepcopy=True)
    upper_bound.interpolate(fd.Constant((+1.2, +1.2)))

    # levelset test case
    (x, y) = fd.SpatialCoordinate(Q.mesh_m)
    f = (pow(x - 0.5, 2)) + pow(y - 0.5, 2) - 4.
    J1 = fsz.LevelsetFunctional(f, Q, cb=cb, quadrature_degree=10)
    J2 = fsz.MoYoBoxConstraint(10., [1, 2, 3, 4], Q,
                               lower_bound=lower_bound,
                               upper_bound=upper_bound,
                               cb=cb, quadrature_degree=10)
    J3 = fsz.MoYoSpectralConstraint(100, fd.Constant(0.6), Q,
                                    cb=cb, quadrature_degree=100)
    J = 0.1 * J1 + J2 + J3

    g = q.clone()
    J.gradient(g, q, None)
    taylor_result = J.checkGradient(q, g, 9, 1)

    for i in range(len(taylor_result) - 1):
        if taylor_result[i][3] > 1e-6 and taylor_result[i][3] < 1e-3:
            assert taylor_result[i + 1][3] <= taylor_result[i][3] * 0.15

    params_dict = {
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            }
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 2
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-10,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 10
        }
    }

    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    Tvec = Q.T.vector()
    nodes = fd.DirichletBC(Q.V_r, fd.Constant((0.0, 0.0)), [2]).nodes
    assert np.all(Tvec[nodes, 0] <= 1.2 + 1e-1)
    assert np.all(Tvec[nodes, 1] <= 1.2 + 1e-1)

    d = Function(V0)
    d = LA(d.vector(), dot_product)
    x.checkVector(d, g)

    jd = Function(V0)
    jd = LA(jd.vector(), dot_product)

    lower = interpolate(Constant(0.0), V0)
    lower = LA(lower.vector(), dot_product)
    upper = interpolate(Constant(1.0), V0)
    upper = LA(upper.vector(), dot_product)

    # instantiate objective class for the Poisson problem
    obj = ObjR(dot_product)
    obj.checkGradient(x, d, 7, 1)

    volConstr = VolConstraint(dot_product)
    volConstr.checkApplyJacobian(x, d, jd, 6, 1)
    volConstr.checkAdjointConsistencyJacobian(v, d, x)

    with open('input.xml', 'r') as myfile:
        parametersXML = myfile.read().replace('\n', '')
    set_log_level(30)
    params = ROL.ParameterList(parametersXML)

    bound_constraint = ROL.BoundConstraint(lower, upper, 1.0)
    alg2 = ROL.Algorithm("Augmented Lagrangian", params)
    penaltyParam = 1
    augLag = ROL.AugmentedLagrangian(obj, volConstr, l, penaltyParam, x, c,
                                     params)
    alg2.run(x, l, augLag, volConstr, bound_constraint)

def test_gradient_talyor_remainder_v2():
    from ROL.firedrake_vector import FiredrakeVector as FeVector
    import ROL

    comm = spyro.utils.mpi_init(model)
    mesh, V = spyro.io.read_mesh(model, comm)
    vp_guess = _make_vp_guess(V, mesh)

    sources = spyro.Sources(model, mesh, V, comm)
    receivers = spyro.Receivers(model, mesh, V, comm)

    vp_exact = _make_vp_exact(V, mesh)
    _, p_exact_recv = spyro.solvers.forward(model, mesh, comm, vp_exact,
                                            sources, wavelet, receivers)

    qr_x, _, _ = spyro.domains.quadrature.quadrature_rules(V)

    class L2Inner(object):
        def __init__(self):
            self.A = assemble(TrialFunction(V) * TestFunction(V) *
                              dx(rule=qr_x), mat_type="matfree")
            self.Ap = as_backend_type(self.A).mat()

        def eval(self, _u, _v):
            upet = as_backend_type(_u).vec()
            vpet = as_backend_type(_v).vec()
            A_u = self.Ap.createVecLeft()
            self.Ap.mult(upet, A_u)
            return vpet.dot(A_u)

    class Objective(ROL.Objective):
        def __init__(self, inner_product):
            ROL.Objective.__init__(self)
            self.inner_product = inner_product
            self.p_guess = None
            self.misfit = None

        def value(self, x, tol):
            """Compute the functional."""
            self.p_guess, p_guess_recv = spyro.solvers.forward(
                model,
                mesh,
                comm,
                vp_guess,
                sources,
                wavelet,
                receivers,
                output=False,
            )
            self.misfit = spyro.utils.evaluate_misfit(model, p_guess_recv,
                                                      p_exact_recv)
            J = spyro.utils.compute_functional(model, self.misfit)
            return J

        def gradient(self, g, x, tol):
            dJ = spyro.solvers.gradient(
                model,
                mesh,
                comm,
                vp_guess,
                receivers,
                self.p_guess,
                self.misfit,
            )
            g.scale(0)
            g.vec += dJ

        def update(self, x, flag, iteration):
            vp_guess.assign(Function(V, x.vec, name="velocity"))

    paramsDict = {
        "Step": {
            "Line Search": {
                "Descent Method": {
                    "Type": "Quasi-Newton Method"
                }
            },
            "Type": "Line Search",
        },
        "Status Test": {
            "Gradient Tolerance": 1e-12,
            "Iteration Limit": 20
        },
    }
    params = ROL.ParameterList(paramsDict, "Parameters")

    inner_product = L2Inner()
    obj = Objective(inner_product)
    u = Function(V).assign(vp_guess)
    opt = FeVector(u.vector(), inner_product)

    d = Function(V)
    x, y = SpatialCoordinate(mesh)
    # d.interpolate(sin(x * pi) * sin(y * pi))
    d.vector()[:] = np.random.rand(V.dim())
    # d.assign(0.1)
    d = FeVector(d.vector(), inner_product)

    # check the gradient using a model perturbation d,
    # 4 iterations, 2nd-order test
    obj.checkGradient(opt, d, 4, 2)

def test_spectral_constraint(pytestconfig):
    n = 5
    mesh = fd.UnitSquareMesh(n, n)
    T = fd.Function(fd.VectorFunctionSpace(mesh, "CG", 1)).interpolate(
        fd.SpatialCoordinate(mesh) - fd.Constant((0.5, 0.5)))
    mesh = fd.Mesh(T)

    Q = fs.FeControlSpace(mesh)
    inner = fs.LaplaceInnerProduct(Q)
    mesh_m = Q.mesh_m
    q = fs.ControlVector(Q, inner)

    if pytestconfig.getoption("verbose"):
        out = fd.File("domain.pvd")

        def cb():
            out.write(mesh_m.coordinates)
    else:
        def cb():
            pass

    J = fsz.MoYoSpectralConstraint(0.5, fd.Constant(0.1), Q, cb=cb)
    q.fun += Q.T
    g = q.clone()
    J.update(q, None, -1)
    J.gradient(g, q, None)
    cb()
    taylor_result = J.checkGradient(q, g, 7, 1)

    for i in range(len(taylor_result) - 1):
        assert taylor_result[i + 1][3] <= taylor_result[i][3] * 0.11

    params_dict = {
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 2
            }
        },
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-10,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 150
        }
    }
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    Tvec = Q.T.vector()[:, :]
    for i in range(Tvec.shape[0]):
        assert abs(Tvec[i, 0]) < 0.55 + 1e-4
        assert abs(Tvec[i, 1]) < 0.55 + 1e-4
    assert np.any(np.abs(Tvec) > 0.55 - 1e-4)

J = fsz.LevelsetFunctional(f, Q, cb=lambda: out.write(mesh_m.coordinates))
q = fs.ControlVector(Q, inner)

params_dict = {
    'General': {
        'Secant': {
            'Type': 'Limited-Memory BFGS',
            'Maximum Storage': 5
        }
    },
    'Step': {
        'Type': 'Line Search',
        'Line Search': {
            'Descent Method': {
                'Type': 'Quasi-Newton Step'
            }
        }
    },
    'Status Test': {
        'Gradient Tolerance': 1e-5,
        'Step Tolerance': 1e-6,
        'Iteration Limit': 40
    }
}

params = ROL.ParameterList(params_dict, "Parameters")
problem = ROL.OptimizationProblem(J, q)
solver = ROL.OptimizationSolver(problem, params)
solver.solve()

def test_levelset(dim, inner_t, controlspace_t, use_extension, pytestconfig):
    """ Test template for fsz.LevelsetFunctional."""
    verbose = pytestconfig.getoption("verbose")

    clscale = 0.1 if dim == 2 else 0.2

    # make the mesh a bit coarser if we are using a multigrid control space,
    # as we are refining anyway
    if controlspace_t == fs.FeMultiGridControlSpace:
        clscale *= 2

    if dim == 2:
        mesh = fs.DiskMesh(clscale)
    elif dim == 3:
        mesh = fs.SphereMesh(clscale)
    else:
        raise NotImplementedError

    if controlspace_t == fs.BsplineControlSpace:
        if dim == 2:
            bbox = [(-2, 2), (-2, 2)]
            orders = [2, 2]
            levels = [4, 4]
        else:
            bbox = [(-3, 3), (-3, 3), (-3, 3)]
            orders = [2, 2, 2]
            levels = [3, 3, 3]
        Q = fs.BsplineControlSpace(mesh, bbox, orders, levels)
    elif controlspace_t == fs.FeMultiGridControlSpace:
        Q = fs.FeMultiGridControlSpace(mesh, refinements=1, order=2)
    else:
        Q = controlspace_t(mesh)

    inner = inner_t(Q)

    # if running with -v or --verbose, then export the shapes
    if verbose:
        out = fd.File("domain.pvd")

        def cb(*args):
            out.write(Q.mesh_m.coordinates)

        cb()
    else:
        cb = None

    # levelset test case
    if dim == 2:
        (x, y) = fd.SpatialCoordinate(Q.mesh_m)
        f = (pow(x, 2)) + pow(1.3 * y, 2) - 1.
    elif dim == 3:
        (x, y, z) = fd.SpatialCoordinate(Q.mesh_m)
        f = (pow(x, 2)) + pow(0.8 * y, 2) + pow(1.3 * z, 2) - 1.
    else:
        raise NotImplementedError

    J = fsz.LevelsetFunctional(f, Q, cb=cb, scale=0.1)

    if use_extension == "w_ext":
        ext = fs.ElasticityExtension(Q.V_r)
    elif use_extension == "w_ext_fixed_dim":
        ext = fs.ElasticityExtension(Q.V_r, fixed_dims=[0])
    else:
        ext = None

    q = fs.ControlVector(Q, inner, boundary_extension=ext)

    # move the mesh a bit to check that we are not doing the Taylor test
    # in T=id
    g = q.clone()
    J.gradient(g, q, None)
    q.plus(g)
    J.update(q, None, 1)

    # start Taylor test
    J.gradient(g, q, None)
    res = J.checkGradient(q, g, 5, 1)
    errors = [l[-1] for l in res]
    assert errors[-1] < 0.11 * errors[-2]
    q.scale(0)
    # end Taylor test

    grad_tol = 1e-6 if dim == 2 else 1e-4

    # ROL parameters
    params_dict = {
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 50
            }
        },
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            }
        },
        'Status Test': {
            'Gradient Tolerance': grad_tol,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 150
        }
    }

    # assemble and solve ROL optimization problem
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    state = solver.getAlgorithmState()
    assert state.gnorm < grad_tol