def solve(self):
    """Solve the optimization problem and return the optimized parameters."""
    bnd = self.bounds
    econs = self.constraints[0][0]
    emuls = self.constraints[0][1]
    icons = self.constraints[1][0]
    imuls = self.constraints[1][1]
    if len(icons) > 0:
        zeros = [i.clone() for i in imuls]
        ibnds = [ROL.Bounds(z, isLower=True) for z in zeros]
    else:
        ibnds = []
    rolproblem = ROL.OptimizationProblem(self.rolobjective,
                                         self.rolvector,
                                         bnd=bnd,
                                         econs=econs,
                                         emuls=emuls,
                                         icons=icons,
                                         imuls=imuls,
                                         ibnds=ibnds)
    x = self.rolvector
    params = ROL.ParameterList(self.params_dict, "Parameters")
    self.solver = ROL.OptimizationSolver(rolproblem, params)
    self.solver.solve()
    return self.problem.reduced_functional.controls.delist(x.dat)
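# A hedged usage sketch (an assumption, not from this file): the solve()
# method above reads like the driver of a pyadjoint-style ROL solver wrapper,
# since it unwraps self.problem.reduced_functional. Under that assumption a
# call site might look roughly like
#
#     solver = ROLSolver(MinimizationProblem(reduced_functional), params_dict)
#     optimal_controls = solver.solve()
#
# where the minimization problem object supplies self.bounds and
# self.constraints; the names here are illustrative only.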
def run_L2tracking_optimization(write_output=False):
    """Test template for fsz.LevelsetFunctional."""
    # setup problem
    mesh = fd.UnitSquareMesh(30, 30)
    Q = fs.FeControlSpace(mesh)
    inner = fs.ElasticityInnerProduct(Q)
    q = fs.ControlVector(Q, inner)

    # tool for developing new tests, allows storing shape iterates;
    # the callback is created after Q so that it can write Q.mesh_m
    if write_output:
        out = fd.File("domain.pvd")

        def cb(*args):
            out.write(Q.mesh_m.coordinates)

        cb()
    else:
        cb = None

    # setup PDE constraint
    mesh_m = Q.mesh_m
    e = PoissonSolver(mesh_m)

    # create PDE-constrained objective functional
    J_ = L2trackingObjective(e, Q, cb=cb)
    J = fs.ReducedObjective(J_, e)

    # ROL parameters
    params_dict = {
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 10
            }
        },
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            },
        },
        'Status Test': {
            'Gradient Tolerance': 1e-4,
            'Step Tolerance': 1e-5,
            'Iteration Limit': 15
        }
    }

    # assemble and solve ROL optimization problem
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    state = solver.getAlgorithmState()
    assert state.gnorm < 1e-4
def run_HS13(Vec, params_dict):
    obj = HS13_Obj()
    x = Vec(2)
    HS13_initial_guess(x)
    l = Vec(1)
    l[0] = 1.0
    icon = HS13_Icon()
    ilower = Vec(1)
    HS13_Ibnds(ilower)
    ibnd = ROL.Bounds(ilower, isLower=True)
    lower = Vec(2)
    HS13_Bnd(lower)
    bnd = ROL.Bounds(lower, isLower=True)
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(obj, x, bnd=bnd,
                                      icons=[icon], imuls=[l],
                                      ibnds=[ibnd])
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    print(x[0], x[1])
    assert HS13_minimum(x)
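# Hedged usage sketch for the Hock-Schittkowski harnesses (an illustration,
# not from the source): each run_HS* helper takes a vector factory and a ROL
# parameter dictionary. A driver could look like this, assuming the
# NumpyVector class with the indexing interface used elsewhere in this file.
def example_hs13_driver():
    params_dict = {
        'Step': {'Type': 'Augmented Lagrangian'},
        'Status Test': {'Iteration Limit': 100}
    }
    run_HS13(NumpyVector, params_dict)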
def test_equality_constraint(pytestconfig):
    mesh = fs.DiskMesh(0.05, radius=2.)
    Q = fs.FeControlSpace(mesh)
    inner = fs.ElasticityInnerProduct(Q, direct_solve=True)
    mesh_m = Q.mesh_m
    (x, y) = fd.SpatialCoordinate(mesh_m)
    q = fs.ControlVector(Q, inner)

    if pytestconfig.getoption("verbose"):
        out = fd.File("domain.pvd")

        def cb(*args):
            out.write(Q.mesh_m.coordinates)
    else:
        cb = None

    f = (pow(2 * x, 2)) + pow(y - 0.1, 2) - 1.2
    J = fsz.LevelsetFunctional(f, Q, cb=cb)
    vol = fsz.LevelsetFunctional(fd.Constant(1.0), Q)
    e = fs.EqualityConstraint([vol])
    emul = ROL.StdVector(1)

    params_dict = {
        'Step': {
            'Type': 'Augmented Lagrangian',
            'Augmented Lagrangian': {
                'Subproblem Step Type': 'Line Search',
                'Penalty Parameter Growth Factor': 2.,
                'Initial Penalty Parameter': 1.,
                'Subproblem Iteration Limit': 20,
            },
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            },
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 5
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-4,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 10
        }
    }
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q, econ=e, emul=emul)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    state = solver.getAlgorithmState()
    assert state.gnorm < 1e-4
    assert state.cnorm < 1e-6
def run_U(algo):
    obj = MyObj()
    paramsDict["Step"]["Type"] = algo
    params = ROL.ParameterList(paramsDict, "Parameters")
    x = NumpyVector(2)
    optimProblem = ROL.OptimizationProblem(obj, x)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve()
    print(x.data)
    assert round(x[0] - 1.0, 6) == 0.0
    assert round(x[1], 6) == 0.0
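# The run_* helpers in this file mutate a module-level ``paramsDict`` before
# building the ROL.ParameterList. A minimal sketch of such a dictionary
# follows; this is an assumption about its shape, based on the parameter
# lists used elsewhere in this file, not the actual module-level definition.
paramsDict = {
    'General': {
        'Secant': {'Type': 'Limited-Memory BFGS', 'Maximum Storage': 10}
    },
    'Step': {
        'Type': 'Line Search',  # overwritten by each run_* helper
        'Line Search': {'Descent Method': {'Type': 'Quasi-Newton Step'}}
    },
    'Status Test': {
        'Gradient Tolerance': 1e-12,
        'Step Tolerance': 1e-14,
        'Iteration Limit': 100
    }
}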
def test_create_bounds_separately():
    obj = MyObj()
    paramsDict["Step"]["Type"] = "Trust Region"
    params = ROL.ParameterList(paramsDict, "Parameters")
    x = NumpyVector(2)
    bnd = createBounds()
    bnd.test()
    optimProblem = ROL.OptimizationProblem(obj, x, bnd=bnd)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve()
    assert round(x[0] - 0.7, 6) == 0.0
    assert round(x[1], 6) == 0.0
def run_HS4(Vec, params_dict):
    obj = HS4_Obj()
    x = Vec(2)
    HS4_initial_guess(x)
    lower = Vec(2)
    upper = Vec(2)
    HS4_Bnd(lower, upper)
    bnd = ROL.Bounds(lower, upper, 1.0)
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(obj, x, bnd=bnd)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    print(x[0], x[1])
    assert HS4_minimum(x)
def run_E(algo):
    obj = MyObj2()
    paramsDict["Step"]["Type"] = algo
    params = ROL.ParameterList(paramsDict, "Parameters")
    x = NumpyVector(2)
    x[0] = 0.5 * 0.5**2
    x[1] = 0.5 * 0.5**2
    l = NumpyVector(1)
    con = EqConstraint()
    optimProblem = ROL.OptimizationProblem(obj, x, econ=con, emul=l)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve()
    assert round(x[0] - 0.707106, 5) == 0.0
    assert round(x[1] - 0.707106, 5) == 0.0
def run_HS28(Vec, params_dict):
    obj = HS28_Obj()
    x = Vec(3)
    HS28_initial_guess(x)
    # derivative checks; re-apply HS28_initial_guess(x) if these are enabled
    # obj.checkGradient(x)
    # obj.checkHessVec(x)
    l = Vec(1)
    l[0] = 0.0
    con = HS28_Econ()
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(obj, x, econs=[con], emuls=[l])
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    print(x[0], x[1], x[2])
    assert HS28_minimum(x)
def run_B(algo):
    obj = MyObj()
    paramsDict["Step"]["Type"] = algo
    params = ROL.ParameterList(paramsDict, "Parameters")
    x = NumpyVector(2)
    x_lo = NumpyVector(2)
    x_lo[0] = -1
    x_lo[1] = -1
    x_up = NumpyVector(2)
    x_up[0] = +0.7
    x_up[1] = +0.7
    bnd = ROL.Bounds(x_lo, x_up, 1.0)
    optimProblem = ROL.OptimizationProblem(obj, x, bnd=bnd)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve()
    assert round(x[0] - 0.7, 6) == 0.0
    assert round(x[1], 6) == 0.0
def test_TimeTracking():
    """Main test."""
    # setup problem
    mesh = fd.UnitSquareMesh(20, 20)
    Q = fs.FeControlSpace(mesh)
    inner = fs.LaplaceInnerProduct(Q, fixed_bids=[1, 2, 3, 4])
    q = fs.ControlVector(Q, inner)

    # create PDE-constrained objective functional
    J = TimeTracking(Q)

    # ROL parameters
    params_dict = {
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 25
            }
        },
        'Step': {
            'Type': 'Trust Region'
        },
        'Status Test': {
            'Gradient Tolerance': 1e-3,
            'Step Tolerance': 1e-8,
            'Iteration Limit': 20
        }
    }

    # assemble and solve ROL optimization problem
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    state = solver.getAlgorithmState()
    assert state.gnorm < 1e-3
def run_HS29(Vec, params_dict):
    params = ROL.ParameterList(params_dict, "Parameters")
    obj = HS29_Obj()
    x = Vec(3)
    x[0] = 1.
    x[1] = 2.
    x[2] = .1
    d = Vec(3)
    d[0] = 1
    d[1] = -1
    d[2] = 1.
    v = Vec(3)
    v[0] = 1
    v[1] = -1
    v[2] = 1.
    # obj.checkGradient(x)
    # obj.checkHessVec(x, d, 4, 1)
    HS29_initial_guess(x)
    l = Vec(1)
    l[0] = 0.0
    con = HS29_Icon()
    jv = Vec(1)
    jv[0] = 1.
    # con.checkApplyJacobian(x, d, jv, 4, 1)
    # con.checkAdjointConsistencyJacobian(jv, d, x)
    # con.checkApplyAdjointHessian(x, jv, d, v, 5, 1)
    ilower = Vec(1)
    HS29_Ibnds(ilower)
    ibnd = ROL.Bounds(ilower, isLower=True)
    problem = ROL.OptimizationProblem(obj, x, icons=[con],
                                      imuls=[l], ibnds=[ibnd])
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    print(x[0], x[1], x[2])
    assert HS29_minimum(x)
def rol_minimize(fun, x0, method=None, jac=None, hess=None, hessp=None,
                 bounds=None, constraints=(), tol=None, options=None,
                 x_grad=None):
    # copy options so that popping keys does not mutate the caller's dict
    options = dict(options) if options is not None else {}
    obj = ROLObj(fun, jac, hess, hessp)
    if x_grad is not None:
        print("Testing objective", flush=True)
        xg = get_rol_numpy_vector(x_grad)
        d = get_rol_numpy_vector(np.random.normal(0, 1, (x_grad.shape[0])))
        obj.checkGradient(xg, d, 12, 1)
        obj.checkHessVec(xg, d, 12, 1)

    # fall back to a limited-memory BFGS Hessian when none is provided
    use_bfgs = hess is None and hessp is None
    if isinstance(hess, BFGS):
        use_bfgs = True
    for constr in constraints:
        if (not isinstance(constr, LinearConstraint)
                and (isinstance(constr.hess, BFGS) or constr.hess is None)):
            use_bfgs = True
            constr.hess = None

    assert method in ('rol-trust-constr', None)
    rol_method = options.pop('step-type', 'Augmented Lagrangian')
    params = get_rol_parameters(rol_method, use_bfgs, options)

    x = get_rol_numpy_vector(x0)
    bnd, econ, emul, icon, imul, ibnd = get_constraints(
        constraints, bounds, x_grad)
    optimProblem = ROL.OptimizationProblem(
        obj, x, bnd=bnd, econs=econ, emuls=emul, icons=icon,
        imuls=imul, ibnds=ibnd)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve(options.get('verbose', 0))

    state = solver.getAlgorithmState()
    success = state.statusFlag.name == 'EXITSTATUS_CONVERGED'
    res = OptimizeResult(
        x=rol_vector_to_numpy(x), fun=state.value, cnorm=state.cnorm,
        gnorm=state.gnorm, snorm=state.snorm, success=success,
        nit=state.iter, nfev=state.nfval, ngev=state.ngrad,
        constr_nfev=state.ncval, status=state.statusFlag.name,
        message=f'Optimization terminated early {state.statusFlag.name}')
    return res
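# Hedged usage sketch for rol_minimize (illustrative, not from the source):
# the signature mirrors scipy.optimize.minimize, so with SciPy's Rosenbrock
# helpers a call could look like the following. Note that 'step-type' is the
# only option key handled explicitly above; everything else is forwarded to
# get_rol_parameters.
#
#     from scipy.optimize import rosen, rosen_der, rosen_hess
#     res = rol_minimize(rosen, np.zeros(2), method='rol-trust-constr',
#                        jac=rosen_der, hess=rosen_hess,
#                        options={'step-type': 'Trust Region'})
#     print(res.x, res.nit, res.success)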
def test_create_problem_separately():
    paramsDict["Step"]["Type"] = "Trust Region"
    params = ROL.ParameterList(paramsDict, "Parameters")
    problem = get_problem()
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
        'Gradient Tolerance': 1e-15,
        'Relative Gradient Tolerance': 1e-10,
        'Step Tolerance': 1e-16,
        'Relative Step Tolerance': 1e-10,
        'Iteration Limit': 30
    }
}
params = ROL.ParameterList(paramsDict, "Parameters")
bound_constraint = ROL.Bounds(lower, upper, 1.0)
optimProblem = ROL.OptimizationProblem(obj, x, bnd=bound_constraint,
                                       econ=volConstr, emul=l_initializacao)
solver = ROL.OptimizationSolver(optimProblem, params)
solver.solve()
print("it changed here")  # debug marker
q.assign(0.1)
interm = obj.resposta()
del x
interm = FeVector(interm.vector(), dot_product)
params2 = ROL.ParameterList(paramsDict2, "Parameters")
bound_constraint = ROL.Bounds(lower, upper, 1.0)
optimProblem = ROL.OptimizationProblem(obj, interm, bnd=bound_constraint,
                                       econ=volConstr, emul=l_initializacao)
def test_box_constraint(pytestconfig):
    n = 5
    mesh = fd.UnitSquareMesh(n, n)
    T = mesh.coordinates.copy(deepcopy=True)
    (x, y) = fd.SpatialCoordinate(mesh)
    T.interpolate(T + fd.Constant((1, 0)) * x * y)
    mesh = fd.Mesh(T)

    Q = fs.FeControlSpace(mesh)
    inner = fs.LaplaceInnerProduct(Q, fixed_bids=[1])
    mesh_m = Q.mesh_m
    q = fs.ControlVector(Q, inner)

    if pytestconfig.getoption("verbose"):
        out = fd.File("domain.pvd")

        def cb():
            out.write(mesh_m.coordinates)
    else:
        def cb():
            pass

    lower_bound = Q.T.copy(deepcopy=True)
    lower_bound.interpolate(fd.Constant((-0.0, -0.0)))
    upper_bound = Q.T.copy(deepcopy=True)
    upper_bound.interpolate(fd.Constant((+1.3, +0.9)))

    J = fsz.MoYoBoxConstraint(1, [2], Q, lower_bound=lower_bound,
                              upper_bound=upper_bound, cb=cb,
                              quadrature_degree=100)
    g = q.clone()
    J.gradient(g, q, None)
    taylor_result = J.checkGradient(q, g, 9, 1)

    for i in range(len(taylor_result) - 1):
        if taylor_result[i][3] > 1e-7:
            assert taylor_result[i + 1][3] <= taylor_result[i][3] * 0.11

    params_dict = {
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            }
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 2
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-10,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 150
        }
    }
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    Tvec = Q.T.vector()
    nodes = fd.DirichletBC(Q.V_r, fd.Constant((0.0, 0.0)), [2]).nodes
    assert np.all(Tvec[nodes, 0] <= 1.3 + 1e-4)
    assert np.all(Tvec[nodes, 1] <= 0.9 + 1e-4)
def test_objective_plus_box_constraint(pytestconfig):
    n = 10
    mesh = fd.UnitSquareMesh(n, n)
    T = mesh.coordinates.copy(deepcopy=True)
    (x, y) = fd.SpatialCoordinate(mesh)
    T.interpolate(T + fd.Constant((0, 0)))
    mesh = fd.Mesh(T)

    Q = fs.FeControlSpace(mesh)
    inner = fs.LaplaceInnerProduct(Q)
    mesh_m = Q.mesh_m
    q = fs.ControlVector(Q, inner)

    if pytestconfig.getoption("verbose"):
        out = fd.File("domain.pvd")

        def cb():
            out.write(mesh_m.coordinates)
    else:
        def cb():
            pass

    lower_bound = Q.T.copy(deepcopy=True)
    lower_bound.interpolate(fd.Constant((-0.2, -0.2)))
    upper_bound = Q.T.copy(deepcopy=True)
    upper_bound.interpolate(fd.Constant((+1.2, +1.2)))

    # levelset test case
    (x, y) = fd.SpatialCoordinate(Q.mesh_m)
    f = (pow(x - 0.5, 2)) + pow(y - 0.5, 2) - 4.
    J1 = fsz.LevelsetFunctional(f, Q, cb=cb, quadrature_degree=10)
    J2 = fsz.MoYoBoxConstraint(10., [1, 2, 3, 4], Q,
                               lower_bound=lower_bound,
                               upper_bound=upper_bound,
                               cb=cb, quadrature_degree=10)
    J3 = fsz.MoYoSpectralConstraint(100, fd.Constant(0.6), Q,
                                    cb=cb, quadrature_degree=100)
    J = 0.1 * J1 + J2 + J3
    g = q.clone()
    J.gradient(g, q, None)
    taylor_result = J.checkGradient(q, g, 9, 1)

    for i in range(len(taylor_result) - 1):
        if taylor_result[i][3] > 1e-6 and taylor_result[i][3] < 1e-3:
            assert taylor_result[i + 1][3] <= taylor_result[i][3] * 0.15

    params_dict = {
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            }
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 2
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-10,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 10
        }
    }
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    Tvec = Q.T.vector()
    nodes = fd.DirichletBC(Q.V_r, fd.Constant((0.0, 0.0)), [2]).nodes
    assert np.all(Tvec[nodes, 0] <= 1.2 + 1e-1)
    assert np.all(Tvec[nodes, 1] <= 1.2 + 1e-1)
def test_periodic(dim, inner_t, use_extension, pytestconfig):
    """Test template for PeriodicControlSpace."""
    verbose = pytestconfig.getoption("verbose")
    if dim == 2:
        mesh = fd.PeriodicUnitSquareMesh(30, 30)
    elif dim == 3:
        mesh = fd.PeriodicUnitCubeMesh(20, 20, 20)
    else:
        raise NotImplementedError

    Q = fs.FeControlSpace(mesh)
    inner = inner_t(Q)

    # levelset test case
    V = fd.FunctionSpace(Q.mesh_m, "DG", 0)
    sigma = fd.Function(V)
    if dim == 2:
        x, y = fd.SpatialCoordinate(Q.mesh_m)
        g = fd.sin(y * np.pi)  # truncate at bdry
        f = fd.cos(2 * np.pi * x) * g
        perturbation = 0.05 * fd.sin(x * np.pi) * g**2
        sigma.interpolate(g * fd.cos(2 * np.pi * x * (1 + perturbation)))
    elif dim == 3:
        x, y, z = fd.SpatialCoordinate(Q.mesh_m)
        g = fd.sin(y * np.pi) * fd.sin(z * np.pi)  # truncate at bdry
        f = fd.cos(2 * np.pi * x) * g
        perturbation = 0.05 * fd.sin(x * np.pi) * g**2
        sigma.interpolate(g * fd.cos(2 * np.pi * x * (1 + perturbation)))
    else:
        raise NotImplementedError

    class LevelsetFct(fs.ShapeObjective):
        def __init__(self, sigma, f, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.sigma = sigma  # initial
            self.f = f          # target
            Vdet = fd.FunctionSpace(Q.mesh_r, "DG", 0)
            self.detDT = fd.Function(Vdet)

        def value_form(self):
            # volume integral
            self.detDT.interpolate(fd.det(fd.grad(self.Q.T)))
            if min(self.detDT.vector()) > 0.05:
                integrand = (self.sigma - self.f)**2
            else:
                integrand = np.nan * (self.sigma - self.f)**2
            return integrand * fd.dx(metadata={"quadrature_degree": 1})

    # if running with -v or --verbose, then export the shapes
    if verbose:
        out = fd.File("sigma.pvd")

        def cb(*args):
            out.write(sigma)
    else:
        cb = None
    J = LevelsetFct(sigma, f, Q, cb=cb)

    if use_extension == "w_ext":
        ext = fs.ElasticityExtension(Q.V_r)
    elif use_extension == "w_ext_fixed_dim":
        ext = fs.ElasticityExtension(Q.V_r, fixed_dims=[0])
    else:
        ext = None
    q = fs.ControlVector(Q, inner, boundary_extension=ext)

    # move the mesh a bit so that the Taylor test is not performed at T=id
    g = q.clone()
    J.gradient(g, q, None)
    q.plus(g)
    J.update(q, None, 1)

    # Taylor test
    J.gradient(g, q, None)
    res = J.checkGradient(q, g, 5, 1)
    errors = [l[-1] for l in res]
    assert errors[-1] < 0.11 * errors[-2]
    q.scale(0)

    # ROL parameters
    grad_tol = 1e-4
    params_dict = {
        'Step': {
            'Type': 'Trust Region'
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 25
            }
        },
        'Status Test': {
            'Gradient Tolerance': grad_tol,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 40
        }
    }

    # assemble and solve ROL optimization problem
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    state = solver.getAlgorithmState()
    assert state.gnorm < grad_tol
def test_levelset(dim, inner_t, controlspace_t, use_extension, pytestconfig):
    """Test template for fsz.LevelsetFunctional."""
    verbose = pytestconfig.getoption("verbose")
    clscale = 0.1 if dim == 2 else 0.2

    # make the mesh a bit coarser if we are using a multigrid control space
    # as we are refining anyway
    if controlspace_t == fs.FeMultiGridControlSpace:
        clscale *= 4

    if dim == 2:
        mesh = fs.DiskMesh(clscale)
    elif dim == 3:
        mesh = fs.SphereMesh(clscale)
    else:
        raise NotImplementedError

    if controlspace_t == fs.BsplineControlSpace:
        if dim == 2:
            bbox = [(-2, 2), (-2, 2)]
            orders = [2, 2]
            levels = [4, 4]
        else:
            bbox = [(-3, 3), (-3, 3), (-3, 3)]
            orders = [2, 2, 2]
            levels = [3, 3, 3]
        Q = fs.BsplineControlSpace(mesh, bbox, orders, levels)
    elif controlspace_t == fs.FeMultiGridControlSpace:
        Q = fs.FeMultiGridControlSpace(mesh, refinements=1, order=2)
    else:
        Q = controlspace_t(mesh)

    inner = inner_t(Q)

    # if running with -v or --verbose, then export the shapes
    if verbose:
        out = fd.File("domain.pvd")

        def cb(*args):
            out.write(Q.mesh_m.coordinates)

        cb()
    else:
        cb = None

    # levelset test case
    if dim == 2:
        (x, y) = fd.SpatialCoordinate(Q.mesh_m)
        f = (pow(x, 2)) + pow(1.3 * y, 2) - 1.
    elif dim == 3:
        (x, y, z) = fd.SpatialCoordinate(Q.mesh_m)
        f = (pow(x, 2)) + pow(0.8 * y, 2) + pow(1.3 * z, 2) - 1.
    else:
        raise NotImplementedError

    J = fsz.LevelsetFunctional(f, Q, cb=cb, scale=0.1)

    if use_extension == "w_ext":
        ext = fs.ElasticityExtension(Q.V_r)
    elif use_extension == "w_ext_fixed_dim":
        ext = fs.ElasticityExtension(Q.V_r, fixed_dims=[0])
    else:
        ext = None

    q = fs.ControlVector(Q, inner, boundary_extension=ext)

    # These tolerances are not very stringent, but solutions are correct;
    # with tighter tolerances, the combination
    # FeMultiGridControlSpace-ElasticityInnerProduct fails because the mesh
    # self-intersects (one should probably be more careful with the opt
    # params).
    grad_tol = 1e-1
    itlim = 15
    itlimsub = 15

    # volume constraint
    vol = fsz.LevelsetFunctional(fd.Constant(1.0), Q, scale=1)
    initial_vol = vol.value(q, None)
    econ = fs.EqualityConstraint([vol], target_value=[initial_vol])
    emul = ROL.StdVector(1)

    # ROL parameters
    params_dict = {
        'Step': {
            'Type': 'Augmented Lagrangian',
            'Augmented Lagrangian': {
                'Subproblem Step Type': 'Line Search',
                'Penalty Parameter Growth Factor': 1.05,
                'Print Intermediate Optimization History': True,
                'Subproblem Iteration Limit': itlimsub
            },
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            },
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 50
            }
        },
        'Status Test': {
            'Gradient Tolerance': grad_tol,
            'Step Tolerance': 1e-10,
            'Iteration Limit': itlim
        }
    }
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q, econ=econ, emul=emul)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    # and that the volume has not changed too much
    state = solver.getAlgorithmState()
    assert state.gnorm < grad_tol
    assert abs(vol.value(q, None) - initial_vol) < 1e-2
def test_spectral_constraint(pytestconfig):
    n = 5
    mesh = fd.UnitSquareMesh(n, n)
    T = fd.Function(fd.VectorFunctionSpace(mesh, "CG", 1)).interpolate(
        fd.SpatialCoordinate(mesh) - fd.Constant((0.5, 0.5)))
    mesh = fd.Mesh(T)
    Q = fs.FeControlSpace(mesh)
    inner = fs.LaplaceInnerProduct(Q)
    mesh_m = Q.mesh_m
    q = fs.ControlVector(Q, inner)

    if pytestconfig.getoption("verbose"):
        out = fd.File("domain.pvd")

        def cb():
            out.write(mesh_m.coordinates)
    else:
        def cb():
            pass

    J = fsz.MoYoSpectralConstraint(0.5, fd.Constant(0.1), Q, cb=cb)
    q.fun += Q.T
    g = q.clone()
    J.update(q, None, -1)
    J.gradient(g, q, None)
    cb()
    taylor_result = J.checkGradient(q, g, 7, 1)

    for i in range(len(taylor_result) - 1):
        assert taylor_result[i + 1][3] <= taylor_result[i][3] * 0.11

    params_dict = {
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 2
            }
        },
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-10,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 150
        }
    }
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    Tvec = Q.T.vector()[:, :]
    for i in range(Tvec.shape[0]):
        assert abs(Tvec[i, 0]) < 0.55 + 1e-4
        assert abs(Tvec[i, 1]) < 0.55 + 1e-4
    assert np.any(np.abs(Tvec) > 0.55 - 1e-4)
J = fsz.LevelsetFunctional(f, Q, cb=lambda: out.write(mesh_m.coordinates))
q = fs.ControlVector(Q, inner)

params_dict = {
    'General': {
        'Secant': {
            'Type': 'Limited-Memory BFGS',
            'Maximum Storage': 5
        }
    },
    'Step': {
        'Type': 'Line Search',
        'Line Search': {
            'Descent Method': {
                'Type': 'Quasi-Newton Step'
            }
        }
    },
    'Status Test': {
        'Gradient Tolerance': 1e-5,
        'Step Tolerance': 1e-6,
        'Iteration Limit': 40
    }
}
params = ROL.ParameterList(params_dict, "Parameters")
problem = ROL.OptimizationProblem(J, q)
solver = ROL.OptimizationSolver(problem, params)
solver.solve()
def test_levelset(dim, inner_t, controlspace_t, use_extension, pytestconfig):
    """Test template for fsz.LevelsetFunctional."""
    verbose = pytestconfig.getoption("verbose")
    clscale = 0.1 if dim == 2 else 0.2

    # make the mesh a bit coarser if we are using a multigrid control space
    # as we are refining anyway
    if controlspace_t == fs.FeMultiGridControlSpace:
        clscale *= 2

    if dim == 2:
        mesh = fs.DiskMesh(clscale)
    elif dim == 3:
        mesh = fs.SphereMesh(clscale)
    else:
        raise NotImplementedError

    if controlspace_t == fs.BsplineControlSpace:
        if dim == 2:
            bbox = [(-2, 2), (-2, 2)]
            orders = [2, 2]
            levels = [4, 4]
        else:
            bbox = [(-3, 3), (-3, 3), (-3, 3)]
            orders = [2, 2, 2]
            levels = [3, 3, 3]
        Q = fs.BsplineControlSpace(mesh, bbox, orders, levels)
    elif controlspace_t == fs.FeMultiGridControlSpace:
        Q = fs.FeMultiGridControlSpace(mesh, refinements=1, order=2)
    else:
        Q = controlspace_t(mesh)

    inner = inner_t(Q)

    # if running with -v or --verbose, then export the shapes
    if verbose:
        out = fd.File("domain.pvd")

        def cb(*args):
            out.write(Q.mesh_m.coordinates)

        cb()
    else:
        cb = None

    # levelset test case
    if dim == 2:
        (x, y) = fd.SpatialCoordinate(Q.mesh_m)
        f = (pow(x, 2)) + pow(1.3 * y, 2) - 1.
    elif dim == 3:
        (x, y, z) = fd.SpatialCoordinate(Q.mesh_m)
        f = (pow(x, 2)) + pow(0.8 * y, 2) + pow(1.3 * z, 2) - 1.
    else:
        raise NotImplementedError

    J = fsz.LevelsetFunctional(f, Q, cb=cb, scale=0.1)

    if use_extension == "w_ext":
        ext = fs.ElasticityExtension(Q.V_r)
    elif use_extension == "w_ext_fixed_dim":
        ext = fs.ElasticityExtension(Q.V_r, fixed_dims=[0])
    else:
        ext = None

    q = fs.ControlVector(Q, inner, boundary_extension=ext)

    # move the mesh a bit so that the Taylor test is not performed at T=id
    g = q.clone()
    J.gradient(g, q, None)
    q.plus(g)
    J.update(q, None, 1)

    # Taylor test
    J.gradient(g, q, None)
    res = J.checkGradient(q, g, 5, 1)
    errors = [l[-1] for l in res]
    assert errors[-1] < 0.11 * errors[-2]
    q.scale(0)

    grad_tol = 1e-6 if dim == 2 else 1e-4

    # ROL parameters
    params_dict = {
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 50
            }
        },
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            }
        },
        'Status Test': {
            'Gradient Tolerance': grad_tol,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 150
        }
    }

    # assemble and solve ROL optimization problem
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    state = solver.getAlgorithmState()
    assert state.gnorm < grad_tol