Example #1
def run_L2tracking_optimization(write_output=False):
    """ Test template for L2 tracking optimization."""

    # setup problem
    mesh = fd.UnitSquareMesh(30, 30)
    Q = fs.FeControlSpace(mesh)
    inner = fs.ElasticityInnerProduct(Q)
    q = fs.ControlVector(Q, inner)

    # tool for developing new tests, allows storing shape iterates;
    # defined after Q so that the callback can reference Q.mesh_m
    if write_output:
        out = fd.File("domain.pvd")

        def cb(*args):
            out.write(Q.mesh_m.coordinates)

        cb()
    else:
        cb = None

    # setup PDE constraint
    mesh_m = Q.mesh_m
    e = PoissonSolver(mesh_m)

    # create PDE-constrained objective functional
    J_ = L2trackingObjective(e, Q, cb=cb)
    J = fs.ReducedObjective(J_, e)

    # ROL parameters
    params_dict = {
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 10
            }
        },
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            },
        },
        'Status Test': {
            'Gradient Tolerance': 1e-4,
            'Step Tolerance': 1e-5,
            'Iteration Limit': 15
        }
    }

    # assemble and solve ROL optimization problem
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    state = solver.getAlgorithmState()
    assert (state.gnorm < 1e-4)
Example #2
        def solve(self):
            """
            Solve the optimization problem and return the optimized
            parameters.
            """

            bnd = self.bounds
            econs = self.constraints[0][0]
            emuls = self.constraints[0][1]
            icons = self.constraints[1][0]
            imuls = self.constraints[1][1]
            if len(icons) > 0:
                zeros = [i.clone() for i in imuls]
                ibnds = [ROL.Bounds(z, isLower=True) for z in zeros]
            else:
                ibnds = []

            rolproblem = ROL.OptimizationProblem(self.rolobjective,
                                                 self.rolvector,
                                                 bnd=bnd,
                                                 econs=econs,
                                                 emuls=emuls,
                                                 icons=icons,
                                                 imuls=imuls,
                                                 ibnds=ibnds)
            x = self.rolvector
            params = ROL.ParameterList(self.params_dict, "Parameters")
            self.solver = ROL.OptimizationSolver(rolproblem, params)
            self.solver.solve()
            return self.problem.reduced_functional.controls.delist(x.dat)
Example #3
def test_std_vector_run():

    params_dict = {
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 5
            }
        },
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Method'
                }
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-15,
            'Relative Gradient Tolerance': 1e-10,
            'Step Tolerance': 1e-16,
            'Relative Step Tolerance': 1e-10,
            'Iteration Limit': 10
        }
    }
    params = ROL.ParameterList(params_dict, "Parameters")
    algo = ROL.Algorithm("Line Search", params)
    x = ROL.StdVector(2)
    x[0] = -1.0
    x[1] = 2.0
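    # NOTE: 'obj' is a module-level ROL.Objective defined elsewhere in the
    # original test file; it is not shown in this snippet.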

    algo.run(x, obj)
    # Check answer to 8 decimal places
    assert round(x[0] - 1.0, 8) == 0.0
    assert round(x[1], 8) == 0.0
Example #4
def run_HS13(Vec, params_dict):
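    # NOTE: the HS13_* helpers (objective, inequality constraint, bounds,
    # initial guess and minimum check) are module-level definitions from the
    # original test file; they are not shown in this snippet.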
    obj = HS13_Obj()
    x = Vec(2)
    d = Vec(2)
    HS13_initial_guess(x)
    l = Vec(1)
    l[0] = 1.0
    icon = HS13_Icon()
    ilower = Vec(1)
    HS13_Ibnds(ilower)
    ibnd = ROL.Bounds(ilower, isLower=True)
    lower = Vec(2)
    HS13_Bnd(lower)
    bnd = ROL.Bounds(lower, isLower=True)
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(obj,
                                      x,
                                      bnd=bnd,
                                      icons=[icon],
                                      imuls=[l],
                                      ibnds=[ibnd])
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    print(x[0], x[1])
    assert HS13_minimum(x)
Example #5
def test_equality_constraint(pytestconfig):
    mesh = fs.DiskMesh(0.05, radius=2.)

    Q = fs.FeControlSpace(mesh)
    inner = fs.ElasticityInnerProduct(Q, direct_solve=True)
    mesh_m = Q.mesh_m
    (x, y) = fd.SpatialCoordinate(mesh_m)

    q = fs.ControlVector(Q, inner)
    if pytestconfig.getoption("verbose"):
        out = fd.File("domain.pvd")

        def cb(*args):
            out.write(Q.mesh_m.coordinates)
    else:
        cb = None
    f = (pow(2 * x, 2)) + pow(y - 0.1, 2) - 1.2

    J = fsz.LevelsetFunctional(f, Q, cb=cb)
    vol = fsz.LevelsetFunctional(fd.Constant(1.0), Q)
    e = fs.EqualityConstraint([vol])
    emul = ROL.StdVector(1)

    params_dict = {
        'Step': {
            'Type': 'Augmented Lagrangian',
            'Augmented Lagrangian': {
                'Subproblem Step Type': 'Line Search',
                'Penalty Parameter Growth Factor': 2.,
                'Initial Penalty Parameter': 1.,
                'Subproblem Iteration Limit': 20,
            },
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            },
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 5
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-4,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 10
        }
    }

    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q, econ=e, emul=emul)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    state = solver.getAlgorithmState()
    assert (state.gnorm < 1e-4)
    assert (state.cnorm < 1e-6)
Example #6
def run_U(algo):
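    # NOTE: MyObj, paramsDict and NumpyVector are module-level helpers from
    # the original test file; they are not shown in this snippet.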
    obj = MyObj()
    paramsDict["Step"]["Type"] = algo
    params = ROL.ParameterList(paramsDict, "Parameters")
    x = NumpyVector(2)
    optimProblem = ROL.OptimizationProblem(obj, x)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve()
    print(x.data)
    assert round(x[0] - 1.0, 6) == 0.0
    assert round(x[1], 6) == 0.0
Example #7
def test_std_vector_check():
    x = ROL.StdVector(2)
    y = ROL.StdVector(2)
    z = ROL.StdVector(2)

    x[0] = 1.5
    x[1] = 0.5
    y[0] = 1.2
    y[1] = 0.2

    u = x.checkVector(y, z)
    assert sum(u) < 1e-12
Example #8
def get_problem():
    obj = MyObj()
    x = NumpyVector(2)
    x_lo = NumpyVector(2)
    x_lo[0] = -1
    x_lo[1] = -1
    x_up = NumpyVector(2)
    x_up[0] = +0.7
    x_up[1] = +0.7
    bnd = ROL.Bounds(x_lo, x_up, 1.0)
    optimProblem = ROL.OptimizationProblem(obj, x, bnd=bnd)
    return optimProblem
Example #9
def test_create_bounds_seperately():
    obj = MyObj()
    paramsDict["Step"]["Type"] = "Trust Region"
    params = ROL.ParameterList(paramsDict, "Parameters")
    x = NumpyVector(2)
    bnd = createBounds()
    bnd.test()
    optimProblem = ROL.OptimizationProblem(obj, x, bnd=bnd)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve()
    assert round(x[0] - 0.7, 6) == 0.0
    assert round(x[1], 6) == 0.0
Example #10
def run_E(algo):
    obj = MyObj2()
    paramsDict["Step"]["Type"] = algo
    params = ROL.ParameterList(paramsDict, "Parameters")
    x = NumpyVector(2)
    x[0] = 0.5 * 0.5**2
    x[1] = 0.5 * 0.5**2
    l = NumpyVector(1)
    con = EqConstraint()
    optimProblem = ROL.OptimizationProblem(obj, x, econ=con, emul=l)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve()
    assert round(x[0] - 0.707106, 5) == 0.0
    assert round(x[1] - 0.707106, 5) == 0.0
Example #11
def run_HS4(Vec, params_dict):
    obj = HS4_Obj()
    x = Vec(2)
    HS4_initial_guess(x)
    lower = Vec(2)
    upper = Vec(2)
    HS4_Bnd(lower, upper)
    bnd = ROL.Bounds(lower, upper, 1.0)
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(obj, x, bnd=bnd)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    print(x[0], x[1])
    assert HS4_minimum(x)
Example #12
def get_rol_bounds(py_lb, py_ub):
    nvars = len(py_lb)
    # if np.all(py_lb == -np.inf) and np.all(py_ub == np.inf):
    #    raise Exception('Bounds not needed lb and ub are -inf, +inf')
    if np.any(py_lb == np.inf):
        raise Exception('A lower bound was set to +inf')
    if np.any(py_ub == -np.inf):
        raise Exception('An upper bound was set to -inf')

    lb, ub = RolVector(nvars), RolVector(nvars)
    for ii in range(nvars):
        lb[ii], ub[ii] = py_lb[ii], py_ub[ii]

    # if np.all(py_lb == -np.inf) and not np.all(py_ub == np.inf):
    #    return ROL.Bounds(ub, False, 1.0)
    # elif np.all(py_ub == np.inf) and not np.all(py_lb == -np.inf):
    #    return ROL.Bounds(lb, True, 1.0)
    # avoid overflow warnings created by numpy_vector.py
    I = np.where(~np.isfinite(py_lb))[0]
    J = np.where(~np.isfinite(py_ub))[0]
    for ii in I:
        lb[ii] = -1e6  # -np.finfo(float).max/100
    for jj in J:
        ub[jj] = 1e6  # np.finfo(float).max/100
    # print(lb.data, ub.data)
    return ROL.Bounds(lb, ub, 1.0)
Example #13
def run_HS28(Vec, params_dict):
    obj = HS28_Obj()
    x = Vec(3)
    HS28_initial_guess(x)
    # obj.checkGradient(x)
    # obj.checkHessVec(x)
    l = Vec(1)
    l[0] = 0.0
    con = HS28_Econ()
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(obj, x, econs=[con], emuls=[l])
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    print(x[0], x[1], x[2])
    assert HS28_minimum(x)
Example #14
        def __get_bounds(self):
            bounds = self.problem.bounds
            if bounds is None:
                return None

            controlvec = self.rolvector
            lowervec = controlvec.clone()
            uppervec = controlvec.clone()

            for i in range(len(controlvec.dat)):
                general_lb, general_ub = bounds[i]
                if isinstance(general_lb, (int, float)):
                    lowervec.dat[i]._applyUnary(lambda x: general_lb)
                else:
                    lowervec.dat[i].assign(general_lb)
                if isinstance(general_ub, (int, float)):
                    uppervec.dat[i]._applyUnary(lambda x: general_ub)
                else:
                    uppervec.dat[i].assign(general_ub)

            res = ROL.Bounds(lowervec, uppervec, 1.0)
            # FIXME: without this the lowervec and uppervec get cleaned up too
            # early.  This is a bug in PyROL and we'll hopefully figure that out
            # soon
            self.lowervec = lowervec
            self.uppervec = uppervec
            return res
Example #15
def run_B(algo):
    obj = MyObj()
    paramsDict["Step"]["Type"] = algo
    params = ROL.ParameterList(paramsDict, "Parameters")
    x = NumpyVector(2)
    x_lo = NumpyVector(2)
    x_lo[0] = -1
    x_lo[1] = -1
    x_up = NumpyVector(2)
    x_up[0] = +0.7
    x_up[1] = +0.7
    bnd = ROL.Bounds(x_lo, x_up, 1.0)
    optimProblem = ROL.OptimizationProblem(obj, x, bnd=bnd)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve()
    assert round(x[0] - 0.7, 6) == 0.0
    assert round(x[1], 6) == 0.0
Example #16
def createBounds():
    x_lo = NumpyVector(2)
    x_lo[0] = -1
    x_lo[1] = -1
    x_up = NumpyVector(2)
    x_up[0] = +0.7
    x_up[1] = +0.7
    bnd = ROL.Bounds(x_lo, x_up, 1.0)
    bnd.test()
    return bnd
Example #17
def test_TimeTracking():
    """ Main test."""

    # setup problem
    mesh = fd.UnitSquareMesh(20, 20)
    Q = fs.FeControlSpace(mesh)
    inner = fs.LaplaceInnerProduct(Q, fixed_bids=[1, 2, 3, 4])
    q = fs.ControlVector(Q, inner)

    # create PDE-constrained objective functional
    J = TimeTracking(Q)

    # ROL parameters
    params_dict = {
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 25
            }
        },
        'Step': {
            'Type': 'Trust Region'
        },
        'Status Test': {
            'Gradient Tolerance': 1e-3,
            'Step Tolerance': 1e-8,
            'Iteration Limit': 20
        }
    }

    # assemble and solve ROL optimization problem
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    state = solver.getAlgorithmState()
    assert (state.gnorm < 1e-3)
Example #18
def run_HS29(Vec, params_dict):
    params = ROL.ParameterList(params_dict, "Parameters")
    obj = HS29_Obj()
    x = Vec(3)
    x[0] = 1.
    x[1] = 2.
    x[2] = .1
    d = Vec(3)
    d[0] = 1
    d[1] = -1
    d[2] = 1.
    v = Vec(3)
    v[0] = 1
    v[1] = -1
    v[2] = 1.
    # obj.checkGradient(x)
    # obj.checkHessVec(x, d, 4, 1)

    HS29_initial_guess(x)
    l = Vec(1)
    l[0] = 0.0
    con = HS29_Icon()
    jv = Vec(1)
    jv[0] = 1.
    # con.checkApplyJacobian(x, d, jv, 4, 1)
    # con.checkAdjointConsistencyJacobian(jv, d, x)
    # con.checkApplyAdjointHessian(x, jv, d, v, 5, 1)
    ilower = Vec(1)
    HS29_Ibnds(ilower)
    ibnd = ROL.Bounds(ilower, isLower=True)
    problem = ROL.OptimizationProblem(obj,
                                      x,
                                      icons=[con],
                                      imuls=[l],
                                      ibnds=[ibnd])
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    print(x[0], x[1], x[2])
    assert HS29_minimum(x)
Example #19
def test_gradient_talyor_remainder_v2():
    from ROL.firedrake_vector import FiredrakeVector as FeVector
    import ROL
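    # NOTE: 'model' and 'wavelet' are module-level fixtures defined elsewhere
    # in the original spyro test file; they are not shown in this snippet.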

    comm = spyro.utils.mpi_init(model)

    mesh, V = spyro.io.read_mesh(model, comm)

    vp_guess = _make_vp_guess(V, mesh)

    sources = spyro.Sources(model, mesh, V, comm)

    receivers = spyro.Receivers(model, mesh, V, comm)

    vp_exact = _make_vp_exact(V, mesh)

    _, p_exact_recv = spyro.solvers.forward(model, mesh, comm, vp_exact,
                                            sources, wavelet, receivers)

    qr_x, _, _ = spyro.domains.quadrature.quadrature_rules(V)

    class L2Inner(object):
        def __init__(self):
            self.A = assemble(TrialFunction(V) * TestFunction(V) *
                              dx(rule=qr_x),
                              mat_type="matfree")
            self.Ap = as_backend_type(self.A).mat()

        def eval(self, _u, _v):
            upet = as_backend_type(_u).vec()
            vpet = as_backend_type(_v).vec()
            A_u = self.Ap.createVecLeft()
            self.Ap.mult(upet, A_u)
            return vpet.dot(A_u)

    class Objective(ROL.Objective):
        def __init__(self, inner_product):
            ROL.Objective.__init__(self)
            self.inner_product = inner_product
            self.p_guess = None
            self.misfit = None

        def value(self, x, tol):
            """Compute the functional"""
            self.p_guess, p_guess_recv = spyro.solvers.forward(
                model,
                mesh,
                comm,
                vp_guess,
                sources,
                wavelet,
                receivers,
                output=False,
            )
            self.misfit = spyro.utils.evaluate_misfit(model, p_guess_recv,
                                                      p_exact_recv)
            J = spyro.utils.compute_functional(model, self.misfit)
            return J

        def gradient(self, g, x, tol):
            dJ = spyro.solvers.gradient(
                model,
                mesh,
                comm,
                vp_guess,
                receivers,
                self.p_guess,
                self.misfit,
            )
            g.scale(0)
            g.vec += dJ

        def update(self, x, flag, iteration):
            vp_guess.assign(Function(V, x.vec, name="velocity"))

    paramsDict = {
        "Step": {
            "Line Search": {
                "Descent Method": {
                    "Type": "Quasi-Newton Method"
                }
            },
            "Type": "Line Search",
        },
        "Status Test": {
            "Gradient Tolerance": 1e-12,
            "Iteration Limit": 20
        },
    }
    params = ROL.ParameterList(paramsDict, "Parameters")

    inner_product = L2Inner()
    obj = Objective(inner_product)
    u = Function(V).assign(vp_guess)
    opt = FeVector(u.vector(), inner_product)
    d = Function(V)

    x, y = SpatialCoordinate(mesh)
    # d.interpolate(sin(x * pi) * sin(y * pi))
    d.vector()[:] = np.random.rand(V.dim())
    # d.assign(0.1)
    d = FeVector(d.vector(), inner_product)
    # check the gradient using the model perturbation d, 4 iterations, 2nd-order test
    obj.checkGradient(opt, d, 4, 2)
Example #20
def test_levelset(dim, inner_t, controlspace_t, use_extension, pytestconfig):
    """ Test template for fsz.LevelsetFunctional."""
    verbose = pytestconfig.getoption("verbose")

    clscale = 0.1 if dim == 2 else 0.2

    # make the mesh a bit coarser if we are using a multigrid control space as
    # we are refining anyway
    if controlspace_t == fs.FeMultiGridControlSpace:
        clscale *= 4

    if dim == 2:
        mesh = fs.DiskMesh(clscale)
    elif dim == 3:
        mesh = fs.SphereMesh(clscale)
    else:
        raise NotImplementedError

    if controlspace_t == fs.BsplineControlSpace:
        if dim == 2:
            bbox = [(-2, 2), (-2, 2)]
            orders = [2, 2]
            levels = [4, 4]
        else:
            bbox = [(-3, 3), (-3, 3), (-3, 3)]
            orders = [2, 2, 2]
            levels = [3, 3, 3]
        Q = fs.BsplineControlSpace(mesh, bbox, orders, levels)
    elif controlspace_t == fs.FeMultiGridControlSpace:
        Q = fs.FeMultiGridControlSpace(mesh, refinements=1, order=2)
    else:
        Q = controlspace_t(mesh)

    inner = inner_t(Q)
    # if running with -v or --verbose, then export the shapes
    if verbose:
        out = fd.File("domain.pvd")

        def cb(*args):
            out.write(Q.mesh_m.coordinates)

        cb()
    else:
        cb = None

    # levelset test case
    if dim == 2:
        (x, y) = fd.SpatialCoordinate(Q.mesh_m)
        f = (pow(x, 2)) + pow(1.3 * y, 2) - 1.
    elif dim == 3:
        (x, y, z) = fd.SpatialCoordinate(Q.mesh_m)
        f = (pow(x, 2)) + pow(0.8 * y, 2) + pow(1.3 * z, 2) - 1.

    else:
        raise NotImplementedError

    J = fsz.LevelsetFunctional(f, Q, cb=cb, scale=0.1)

    if use_extension == "w_ext":
        ext = fs.ElasticityExtension(Q.V_r)
    if use_extension == "w_ext_fixed_dim":
        ext = fs.ElasticityExtension(Q.V_r, fixed_dims=[0])
    else:
        ext = None

    q = fs.ControlVector(Q, inner, boundary_extension=ext)

    # these tolerances are not very stringent; solutions are still correct
    # with tighter tolerances, except that the combination
    # FeMultiGridControlSpace-ElasticityInnerProduct then fails because the
    # mesh self-intersects (one should probably be more careful with the
    # optimization parameters)
    grad_tol = 1e-1
    itlim = 15
    itlimsub = 15

    # Volume constraint
    vol = fsz.LevelsetFunctional(fd.Constant(1.0), Q, scale=1)
    initial_vol = vol.value(q, None)
    econ = fs.EqualityConstraint([vol], target_value=[initial_vol])
    emul = ROL.StdVector(1)

    # ROL parameters
    params_dict = {
        'Step': {
            'Type': 'Augmented Lagrangian',
            'Augmented Lagrangian': {
                'Subproblem Step Type': 'Line Search',
                'Penalty Parameter Growth Factor': 1.05,
                'Print Intermediate Optimization History': True,
                'Subproblem Iteration Limit': itlimsub
            },
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            },
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 50
            }
        },
        'Status Test': {
            'Gradient Tolerance': grad_tol,
            'Step Tolerance': 1e-10,
            'Iteration Limit': itlim
        }
    }
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q, econ=econ, emul=emul)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    # and that the volume has not changed too much
    state = solver.getAlgorithmState()
    assert (state.gnorm < grad_tol)
    assert abs(vol.value(q, None) - initial_vol) < 1e-2
Example #21
def rol_minimize(fun,
                 x0,
                 method=None,
                 jac=None,
                 hess=None,
                 hessp=None,
                 bounds=None,
                 constraints=(),
                 tol=None,
                 options={},
                 x_grad=None):
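    # NOTE: ROLObj, get_rol_numpy_vector, get_rol_parameters, get_constraints
    # and rol_vector_to_numpy are helpers from the surrounding module; BFGS,
    # LinearConstraint and OptimizeResult are presumably the scipy.optimize
    # classes. None of these are shown in this snippet.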
    obj = ROLObj(fun, jac, hess, hessp)
    if x_grad is not None:
        print("Testing objective", flush=True)
        xg = get_rol_numpy_vector(x_grad)
        d = get_rol_numpy_vector(np.random.normal(0, 1, (x_grad.shape[0])))
        obj.checkGradient(xg, d, 12, 1)
        obj.checkHessVec(xg, d, 12, 1)

    use_bfgs = False
    if hess is None and hessp is None:
        use_bfgs = True
    if type(hess) == BFGS:
        use_bfgs = True
    for constr in constraints:
        if (type(constr) != LinearConstraint
                and (type(constr.hess) == BFGS or constr.hess is None)):
            use_bfgs = True
            constr.hess = None

    assert method in ('rol-trust-constr', None)
    if 'step-type' in options:
        rol_method = options['step-type']
        del options['step-type']
    else:
        rol_method = 'Augmented Lagrangian'
    params = get_rol_parameters(rol_method, use_bfgs, options)
    x = get_rol_numpy_vector(x0)
    bnd, econ, emul, icon, imul, ibnd = get_constraints(
        constraints, bounds, x_grad)
    optimProblem = ROL.OptimizationProblem(obj,
                                           x,
                                           bnd=bnd,
                                           econs=econ,
                                           emuls=emul,
                                           icons=icon,
                                           imuls=imul,
                                           ibnds=ibnd)
    solver = ROL.OptimizationSolver(optimProblem, params)
    solver.solve(options.get('verbose', 0))
    state = solver.getAlgorithmState()
    success = state.statusFlag.name == 'EXITSTATUS_CONVERGED'
    res = OptimizeResult(
        x=rol_vector_to_numpy(x),
        fun=state.value,
        cnorm=state.cnorm,
        gnorm=state.gnorm,
        snorm=state.snorm,
        success=success,
        nit=state.iter,
        nfev=state.nfval,
        ngev=state.ngrad,
        constr_nfev=state.ncval,
        status=state.statusFlag.name,
        message=f'Optimization terminated early {state.statusFlag.name}')
    return res
Example #22
def test_objective_plus_box_constraint(pytestconfig):

    n = 10
    mesh = fd.UnitSquareMesh(n, n)
    T = mesh.coordinates.copy(deepcopy=True)
    (x, y) = fd.SpatialCoordinate(mesh)
    T.interpolate(T + fd.Constant((0, 0)))
    mesh = fd.Mesh(T)

    Q = fs.FeControlSpace(mesh)
    inner = fs.LaplaceInnerProduct(Q)
    mesh_m = Q.mesh_m
    q = fs.ControlVector(Q, inner)
    if pytestconfig.getoption("verbose"):
        out = fd.File("domain.pvd")

        def cb():
            out.write(mesh_m.coordinates)
    else:

        def cb():
            pass

    lower_bound = Q.T.copy(deepcopy=True)
    lower_bound.interpolate(fd.Constant((-0.2, -0.2)))
    upper_bound = Q.T.copy(deepcopy=True)
    upper_bound.interpolate(fd.Constant((+1.2, +1.2)))

    # levelset test case
    (x, y) = fd.SpatialCoordinate(Q.mesh_m)
    f = (pow(x - 0.5, 2)) + pow(y - 0.5, 2) - 4.
    J1 = fsz.LevelsetFunctional(f, Q, cb=cb, quadrature_degree=10)
    J2 = fsz.MoYoBoxConstraint(10., [1, 2, 3, 4],
                               Q,
                               lower_bound=lower_bound,
                               upper_bound=upper_bound,
                               cb=cb,
                               quadrature_degree=10)
    J3 = fsz.MoYoSpectralConstraint(100,
                                    fd.Constant(0.6),
                                    Q,
                                    cb=cb,
                                    quadrature_degree=100)

    J = 0.1 * J1 + J2 + J3
    g = q.clone()
    J.gradient(g, q, None)
    taylor_result = J.checkGradient(q, g, 9, 1)

    for i in range(len(taylor_result) - 1):
        if taylor_result[i][3] > 1e-6 and taylor_result[i][3] < 1e-3:
            assert taylor_result[i + 1][3] <= taylor_result[i][3] * 0.15

    params_dict = {
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            }
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 2
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-10,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 10
        }
    }

    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    Tvec = Q.T.vector()
    nodes = fd.DirichletBC(Q.V_r, fd.Constant((0.0, 0.0)), [2]).nodes
    assert np.all(Tvec[nodes, 0] <= 1.2 + 1e-1)
    assert np.all(Tvec[nodes, 1] <= 1.2 + 1e-1)
Example #23
def get_rol_parameters(method, use_bfgs, options):
    paramlist_filename = options.get("paramlist_filename", None)
    if paramlist_filename is not None:
        paramlist = ROL.ParameterList(paramlist_filename)
        return paramlist

    paramsDict = {}
    # method = "Augmented Lagrangian"
    # method = "Fletcher"
    # method = "Moreau-Yosida Penalty"
    assert method in [
        "Augmented Lagrangian", "Fletcher", "Moreau-Yosida Penalty"
    ]
    paramsDict["Step"] = {"Type": method}
    #paramsDict["Step"]["Fletcher"] = {}
    #paramsDict["Step"]["Fletcher"]['Penalty Parameter'] = 1e8

    paramsDict["Step"]["Trust Region"] = {}
    paramsDict["Step"]["Trust Region"]["Subproblem Solver"] = "Truncated CG"
    paramsDict["Step"]["Trust Region"]["Subproblem Model"] = "Kelley Sachs"
    paramsDict["Step"]["Trust Region"]['Initial Radius'] = 10
    #paramsDict["Step"]["Trust Region"]["Subproblem Model"] = "Coleman-Li"
    #paramsDict["Step"]["Trust Region"]["Subproblem Solver"] = "Lin-More"
    #paramsDict["Step"]["Trust Region"]["Subproblem Model"] = "Lin-More"

    paramsDict["Step"]["Augmented Lagrangian"] = {
        #     'Initial Optimality Tolerance':1e-1,
        #     'Initial Feasibility Tolerance':1e-1,
        'Use Default Problem Scaling': False,
        'Print Intermediate Optimization History':
        (options.get('verbose', 0) > 2),
        'Use Default Initial Penalty Parameter': False,
        'Initial Penalty Parameter': 1e3,
        'Maximum Penalty Parameter': 1e8,
        'Penalty Parameter Growth Factor': 2,
        #    'Subproblem Iteration Limit':200
    }
    # paramsDict["Step"]["Moreau-Yosida Penalty"]  = {
    #    'Subproblem':{'Iteration Limit':20}, 'Initial Penalty Parameter':1e-2,
    #    'Penalty Parameter Growth Factor':2, 'Update Penalty':True}

    paramsDict["General"] = {
        'Print Verbosity': int(options.get('verbose', 0) > 3)
    }
    paramsDict["General"]["Secant"] = {"Use as Hessian": False}
    if use_bfgs:
        paramsDict["General"]["Secant"]["Use as Hessian"] = True
        paramsDict["Step"]["Line Search"] = {}
        paramsDict["Step"]["Line Search"]["Descent Method"] = {}
        paramsDict["Step"]["Line Search"]["Descent Method"]["Type"] = \
            "Quasi-Newton Method"

    paramsDict["Status Test"] = {
        "Gradient Tolerance": options.get('gtol', 1e-8),
        "Step Tolerance": options.get('xtol', 1e-14),
        "Constraint Tolerance": options.get('ctol', 1e-8),
        "Iteration Limit": options.get("maxiter", 100)
    }
    paramlist = ROL.ParameterList(paramsDict, "Parameters")
    return paramlist
Example #24
def test_box_constraint(pytestconfig):

    n = 5
    mesh = fd.UnitSquareMesh(n, n)
    T = mesh.coordinates.copy(deepcopy=True)
    (x, y) = fd.SpatialCoordinate(mesh)
    T.interpolate(T + fd.Constant((1, 0)) * x * y)
    mesh = fd.Mesh(T)

    Q = fs.FeControlSpace(mesh)
    inner = fs.LaplaceInnerProduct(Q, fixed_bids=[1])
    mesh_m = Q.mesh_m
    q = fs.ControlVector(Q, inner)
    if pytestconfig.getoption("verbose"):
        out = fd.File("domain.pvd")

        def cb():
            out.write(mesh_m.coordinates)
    else:

        def cb():
            pass

    lower_bound = Q.T.copy(deepcopy=True)
    lower_bound.interpolate(fd.Constant((-0.0, -0.0)))
    upper_bound = Q.T.copy(deepcopy=True)
    upper_bound.interpolate(fd.Constant((+1.3, +0.9)))

    J = fsz.MoYoBoxConstraint(1, [2],
                              Q,
                              lower_bound=lower_bound,
                              upper_bound=upper_bound,
                              cb=cb,
                              quadrature_degree=100)
    g = q.clone()
    J.gradient(g, q, None)
    taylor_result = J.checkGradient(q, g, 9, 1)

    for i in range(len(taylor_result) - 1):
        if taylor_result[i][3] > 1e-7:
            assert taylor_result[i + 1][3] <= taylor_result[i][3] * 0.11

    params_dict = {
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Step'
                }
            }
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 2
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-10,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 150
        }
    }

    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()
    Tvec = Q.T.vector()
    nodes = fd.DirichletBC(Q.V_r, fd.Constant((0.0, 0.0)), [2]).nodes
    assert np.all(Tvec[nodes, 0] <= 1.3 + 1e-4)
    assert np.all(Tvec[nodes, 1] <= 0.9 + 1e-4)
Example #25

class Objective(ROL.Objective):
    def __init__(self):
        super().__init__()

    def value(self, x, tol):
        return (x[0] - 1)**2 + x[1]**2

    def gradient(self, g, x, tol):
        g[0] = 2 * (x[0] - 1)
        g[1] = 2 * x[1]


obj = Objective()
x = ROL.StdVector(2)
x[0] = -1.0
x[1] = 2.0

lower = ROL.StdVector(2)
lower[0] = -10
lower[1] = -10

upper = ROL.StdVector(2)

upper[0] = 0.5
upper[1] = 0.5
bnd = ROL.Bounds(lower, upper, 1.0)
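# NOTE: 'params_dict' below is assumed to be defined earlier in the original
# script; it is not shown in this snippet. Also note that 'bnd' is constructed
# but not passed to ROL.OptimizationProblem here, so the bounds are unused in
# this fragment.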

params = ROL.ParameterList(params_dict, "Parameters")
problem = ROL.OptimizationProblem(obj, x)
Example #26
    # fragment of a ROL.Constraint subclass; the class definition and imports
    # are not shown in this snippet
    def applyJacobian(self, jv, v, x, tol):
        da = Function(A, v.vec)
        jv[0] = assemble(da * dx)

    def applyAdjointJacobian(self, ajv, v, x, tol):
        da = TestFunction(A)
        deriv = assemble(da*dx)
        if self.inner_product is not None:
            grad = self.inner_product.riesz_map(deriv)
        else:
            grad = deriv
        ajv.scale(0)
        ajv.vec += grad
        ajv.scale(v[0])

# Initialise 'ROLVector'
l = ROL.StdVector(1)
c = ROL.StdVector(1)
v = ROL.StdVector(1)
v[0] = 1.0
dualv = ROL.StdVector(1)
v.checkVector(c, l)

x = interpolate(Constant(0.5), A)
x = FeVector(x.vector(), dot_product)
g = Function(A)
g = FeVector(g.vector(), dot_product)
d = interpolate(Expression("1 + x[0] * (1-x[0])*x[1] * (1-x[1])", degree=1), A)
d = FeVector(d.vector(), dot_product)
x.checkVector(d, g)

jd = Function(A)
Example #27
    "Step": {
        "Type": "Augmented Lagrangian",
        "Augmented Lagrangian": {
            "Subproblem Step Type": "Line Search",
            "Subproblem Iteration Limit": 5.0,
        },
        "Line Search": {"Descent Method": {"Type": "Quasi-Newton Step"}},
    },
    "Status Test": {
        "Gradient Tolerance": 1e-16,
        "Iteration Limit": 100,
        "Step Tolerance": 1.0e-16,
    },
}

params = ROL.ParameterList(paramsDict, "Parameters")

inner_product = L2Inner()

obj = Objective(inner_product)

u = Function(V, name="velocity").assign(vp)
opt = FeVector(u.vector(), inner_product)

# Add control bounds to the problem (uses more RAM)
xlo = Function(V)
xlo.interpolate(Constant(1.0))
x_lo = FeVector(xlo.vector(), inner_product)

xup = Function(V)
xup.interpolate(Constant(5.0))
Example #28
# fragment: the beginning of this example is not shown; 'out', 'e' and 'Q'
# are defined in the original script
def cb():
    return out.write(e.solution.split()[0])


# create PDE-constrained objective functional
J_ = PipeObjective(e, Q, cb=cb)
J = fs.ReducedObjective(J_, e)

# add regularization to improve mesh quality
Jq = fsz.MoYoSpectralConstraint(10, fd.Constant(0.5), Q)
J = J + Jq

# Set up volume constraint
vol = fsz.VolumeFunctional(Q)
initial_vol = vol.value(q, None)
econ = fs.EqualityConstraint([vol], target_value=[initial_vol])
emul = ROL.StdVector(1)

# ROL parameters
params_dict = {
    'General': {'Print Verbosity': 0,  # set to 1 to understand output
                'Secant': {'Type': 'Limited-Memory BFGS',
                           'Maximum Storage': 10}},
    'Step': {'Type': 'Augmented Lagrangian',
             'Augmented Lagrangian':
             {'Subproblem Step Type': 'Trust Region',
              'Print Intermediate Optimization History': False,
              'Subproblem Iteration Limit': 10}},
    'Status Test': {'Gradient Tolerance': 1e-2,
                    'Step Tolerance': 1e-2,
                    'Constraint Tolerance': 1e-1,
                    'Iteration Limit': 10}}
Example #29
J = fsz.LevelsetFunctional(f, Q, cb=lambda: out.write(mesh_m.coordinates))

q = fs.ControlVector(Q, inner)

params_dict = {
    'General': {
        'Secant': {
            'Type': 'Limited-Memory BFGS',
            'Maximum Storage': 5
        }
    },
    'Step': {
        'Type': 'Line Search',
        'Line Search': {
            'Descent Method': {
                'Type': 'Quasi-Newton Step'
            }
        }
    },
    'Status Test': {
        'Gradient Tolerance': 1e-5,
        'Step Tolerance': 1e-6,
        'Iteration Limit': 40
    }
}

params = ROL.ParameterList(params_dict, "Parameters")
problem = ROL.OptimizationProblem(J, q)
solver = ROL.OptimizationSolver(problem, params)
solver.solve()
Example #30
    # fragment of a ROL.Constraint subclass; the class definition is not
    # shown in this snippet
    def applyJacobian(self, jv, v, x, tol):
        da = Function(A, v.vec)
        jv[0] = assemble(da * dx)

    def applyAdjointJacobian(self, ajv, v, x, tol):
        da = TestFunction(A)
        deriv = assemble(da * dx)
        if self.inner_product is not None:
            grad = self.inner_product.riesz_map(deriv)
        else:
            grad = deriv
        ajv.scale(0)
        ajv.vec += grad
        ajv.scale(v[0])


# Initialise 'ROLVector'
l_initializacao = ROL.StdVector(1)

x = interpolate(Constant(V / delta), A)
x = FeVector(x.vector(), dot_product)

lower = interpolate(Constant(0.0), A)
lower = FeVector(lower.vector(), dot_product)
upper = interpolate(Constant(1.0), A)
upper = FeVector(upper.vector(), dot_product)

# Instantiate the Objective class for the Poisson problem
obj = ObjR(dot_product)
volConstr = VolConstraint(dot_product)

#set_log_level(30)