Example #1
def test_std_vector_check():
    x = ROL.StdVector(2)
    y = ROL.StdVector(2)
    z = ROL.StdVector(2)

    x[0] = 1.5
    x[1] = 0.5
    y[0] = 1.2
    y[1] = 0.2

    u = x.checkVector(y, z)
    assert sum(u) < 1e-12
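
Not part of the original test: u holds the errors reported by ROL's vector-space consistency checks, so the assertion requires every check to be essentially zero. A small illustrative loop (assuming only that u is the iterable of floats that sum(u) implies) prints each error individually:

    for i, err in enumerate(u):
        print(f"vector check {i}: error = {err:.3e}")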
Example #2
def test_std_vector_run():

    params_dict = {
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 5
            }
        },
        'Step': {
            'Type': 'Line Search',
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Method'
                }
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-15,
            'Relative Gradient Tolerance': 1e-10,
            'Step Tolerance': 1e-16,
            'Relative Step Tolerance': 1e-10,
            'Iteration Limit': 10
        }
    }
    params = ROL.ParameterList(params_dict, "Parameters")
    algo = ROL.Algorithm("Line Search", params)
    x = ROL.StdVector(2)
    x[0] = -1.0
    x[1] = 2.0

    algo.run(x, obj)
    # Check answer to 8 decimal places
    assert round(x[0] - 1.0, 8) == 0.0
    assert round(x[1], 8) == 0.0
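
The test above passes an objective obj to algo.run without defining it. A minimal sketch of a compatible objective, assumed here for illustration (it mirrors the Objective class of Example #8 below and has its minimiser at (1, 0), which matches the assertions):

import ROL

class Objective(ROL.Objective):
    # Hypothetical stand-in for the undefined obj: minimised at x = (1, 0).
    def __init__(self):
        super().__init__()

    def value(self, x, tol):
        return (x[0] - 1)**2 + x[1]**2

    def gradient(self, g, x, tol):
        g[0] = 2 * (x[0] - 1)
        g[1] = 2 * x[1]

obj = Objective()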
Example #3
def test_equality_constraint(pytestconfig):
    mesh = fs.DiskMesh(0.05, radius=2.)

    Q = fs.FeControlSpace(mesh)
    inner = fs.ElasticityInnerProduct(Q, direct_solve=True)
    mesh_m = Q.mesh_m
    (x, y) = fd.SpatialCoordinate(mesh_m)

    q = fs.ControlVector(Q, inner)
    if pytestconfig.getoption("verbose"):
        out = fd.File("domain.pvd")

        def cb(*args):
            out.write(Q.mesh_m.coordinates)
    else:
        cb = None
    f = (pow(2 * x, 2)) + pow(y - 0.1, 2) - 1.2

    J = fsz.LevelsetFunctional(f, Q, cb=cb)
    vol = fsz.LevelsetFunctional(fd.Constant(1.0), Q)
    e = fs.EqualityConstraint([vol])
    emul = ROL.StdVector(1)

    params_dict = {
        'Step': {
            'Type': 'Augmented Lagrangian',
            'Augmented Lagrangian': {
                'Subproblem Step Type': 'Line Search',
                'Penalty Parameter Growth Factor': 2.,
                'Initial Penalty Parameter': 1.,
                'Subproblem Iteration Limit': 20,
            },
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Method'
                }
            },
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 5
            }
        },
        'Status Test': {
            'Gradient Tolerance': 1e-4,
            'Step Tolerance': 1e-10,
            'Iteration Limit': 10
        }
    }

    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q, econ=e, emul=emul)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    state = solver.getAlgorithmState()
    assert (state.gnorm < 1e-4)
    assert (state.cnorm < 1e-6)
Example #4
def test_levelset(dim, inner_t, controlspace_t, use_extension, pytestconfig):
    """Test template for fsz.LevelsetFunctional."""
    verbose = pytestconfig.getoption("verbose")

    clscale = 0.1 if dim == 2 else 0.2

    # make the mesh a bit coarser if we are using a multigrid control space as
    # we are refining anyway
    if controlspace_t == fs.FeMultiGridControlSpace:
        clscale *= 4

    if dim == 2:
        mesh = fs.DiskMesh(clscale)
    elif dim == 3:
        mesh = fs.SphereMesh(clscale)
    else:
        raise NotImplementedError

    if controlspace_t == fs.BsplineControlSpace:
        if dim == 2:
            bbox = [(-2, 2), (-2, 2)]
            orders = [2, 2]
            levels = [4, 4]
        else:
            bbox = [(-3, 3), (-3, 3), (-3, 3)]
            orders = [2, 2, 2]
            levels = [3, 3, 3]
        Q = fs.BsplineControlSpace(mesh, bbox, orders, levels)
    elif controlspace_t == fs.FeMultiGridControlSpace:
        Q = fs.FeMultiGridControlSpace(mesh, refinements=1, order=2)
    else:
        Q = controlspace_t(mesh)

    inner = inner_t(Q)
    # if running with -v or --verbose, then export the shapes
    if verbose:
        out = fd.File("domain.pvd")

        def cb(*args):
            out.write(Q.mesh_m.coordinates)

        cb()
    else:
        cb = None

    # levelset test case
    if dim == 2:
        (x, y) = fd.SpatialCoordinate(Q.mesh_m)
        f = (pow(x, 2)) + pow(1.3 * y, 2) - 1.
    elif dim == 3:
        (x, y, z) = fd.SpatialCoordinate(Q.mesh_m)
        f = (pow(x, 2)) + pow(0.8 * y, 2) + pow(1.3 * z, 2) - 1.

    else:
        raise NotImplementedError

    J = fsz.LevelsetFunctional(f, Q, cb=cb, scale=0.1)

    if use_extension == "w_ext":
        ext = fs.ElasticityExtension(Q.V_r)
    if use_extension == "w_ext_fixed_dim":
        ext = fs.ElasticityExtension(Q.V_r, fixed_dims=[0])
    else:
        ext = None

    q = fs.ControlVector(Q, inner, boundary_extension=ext)

    # These tolerances are not very stringent, but the solutions are correct.
    # With tighter tolerances, the combination
    # FeMultiGridControlSpace-ElasticityInnerProduct fails because the mesh
    # self-intersects (one should probably be more careful with the opt params).
    grad_tol = 1e-1
    itlim = 15
    itlimsub = 15

    # Volume constraint
    vol = fsz.LevelsetFunctional(fd.Constant(1.0), Q, scale=1)
    initial_vol = vol.value(q, None)
    econ = fs.EqualityConstraint([vol], target_value=[initial_vol])
    emul = ROL.StdVector(1)

    # ROL parameters
    params_dict = {
        'Step': {
            'Type': 'Augmented Lagrangian',
            'Augmented Lagrangian': {
                'Subproblem Step Type': 'Line Search',
                'Penalty Parameter Growth Factor': 1.05,
                'Print Intermediate Optimization History': True,
                'Subproblem Iteration Limit': itlimsub
            },
            'Line Search': {
                'Descent Method': {
                    'Type': 'Quasi-Newton Method'
                }
            },
        },
        'General': {
            'Secant': {
                'Type': 'Limited-Memory BFGS',
                'Maximum Storage': 50
            }
        },
        'Status Test': {
            'Gradient Tolerance': grad_tol,
            'Step Tolerance': 1e-10,
            'Iteration Limit': itlim
        }
    }
    params = ROL.ParameterList(params_dict, "Parameters")
    problem = ROL.OptimizationProblem(J, q, econ=econ, emul=emul)
    solver = ROL.OptimizationSolver(problem, params)
    solver.solve()

    # verify that the norm of the gradient at optimum is small enough
    # and that the volume has not changed too much
    state = solver.getAlgorithmState()
    assert (state.gnorm < grad_tol)
    assert abs(vol.value(q, None) - initial_vol) < 1e-2
Example #5
    return out.write(e.solution.split()[0])


# create PDEconstrained objective functional
J_ = PipeObjective(e, Q, cb=cb)
J = fs.ReducedObjective(J_, e)

# add regularization to improve mesh quality
Jq = fsz.MoYoSpectralConstraint(10, fd.Constant(0.5), Q)
J = J + Jq

# Set up volume constraint
vol = fsz.VolumeFunctional(Q)
initial_vol = vol.value(q, None)
econ = fs.EqualityConstraint([vol], target_value=[initial_vol])
emul = ROL.StdVector(1)

# ROL parameters
params_dict = {
    'General': {'Print Verbosity': 0,  # set to 1 to understand output
                'Secant': {'Type': 'Limited-Memory BFGS',
                           'Maximum Storage': 10}},
    'Step': {'Type': 'Augmented Lagrangian',
             'Augmented Lagrangian':
             {'Subproblem Step Type': 'Trust Region',
              'Print Intermediate Optimization History': False,
              'Subproblem Iteration Limit': 10}},
    'Status Test': {'Gradient Tolerance': 1e-2,
                    'Step Tolerance': 1e-2,
                    'Constraint Tolerance': 1e-1,
                    'Iteration Limit': 10}}
Example #6
        jv[0] = assemble(da * dx)

    def applyAdjointJacobian(self, ajv, v, x, tol):
        da = TestFunction(A)
        deriv = assemble(da * dx)
        if self.inner_product is not None:
            grad = self.inner_product.riesz_map(deriv)
        else:
            grad = deriv
        ajv.scale(0)
        ajv.vec += grad
        ajv.scale(v[0])


# Initialise 'ROLVector'
l = ROL.StdVector(1)

x = interpolate(Constant(V / delta), A)
x = FeVector(x.vector(), dot_product)

lower = interpolate(Constant(0.0), A)
lower = FeVector(lower.vector(), dot_product)
upper = interpolate(Constant(1.0), A)
upper = FeVector(upper.vector(), dot_product)

# Instantiate Objective class for poisson problem
obj = ObjR(dot_product)
volConstr = VolConstraint(dot_product)

#set_log_level(30)
Example #7
        da = Function(A, v.vec)
        jv[0] = assemble(da * dx)

    def applyAdjointJacobian(self, ajv, v, x, tol):
        da = TestFunction(A)
        deriv = assemble(da*dx)
        if self.inner_product is not None:
            grad = self.inner_product.riesz_map(deriv)
        else:
            grad = deriv
        ajv.scale(0)
        ajv.vec += grad
        ajv.scale(v[0])

# Initialise 'ROLVector'
l = ROL.StdVector(1)
c = ROL.StdVector(1)
v = ROL.StdVector(1)
v[0] = 1.0
dualv = ROL.StdVector(1)
v.checkVector(c, l)

x = interpolate(Constant(0.5), A)
x = FeVector(x.vector(), dot_product)
g = Function(A)
g = FeVector(g.vector(), dot_product)
d = interpolate(Expression("1 + x[0] * (1-x[0])*x[1] * (1-x[1])", degree=1), A)
d = FeVector(d.vector(), dot_product)
x.checkVector(d, g)

jd = Function(A)
Example #8

class Objective(ROL.Objective):
    def __init__(self):
        super().__init__()

    def value(self, x, tol):
        return (x[0] - 1)**2 + x[1]**2

    def gradient(self, g, x, tol):
        g[0] = 2 * (x[0] - 1)
        g[1] = 2 * x[1]


obj = Objective()
x = ROL.StdVector(2)
x[0] = -1.0
x[1] = 2.0

lower = ROL.StdVector(2)
lower[0] = -10
lower[1] = -10

upper = ROL.StdVector(2)
upper[0] = 0.5
upper[1] = 0.5
bnd = ROL.Bounds(lower, upper, 1.0)

params = ROL.ParameterList(params_dict, "Parameters")
problem = ROL.OptimizationProblem(obj, x)
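
The snippet above builds the bound object bnd and the parameter list but stops before using them. A possible continuation, assumed here for illustration: the bnd keyword of ROL.OptimizationProblem is an assumption (by analogy with the econ/emul keywords used in Examples #3 and #4), and params_dict is not shown in the original.

# Hypothetical continuation: attach the bounds and solve.
problem = ROL.OptimizationProblem(obj, x, bnd=bnd)
solver = ROL.OptimizationSolver(problem, params)
solver.solve()
# With the upper bound of 0.5 active, the minimiser of (x[0] - 1)**2 + x[1]**2
# should be near (0.5, 0.0).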
Example #9
vol = fsz.LevelsetFunctional(fd.Constant(10.0), Q)
if args.problem == "pipe":
    econ_unscaled = fs.EqualityConstraint([vol])

    def wrap(f):
        return fs.DeformationCheckObjective(
            f,
            delta_threshold=0.25 if args.dim == 2 else 0.25,  # noqa
            strict=False)

    scale = 1e1
    J = wrap(scale * J)
    volweight = 0.1 if args.dim == 2 else 1.
    vol = wrap(volweight * scale**0.5 * vol)
    econ = fs.EqualityConstraint([vol])
    emul = ROL.StdVector(1)
    econ_val = ROL.StdVector(1)
elif args.problem == "obstacle":
    if args.dim == 2:
        x, y = fd.SpatialCoordinate(Q.mesh_m)
    else:
        x, y, z = fd.SpatialCoordinate(Q.mesh_m)
    baryx = fsz.LevelsetFunctional(x, Q)
    baryy = fsz.LevelsetFunctional(y, Q)
    econ_unscaled = fs.EqualityConstraint([vol, baryx, baryy])
    if args.dim == 3:
        baryz = fsz.LevelsetFunctional(z, Q)
        econ_unscaled = fs.EqualityConstraint([vol, baryx, baryy, baryz])
    if args.surf:
        scale = 1e-3
    else:
Example #10
                     direct=True,
                     pin_pressure=True)
# e.solve()
# fd.File("out.pvd").write(e.solution.split()[0])
# import sys; sys.exit()

directory = f"./output/stokes/base_{base_inner}_cr_{use_cr}/"
if not os.path.exists(directory):
    os.makedirs(directory, exist_ok=True)
out = fd.File(directory + "u.pvd")

vol = fsz.LevelsetFunctional(fd.Constant(1.0), Q)
baryx = fsz.LevelsetFunctional(x, Q)
baryy = fsz.LevelsetFunctional(y, Q)
econ = fs.EqualityConstraint([vol, baryx, baryy])
emul = ROL.StdVector(3)
econ_val = ROL.StdVector(3)

Je = fsz.EnergyObjective(e, Q, cb=None)
Jr = fs.ReducedObjective(Je, e)
# J = 2e-4 * Jr
J = 1e-2 * Jr
q = fs.ControlVector(Q, inner)

params_dict = {
    "General": {
        "Secant": {
            "Type": "Limited-Memory BFGS",
            "Maximum Storage": 5
        }
    },