Example #1
File: inverse.py  Project: benhills/icepack
    def step(self):
        r"""Take one step of the conjugate gradient iteration"""
        q = self.solution
        s = self.search_direction
        z = self.residual
        δz = self._delta_residual
        α = self.residual_energy / self.search_direction_energy

        Gs = self.operator_product
        dJ = self._rhs
        delta_energy = α * (self._assemble(action(Gs, q)) +
                            0.5 * α * self.search_direction_energy)
        self._energy += delta_energy
        self._objective += delta_energy + α * self._assemble(action(dJ, s))

        q.assign(q + Constant(α) * s)
        z.assign(z - Constant(α) * δz)

        M = self.preconditioner
        residual_energy = self._assemble(firedrake.energy_norm(M, z))
        β = residual_energy / self.residual_energy
        s.assign(Constant(β) * s + z)

        self.update_state()
        self._residual_energy = residual_energy
        Gs = self.operator_product
        self._search_direction_energy = self._assemble(action(Gs, s))

        self._iteration += 1
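For reference, step above implements the standard preconditioned conjugate gradient recurrences. In notation chosen here (not in the source), with M the preconditioner, G the Gauss-Newton operator, z the preconditioned residual, s the search direction and q the solution:

    \alpha = \frac{\langle z, M z \rangle}{\langle s, G s \rangle}, \qquad
    q \leftarrow q + \alpha s, \qquad
    z \leftarrow z - \alpha\,\delta z \quad (M\,\delta z = G s),

    \beta = \frac{\langle z_{\mathrm{new}}, M z_{\mathrm{new}} \rangle}{\langle z, M z \rangle}, \qquad
    s \leftarrow \beta s + z_{\mathrm{new}}.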
Example #2
File: inverse.py  Project: vic1309/icepack
    def gauss_newton_energy_norm(self, q):
        r"""Compute the energy norm of a field w.r.t. the Gauss-Newton operator

        The energy norm of a field :math:`q` w.r.t. the Gauss-Newton operator
        :math:`H` can be computed using one fewer linear solve than by first
        forming :math:`H\cdot q` and then pairing the result with :math:`q`.
        This saves computation when using the conjugate gradient method to
        solve for the search direction.
        """
        u, p = self.state, self.parameter

        dE = derivative(self._E, u)
        dR = derivative(self._R, p)
        dF_du, dF_dp = self._dF_du, derivative(self._F, p)

        v = firedrake.Function(u.function_space())
        firedrake.solve(dF_du == action(dF_dp, q),
                        v,
                        self._bc,
                        solver_parameters=self._solver_params,
                        form_compiler_parameters=self._fc_params)

        return self._assemble(
            firedrake.energy_norm(derivative(dE, u), v) +
            firedrake.energy_norm(derivative(dR, p), q))
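For reference, the identity this method exploits (notation mine, matching the code's variables): with v the tangent solution, the Gauss-Newton energy norm splits into two second-derivative energy norms that require no further solves,

    \langle q, H q \rangle = \langle v, \partial_u^2 E \, v \rangle + \langle q, \partial_p^2 R \, q \rangle,
    \qquad \text{where } \partial_u F \, v = \partial_p F \, q.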
Example #3
    def __init__(self, state, V, direction=[1,2], params=None):
        super(InteriorPenalty, self).__init__(state)

        dt = state.timestepping.dt
        kappa = params['kappa']
        mu = params['mu']
        gamma = TestFunction(V)
        phi = TrialFunction(V)
        self.phi1 = Function(V)
        n = FacetNormal(state.mesh)
        a = inner(gamma,phi)*dx + dt*inner(grad(gamma), grad(phi)*kappa)*dx

        def get_flux_form(dS, M):

            fluxes = (-inner(2*avg(outer(phi, n)), avg(grad(gamma)*M))
                      - inner(avg(grad(phi)*M), 2*avg(outer(gamma, n)))
                      + mu*inner(2*avg(outer(phi, n)), 2*avg(outer(gamma, n)*kappa)))*dS
            return fluxes

        if 1 in direction:
            a += dt*get_flux_form(dS_v, kappa)
        if 2 in direction:
            a += dt*get_flux_form(dS_h, kappa)
        L = inner(gamma,phi)*dx
        problem = LinearVariationalProblem(a, action(L,self.phi1), self.phi1)
        self.solver = LinearVariationalSolver(problem)
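In the LinearVariationalProblem above, action(L, self.phi1) substitutes the coefficient phi1 for the trial function in L, turning a bilinear form into a linear right-hand side. A minimal, self-contained sketch of that substitution (the mesh and names here are illustrative only, not taken from the source):

    from firedrake import (UnitSquareMesh, FunctionSpace, TrialFunction,
                           TestFunction, Function, inner, dx, action, assemble)

    mesh = UnitSquareMesh(8, 8)
    V = FunctionSpace(mesh, 'CG', 1)
    u, v = TrialFunction(V), TestFunction(V)
    a = inner(u, v) * dx        # bilinear form in (u, v)

    f = Function(V)
    f.assign(1.0)
    L = action(a, f)            # linear form in v only, equivalent to inner(f, v)*dx
    b = assemble(L)             # assembles a vector rather than a matrix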
Example #4
def _newton_solve(z, E, scale, tolerance=1e-6, armijo=1e-4, max_iterations=50):
    F = derivative(E, z)
    H = derivative(F, z)

    Q = z.function_space()
    bc = firedrake.DirichletBC(Q, 0, 'on_boundary')
    p = firedrake.Function(Q)
    for iteration in range(max_iterations):
        firedrake.solve(H == -F, p, bc,
            solver_parameters={'ksp_type': 'preonly', 'pc_type': 'lu'})

        dE_dp = assemble(action(F, p))
        α = 1.0
        E0 = assemble(E)
        Ez = assemble(replace(E, {z: z + firedrake.Constant(α) * p}))
        while (Ez > E0 + armijo * α * dE_dp) or np.isnan(Ez):
            α /= 2
            Ez = assemble(replace(E, {z: z + firedrake.Constant(α) * p}))

        z.assign(z + firedrake.Constant(α) * p)
        if abs(dE_dp) < tolerance * assemble(scale):
            return z

    raise ValueError("Newton solver failed to converge after {0} iterations"
                     .format(max_iterations))
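The inner while loop is a backtracking line search against the Armijo sufficient-decrease condition: in the notation of the code, the step length α is accepted once

    E(z + \alpha p) \le E(z) + c\,\alpha\,\langle dE, p \rangle, \qquad c = \texttt{armijo},

and is halved otherwise (NaN trial energies are also rejected).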
Example #5
    def __init__(self, state, V, kappa, mu, bcs=None):
        super(InteriorPenalty, self).__init__(state)

        dt = state.timestepping.dt
        gamma = TestFunction(V)
        phi = TrialFunction(V)
        self.phi1 = Function(V)
        n = FacetNormal(state.mesh)
        a = inner(gamma, phi) * dx + dt * inner(grad(gamma),
                                                grad(phi) * kappa) * dx

        def get_flux_form(dS, M):

            fluxes = (-inner(2 * avg(outer(phi, n)), avg(grad(gamma) * M)) -
                      inner(avg(grad(phi) * M), 2 * avg(outer(gamma, n))) +
                      mu * inner(2 * avg(outer(phi, n)),
                                 2 * avg(outer(gamma, n) * kappa))) * dS
            return fluxes

        a += dt * get_flux_form(dS_v, kappa)
        a += dt * get_flux_form(dS_h, kappa)
        L = inner(gamma, phi) * dx
        problem = LinearVariationalProblem(a,
                                           action(L, self.phi1),
                                           self.phi1,
                                           bcs=bcs)
        self.solver = LinearVariationalSolver(problem)
Example #6
    def __init__(self, problem, tolerance, solver_parameters=None, **kwargs):
        r"""Solve a MinimizationProblem using Newton's method with backtracking
        line search

        Parameters
        ----------
        problem : MinimizationProblem
            The particular problem instance to solve
        tolerance : float
            Dimensionless tolerance for deciding when to stop iterating,
            measured with respect to the problem's scale functional
        solver_parameters : dict (optional)
            Linear solve parameters for computing the search direction
        armijo : float (optional)
            Parameter in the Armijo condition for line search; defaults to
            1e-4, see Nocedal and Wright
        contraction : float (optional)
            Shrinking factor for the backtracking line search; defaults to 0.5
        max_iterations : int (optional)
            Maximum number of outer-level Newton iterations; defaults to 50
        """
        self.problem = problem
        self.tolerance = tolerance
        if solver_parameters is None:
            solver_parameters = default_solver_parameters

        self.armijo = kwargs.pop("armijo", 1e-4)
        self.contraction = kwargs.pop("contraction", 0.5)
        self.max_iterations = kwargs.pop("max_iterations", 50)

        u = self.problem.u
        V = u.function_space()
        v = firedrake.Function(V)
        self.v = v

        E = self.problem.E
        self.F = firedrake.derivative(E, u)
        self.J = firedrake.derivative(self.F, u)
        self.dE_dv = firedrake.action(self.F, v)

        bcs = None
        if self.problem.bcs:
            bcs = firedrake.homogenize(self.problem.bcs)
        problem = firedrake.LinearVariationalProblem(
            self.J,
            -self.F,
            v,
            bcs,
            constant_jacobian=False,
            form_compiler_parameters=self.problem.form_compiler_parameters,
        )
        self.search_direction_solver = firedrake.LinearVariationalSolver(
            problem, solver_parameters=solver_parameters
        )

        self.search_direction_solver.solve()
        self.t = firedrake.Constant(0.0)
        self.iteration = 0
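For orientation, the LinearVariationalProblem built here computes the Newton search direction v from the current iterate u by solving

    J(u)\, v = -F(u), \qquad F = \frac{dE}{du}, \quad J = \frac{dF}{du};

the armijo, contraction and max_iterations options stored above control the backtracking line search on the resulting update (performed elsewhere in the class, not shown in this excerpt).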
Example #7
def callback(solver):
    q = solver.search_direction
    dJ = solver.gradient
    dJ_dq = firedrake.action(dJ, q)
    decrement = firedrake.assemble(dJ_dq, **fc_params) / area

    error = firedrake.assemble(solver.objective) / area
    penalty = firedrake.assemble(solver.regularization) / area
    print(f'{error:10.4g} | {penalty:10.4g} | {decrement:10.4g}')
Example #8
    def evaluate_adj_component(self,
                               inputs,
                               adj_inputs,
                               block_variable,
                               idx,
                               prepared=None):
        if not self.linear and self.func == block_variable.output:
            # We are not able to calculate derivatives wrt initial guess.
            return None
        F_form = prepared["form"]
        adj_sol = prepared["adj_sol"]
        adj_sol_bdy = prepared["adj_sol_bdy"]
        c = block_variable.output
        c_rep = block_variable.saved_output

        if isinstance(c, firedrake.Function):
            trial_function = firedrake.TrialFunction(c.function_space())
        elif isinstance(c, firedrake.Constant):
            mesh = self.compat.extract_mesh_from_form(F_form)
            trial_function = firedrake.TrialFunction(
                c._ad_function_space(mesh))
        elif isinstance(c, firedrake.DirichletBC):
            tmp_bc = self.compat.create_bc(
                c,
                value=self.compat.extract_subfunction(adj_sol_bdy,
                                                      c.function_space()))
            return [tmp_bc]
        elif isinstance(c, self.compat.MeshType):
            # Using CoordinateDerivative requires us to take the action before
            # differentiating; this might change in the future.
            F_form_tmp = firedrake.action(F_form, adj_sol)
            X = firedrake.SpatialCoordinate(c_rep)
            dFdm = firedrake.derivative(
                -F_form_tmp, X, firedrake.TestFunction(c._ad_function_space()))

            dFdm = self.compat.assemble_adjoint_value(dFdm,
                                                      **self.assemble_kwargs)
            return dFdm

        # dFdm_cache works with original variables, not block saved outputs.
        if c in self._dFdm_cache:
            dFdm = self._dFdm_cache[c]
        else:
            dFdm = -firedrake.derivative(self.lhs, c, trial_function)
            dFdm = firedrake.adjoint(dFdm)
            self._dFdm_cache[c] = dFdm

        # Replace the form coefficients with checkpointed values.
        replace_map = self._replace_map(dFdm)
        replace_map[self.func] = self.get_outputs()[0].saved_output
        dFdm = replace(dFdm, replace_map)

        dFdm = dFdm * adj_sol
        dFdm = self.compat.assemble_adjoint_value(dFdm, **self.assemble_kwargs)

        return dFdm
Example #9
File: inverse.py  Project: vic1309/icepack
    def update_search_direction(self):
        r"""Solve the Gauss-Newton system for the new search direction using
        the preconditioned conjugate gradient method"""
        p, q, dJ = self.parameter, self.search_direction, self.gradient

        dR = derivative(self.regularization, self.parameter)
        Q = q.function_space()
        M = firedrake.TrialFunction(Q) * firedrake.TestFunction(Q) * dx + \
            derivative(dR, p)

        # Compute the preconditioned residual
        z = firedrake.Function(Q)
        firedrake.solve(M == -dJ,
                        z,
                        solver_parameters=self._solver_params,
                        form_compiler_parameters=self._fc_params)

        # This variable is a search direction for a search direction, which
        # is definitely not confusing at all.
        s = z.copy(deepcopy=True)
        q *= 0.0

        old_cost = np.inf
        while True:
            z_mnorm = self._assemble(firedrake.energy_norm(M, z))
            s_hnorm = self.gauss_newton_energy_norm(s)
            α = z_mnorm / s_hnorm

            δz = firedrake.Function(Q)
            g = self.gauss_newton_mult(s)
            firedrake.solve(M == g,
                            δz,
                            solver_parameters=self._solver_params,
                            form_compiler_parameters=self._fc_params)

            q += α * s
            z -= α * δz

            β = self._assemble(firedrake.energy_norm(M, z)) / z_mnorm
            s *= β
            s += z

            energy_norm = self.gauss_newton_energy_norm(q)
            cost = 0.5 * energy_norm + self._assemble(action(dJ, q))

            if (abs(old_cost - cost) /
                (0.5 * energy_norm) < self._search_tolerance):
                return

            old_cost = cost
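The stopping test compares successive values of the quadratic model that the conjugate gradient iteration minimizes; in the code's notation,

    m(q) = \tfrac{1}{2}\langle q, H q \rangle + \langle dJ, q \rangle,

and the loop exits once |m_{\mathrm{old}} - m| / (\tfrac{1}{2}\langle q, H q \rangle) drops below the search tolerance.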
Example #10
File: inverse.py  Project: vic1309/icepack
    def gauss_newton_mult(self, q):
        """Multiply a field by the Gauss-Newton operator"""
        u, p = self.state, self.parameter

        dE = derivative(self._E, u)
        dR = derivative(self._R, p)
        dF_du, dF_dp = self._dF_du, derivative(self._F, p)

        w = firedrake.Function(u.function_space())
        firedrake.solve(dF_du == action(dF_dp, q),
                        w,
                        self._bc,
                        solver_parameters=self._solver_params,
                        form_compiler_parameters=self._fc_params)

        v = firedrake.Function(u.function_space())
        firedrake.solve(adjoint(dF_du) == derivative(dE, u, w),
                        v,
                        self._bc,
                        solver_parameters=self._solver_params,
                        form_compiler_parameters=self._fc_params)

        return action(adjoint(dF_dp), v) + derivative(dR, p, q)
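Written out, the two solves apply the Gauss-Newton approximation of the Hessian; a sketch in the code's notation, with w the tangent solution and v the adjoint solution from the two firedrake.solve calls:

    H q = \left(\frac{\partial F}{\partial p}\right)^{*}
          \left(\frac{\partial F}{\partial u}\right)^{-*}
          \frac{\partial^2 E}{\partial u^2}
          \left(\frac{\partial F}{\partial u}\right)^{-1}
          \frac{\partial F}{\partial p}\, q
          \;+\; \frac{\partial^2 R}{\partial p^2}\, q.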
Example #11
File: inverse.py  Project: vic1309/icepack
    def _setup(self, problem, callback=(lambda s: None)):
        self._problem = problem
        self._callback = callback

        self._p = problem.parameter.copy(deepcopy=True)
        self._u = problem.state.copy(deepcopy=True)

        self._model_args = dict(**problem.model_args,
                                dirichlet_ids=problem.dirichlet_ids)
        u_name, p_name = problem.state_name, problem.parameter_name
        args = dict(**self._model_args, **{u_name: self._u, p_name: self._p})

        # Make the form compiler use a reasonable number of quadrature points
        degree = problem.model.quadrature_degree(**args)
        self._fc_params = {'quadrature_degree': degree}

        # Create the error, regularization, and barrier functionals
        self._E = problem.objective(self._u)
        self._R = problem.regularization(self._p)
        self._J = self._E + self._R

        # Create the weak form of the forward model, the adjoint state, and
        # the derivative of the objective functional
        self._F = derivative(problem.model.action(**args), self._u)
        self._dF_du = derivative(self._F, self._u)

        # Create a search direction
        dR = derivative(self._R, self._p)
        self._solver_params = {'ksp_type': 'preonly', 'pc_type': 'lu'}
        Q = self._p.function_space()
        self._q = firedrake.Function(Q)

        # Create the adjoint state variable
        V = self.state.function_space()
        self._λ = firedrake.Function(V)
        dF_dp = derivative(self._F, self._p)

        # Create Dirichlet BCs where they apply for the adjoint solve
        rank = self._λ.ufl_element().num_sub_elements()
        if rank == 0:
            zero = firedrake.Constant(0)
        else:
            zero = firedrake.as_vector((0, ) * rank)
        self._bc = firedrake.DirichletBC(V, zero, problem.dirichlet_ids)

        # Create the derivative of the objective functional
        self._dE = derivative(self._E, self._u)
        dR = derivative(self._R, self._p)
        self._dJ = (action(adjoint(dF_dp), self._λ) + dR)
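The last line assembles the reduced gradient of the objective with respect to the parameter; with λ the adjoint state created above (its defining adjoint solve is set up elsewhere in the class, compare Example #21 below),

    dJ = \left(\frac{\partial F}{\partial p}\right)^{*} \lambda + \frac{\partial R}{\partial p}.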
Example #12
    def __init__(self, state, V, direction=[], supg_params=None):
        super(SUPGAdvection, self).__init__(state)
        dt = state.timestepping.dt
        params = supg_params.copy() if supg_params else {}
        params.setdefault('a0', dt/sqrt(15.))
        params.setdefault('a1', dt/sqrt(15.))

        gamma = TestFunction(V)
        theta = TrialFunction(V)
        self.theta0 = Function(V)

        # make SUPG test function
        taus = [params["a0"], params["a1"]]
        for i in direction:
            taus[i] = 0.0
        tau = Constant(((taus[0], 0.), (0., taus[1])))

        dgamma = dot(dot(self.ubar, tau), grad(gamma))
        gammaSU = gamma + dgamma

        n = FacetNormal(state.mesh)
        un = 0.5*(dot(self.ubar, n) + abs(dot(self.ubar, n)))

        a_mass = gammaSU*theta*dx
        arhs = a_mass - dt*gammaSU*dot(self.ubar, grad(theta))*dx

        if 1 in direction:
            arhs -= (
                dt*dot(jump(gammaSU), (un('+')*theta('+')
                                       - un('-')*theta('-')))*dS_v
                - dt*(gammaSU('+')*dot(self.ubar('+'), n('+'))*theta('+')
                      + gammaSU('-')*dot(self.ubar('-'), n('-'))*theta('-'))*dS_v
            )
        if 2 in direction:
            arhs -= (
                dt*dot(jump(gammaSU), (un('+')*theta('+')
                                       - un('-')*theta('-')))*dS_h
                - dt*(gammaSU('+')*dot(self.ubar('+'), n('+'))*theta('+')
                      + gammaSU('-')*dot(self.ubar('-'), n('-'))*theta('-'))*dS_h
            )

        self.theta1 = Function(V)
        self.dtheta = Function(V)
        problem = LinearVariationalProblem(a_mass, action(arhs,self.theta1), self.dtheta)
        self.solver = LinearVariationalSolver(problem,
                                              options_prefix='SUPGAdvection')
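The SUPG modification augments the test function with a streamline term; in the code's notation,

    \gamma_{SU} = \gamma + (\bar{u} \cdot \tau) \cdot \nabla \gamma,
    \qquad \tau = \mathrm{diag}(a_0, a_1),

with the entries of τ zeroed in the directions listed in the direction argument.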
Example #13
def complexity(form, parameters, action=False):
    if action:
        coef = firedrake.Function(form.arguments()[0].function_space())
        form = firedrake.action(form, coef)
    impero_kernel, index_names = tsfc.driver.compile_form(
        form, parameters=parameters)[0]

    indices = IndexDict(
        {idx: sympy.symbols(name)
         for idx, name in index_names})

    expr = expression(impero_kernel.tree,
                      impero_kernel.temporaries,
                      indices,
                      top=True)
    # Currently assume p + 1 quadrature points in each direction.
    p1 = sympy.symbols("p") + 1
    return expr.subs([(i, p1) for i in indices.values()]).expand()
Example #14
File: inverse.py  Project: benhills/icepack
    def reinit(self):
        r"""Restart the solution to 0"""
        self._iteration = 0

        M = self._preconditioner
        z = self.residual
        s = self.search_direction
        Gs = self.operator_product
        Q = z.function_space()

        self.solution.assign(firedrake.Function(Q))
        self._residual_solver.solve()
        s.assign(z)
        self._residual_energy = self._assemble(firedrake.energy_norm(M, z))

        self.update_state()
        self._search_direction_energy = self._assemble(action(Gs, s))

        self._energy = 0.
        self._objective = 0.
Example #15
    def __init__(self, state, V, continuity=False):

        super(DGAdvection, self).__init__(state)

        element = V.fiat_element
        assert element.entity_dofs() == element.entity_closure_dofs(), "Provided space is not discontinuous"
        dt = state.timestepping.dt

        if V.extruded:
            surface_measure = (dS_h + dS_v)
        else:
            surface_measure = dS

        phi = TestFunction(V)
        D = TrialFunction(V)
        self.D1 = Function(V)
        self.dD = Function(V)

        n = FacetNormal(state.mesh)
        # ( dot(v, n) + |dot(v, n)| )/2.0
        un = 0.5*(dot(self.ubar, n) + abs(dot(self.ubar, n)))

        a_mass = inner(phi,D)*dx

        if continuity:
            a_int = -inner(grad(phi), outer(D, self.ubar))*dx
        else:
            a_int = -inner(div(outer(phi,self.ubar)),D)*dx

        a_flux = (dot(jump(phi), un('+')*D('+') - un('-')*D('-')))*surface_measure
        arhs = a_mass - dt*(a_int + a_flux)

        DGproblem = LinearVariationalProblem(a_mass, action(arhs,self.D1),
                                             self.dD)
        self.DGsolver = LinearVariationalSolver(DGproblem,
                                                solver_parameters={
                                                    'ksp_type':'preonly',
                                                    'pc_type':'bjacobi',
                                                    'sub_pc_type': 'ilu'},
                                                options_prefix='DGAdvection')
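With u_n = ½(ū·n + |ū·n|) the upwind switch from the comment above, the problem assembled here performs one explicit (forward Euler) stage of the DG-discretised advection or continuity equation,

    M\, D^{\star} = M\, D^{n} - \Delta t \left( a_{\mathrm{int}}(D^{n}) + a_{\mathrm{flux}}(D^{n}) \right),

where M is the DG mass matrix a_mass, D^n is self.D1 and D^* is stored in self.dD.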
Example #16
    def solve(self, atol=0.0, rtol=1e-6, etol=0.0, max_iterations=200):
        r"""Search for a new value of the parameters, stopping once either
        the objective functional gets below a threshold value or stops
        improving.

        Parameters
        ----------
        atol : float
            Absolute stopping tolerance; stop iterating when the objective
            drops below this value
        rtol : float
            Relative stopping tolerance; stop iterating when the relative
            decrease in the objective drops below this value
        etol : float
            Expectation stopping tolerance; stop iterating when the relative
            expected decrease in the objective from the Newton decrement drops
            below this value
        max_iterations : int
            Maximum number of iterations to take
        """
        J_initial = np.inf

        for iteration in range(max_iterations):
            J = self._assemble(self._J)

            q = self.search_direction
            dJ_dq = self._assemble(firedrake.action(self.gradient, q))

            if (
                ((J_initial - J) < rtol * J_initial)
                or (-dJ_dq < etol * J)
                or (J <= atol)
            ):
                return iteration

            J_initial = J
            self.step()

        return max_iterations
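A hypothetical call, assuming solver is an instance of the class this method belongs to (names illustrative):

    num_iterations = solver.solve(rtol=1e-6, etol=1e-8, max_iterations=100)
    print(f'converged after {num_iterations} iterations')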
Example #17
    def __init__(self, equation, alpha):

        residual = equation.residual.label_map(
            lambda t: t.has_label(linearisation),
            lambda t: Term(t.get(linearisation).form, t.labels), drop)

        dt = equation.state.dt
        W = equation.function_space
        beta = dt * alpha

        # Split up the rhs vector (symbolically)
        self.xrhs = Function(W)

        aeqn = residual.label_map(
            lambda t:
            (t.has_label(time_derivative) and t.has_label(linearisation)),
            map_if_false=lambda t: beta * t)
        Leqn = residual.label_map(
            lambda t:
            (t.has_label(time_derivative) and t.has_label(linearisation)),
            map_if_false=drop)

        # Place to put result of solver
        self.dy = Function(W)

        # Solver
        bcs = equation.bcs['u']
        problem = LinearVariationalProblem(aeqn.form,
                                           action(Leqn.form, self.xrhs),
                                           self.dy,
                                           bcs=bcs)

        self.solver = LinearVariationalSolver(
            problem,
            solver_parameters=self.solver_parameters,
            options_prefix='linear_solver')
Example #18
    def apply_adjoint_action(self, x, out):
        # fd.assemble(fd.action(self.a, x), tensor=out)
        out.assign(fd.assemble(fd.action(self.a, x)))
Example #19
x = ufl.SpatialCoordinate(mesh)
expr = ufl.as_vector([ufl.sin(2 * ufl.pi * x[0]), ufl.cos(2 * ufl.pi * x[1])])
u = fd.interpolate(expr, V)

u_dot = fd.Function(V)
v = fd.TestFunction(V)

nu = fd.Constant(0.0001)  # for burgers
if equation == "heat":
    nu = fd.Constant(0.1)  # for heat

M = fd.derivative(fd.inner(u, v) * fd.dx, u)
R = -(fd.inner(fd.grad(u) * u, v) + nu * fd.inner(fd.grad(u), fd.grad(v))) * fd.dx
if equation == "heat":
    R = -nu * fd.inner(fd.grad(u), fd.grad(v)) * fd.dx
F = fd.action(M, u_dot) - R

bc = fd.DirichletBC(V, (0.0, 0.0), "on_boundary")

t = 0.0
end = 0.1
tspan = (t, end)

state_out = fd.File("result/state.pvd")


def ts_monitor(ts, steps, time, X):
    state_out.write(u, time=time)


problem = firedrake_ts.DAEProblem(F, u, u_dot, tspan, bcs=bc)
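To actually advance the system, the excerpt would continue by wrapping the problem in a time stepper; a sketch assuming the firedrake_ts DAESolver interface and that the PETSc TS object is exposed as solver.ts (assumptions, not shown in the original):

    solver = firedrake_ts.DAESolver(problem)
    solver.ts.setMonitor(ts_monitor)   # PETSc monitor signature: (ts, steps, time, X)
    solver.solve()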
Example #20
File: inverse.py  Project: benhills/icepack
    def __init__(self, solver):
        r"""State machine for solving the Gauss-Newton subproblem via the
        preconditioned conjugate gradient method"""
        self._assemble = solver._assemble
        u = solver.state
        p = solver.parameter
        E = solver._E
        dE = derivative(E, u)
        R = solver._R
        dR = derivative(R, p)
        F = solver._F
        dF_du = derivative(F, u)
        dF_dp = derivative(F, p)
        # TODO: Make this an arbitrary RHS -- the solver can set it to the
        # gradient if we want
        dJ = solver.gradient
        bc = solver._bc

        V = u.function_space()
        Q = p.function_space()

        # Create the preconditioned residual and solver
        z = firedrake.Function(Q)
        s = firedrake.Function(Q)
        φ, ψ = firedrake.TestFunction(Q), firedrake.TrialFunction(Q)
        M = φ * ψ * dx + derivative(dR, p)
        residual_problem = firedrake.LinearVariationalProblem(
            M,
            -dJ,
            z,
            form_compiler_parameters=solver._fc_params,
            constant_jacobian=False)
        residual_solver = firedrake.LinearVariationalSolver(
            residual_problem, solver_parameters=solver._solver_params)

        self._preconditioner = M
        self._residual = z
        self._search_direction = s
        self._residual_solver = residual_solver

        # Create a variable to store the current solution of the Gauss-Newton
        # problem and the solutions of the auxiliary tangent sub-problems
        q = firedrake.Function(Q)
        v = firedrake.Function(V)
        w = firedrake.Function(V)

        # Create linear problem and solver objects for the auxiliary tangent
        # sub-problems
        tangent_linear_problem = firedrake.LinearVariationalProblem(
            dF_du,
            action(dF_dp, s),
            w,
            bc,
            form_compiler_parameters=solver._fc_params,
            constant_jacobian=False)
        tangent_linear_solver = firedrake.LinearVariationalSolver(
            tangent_linear_problem, solver_parameters=solver._solver_params)

        adjoint_tangent_linear_problem = firedrake.LinearVariationalProblem(
            adjoint(dF_du),
            derivative(dE, u, w),
            v,
            bc,
            form_compiler_parameters=solver._fc_params,
            constant_jacobian=False)
        adjoint_tangent_linear_solver = firedrake.LinearVariationalSolver(
            adjoint_tangent_linear_problem,
            solver_parameters=solver._solver_params)

        self._rhs = dJ
        self._solution = q
        self._tangent_linear_solution = w
        self._tangent_linear_solver = tangent_linear_solver
        self._adjoint_tangent_linear_solution = v
        self._adjoint_tangent_linear_solver = adjoint_tangent_linear_solver

        self._product = action(adjoint(dF_dp), v) + derivative(dR, p, s)

        # Create the update to the residual and the associated solver
        δz = firedrake.Function(Q)
        Gs = self._product
        delta_residual_problem = firedrake.LinearVariationalProblem(
            M,
            Gs,
            δz,
            form_compiler_parameters=solver._fc_params,
            constant_jacobian=False)
        delta_residual_solver = firedrake.LinearVariationalSolver(
            delta_residual_problem, solver_parameters=solver._solver_params)

        self._delta_residual = δz
        self._delta_residual_solver = delta_residual_solver

        self._residual_energy = 0.
        self._search_direction_energy = 0.

        self.reinit()
Example #21
File: inverse.py  Project: benhills/icepack
    def _setup(self, problem, callback=(lambda s: None)):
        self._problem = problem
        self._callback = callback

        self._p = problem.parameter.copy(deepcopy=True)
        self._u = problem.state.copy(deepcopy=True)

        self._solver = self.problem.solver_type(self.problem.model,
                                                **self.problem.solver_kwargs)
        u_name, p_name = problem.state_name, problem.parameter_name
        solve_kwargs = dict(**problem.diagnostic_solve_kwargs, **{
            u_name: self._u,
            p_name: self._p
        })

        # Make the form compiler use a reasonable number of quadrature points
        degree = problem.model.quadrature_degree(**solve_kwargs)
        self._fc_params = {'quadrature_degree': degree}

        # Create the error, regularization, and barrier functionals
        self._E = problem.objective(self._u)
        self._R = problem.regularization(self._p)
        self._J = self._E + self._R

        # Create the weak form of the forward model, the adjoint state, and
        # the derivative of the objective functional
        A = problem.model.action(**solve_kwargs)
        self._F = derivative(A, self._u)
        self._dF_du = derivative(self._F, self._u)

        # Create a search direction
        dR = derivative(self._R, self._p)
        # TODO: Make this customizable
        self._solver_params = default_solver_parameters
        Q = self._p.function_space()
        self._q = firedrake.Function(Q)

        # Create the adjoint state variable
        V = self.state.function_space()
        self._λ = firedrake.Function(V)
        dF_dp = derivative(self._F, self._p)

        # Create Dirichlet BCs where they apply for the adjoint solve
        rank = self._λ.ufl_element().num_sub_elements()
        if rank == 0:
            zero = Constant(0)
        else:
            zero = firedrake.as_vector((0, ) * rank)
        self._bc = firedrake.DirichletBC(V, zero, problem.dirichlet_ids)

        # Create the derivative of the objective functional
        self._dE = derivative(self._E, self._u)
        dR = derivative(self._R, self._p)
        self._dJ = (action(adjoint(dF_dp), self._λ) + dR)

        # Create problem and solver objects for the adjoint state
        L = adjoint(self._dF_du)
        adjoint_problem = firedrake.LinearVariationalProblem(
            L,
            -self._dE,
            self._λ,
            self._bc,
            form_compiler_parameters=self._fc_params,
            constant_jacobian=False)
        self._adjoint_solver = firedrake.LinearVariationalSolver(
            adjoint_problem, solver_parameters=self._solver_params)
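The adjoint problem constructed at the end solves, in the code's notation,

    \left(\frac{\partial F}{\partial u}\right)^{*} \lambda = -\frac{\partial E}{\partial u},

so that the reduced gradient self._dJ above can be assembled without any further PDE solves.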
Example #22
def newton_search(E, u, bc, tolerance, scale,
                  max_iterations=50, armijo=1e-4, contraction_factor=0.5,
                  form_compiler_parameters={},
                  solver_parameters={'ksp_type': 'preonly', 'pc_type': 'lu'}):
    r"""Find the minimizer of a convex functional

    Parameters
    ----------
    E : firedrake.Form
        The functional to be minimized
    u : firedrake.Function
        Initial guess for the minimizer; overwritten with the result
    bc : firedrake.DirichletBC
        Boundary conditions for the search direction solve
    tolerance : float
        Stopping criterion for the optimization procedure
    scale : firedrake.Form
        A positive scale functional by which to measure the objective
    max_iterations : int, optional
        Optimization procedure will stop at this many iterations regardless
        of convergence
    armijo : float, optional
        The constant in the Armijo condition (see Nocedal and Wright)
    contraction_factor : float, optional
        The amount by which to backtrack in the line search if the Armijo
        condition is not satisfied
    form_compiler_parameters : dict, optional
        Extra options to pass to the firedrake form compiler
    solver_parameters : dict, optional
        Extra options to pass to the linear solver

    Returns
    -------
    firedrake.Function
        The approximate minimizer of `E` to within tolerance
    """
    F = firedrake.derivative(E, u)
    H = firedrake.derivative(F, u)
    v = firedrake.Function(u.function_space())
    dE_dv = firedrake.action(F, v)

    def assemble(*args, **kwargs):
        return firedrake.assemble(
            *args, **kwargs, form_compiler_parameters=form_compiler_parameters)

    problem = firedrake.LinearVariationalProblem(H, -F, v, bc,
                  form_compiler_parameters=form_compiler_parameters,
                  constant_jacobian=False)
    solver = firedrake.LinearVariationalSolver(problem,
                 solver_parameters=solver_parameters)

    n = 0
    while True:
        # Compute a search direction
        solver.solve()

        # Compute the directional derivative, check if we're done
        slope = assemble(dE_dv)
        assert slope < 0
        if (abs(slope) < assemble(scale) * tolerance) or (n >= max_iterations):
            return u

        # Backtracking search
        E0 = assemble(E)
        α = firedrake.Constant(1)
        Eα = firedrake.replace(E, {u: u + α * v})
        while assemble(Eα) > E0 + armijo * α.values()[0] * slope:
            α.assign(α * contraction_factor)

        u.assign(u + α * v)
        n += 1
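A self-contained usage sketch; the mesh, energy functional and tolerances below are illustrative choices, not taken from the source:

    import firedrake
    from firedrake import inner, grad, sqrt, dx

    mesh = firedrake.UnitSquareMesh(32, 32)
    Q = firedrake.FunctionSpace(mesh, 'CG', 1)
    u = firedrake.Function(Q)

    f = firedrake.Constant(0.1)
    E = (sqrt(1 + inner(grad(u), grad(u))) - f * u) * dx   # convex minimal-surface-type energy
    scale = sqrt(1 + inner(grad(u), grad(u))) * dx         # positive scale functional
    bc = firedrake.DirichletBC(Q, 0, 'on_boundary')

    u = newton_search(E, u, bc, tolerance=1e-6, scale=scale)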
Example #23
def compliance_optimization(n_iters=200):

    output_dir = "cantilever/"

    path = os.path.abspath(__file__)
    dir_path = os.path.dirname(path)
    m = fd.Mesh(f"{dir_path}/mesh_cantilever.msh")
    mesh = fd.MeshHierarchy(m, 0)[-1]

    # Perturb the mesh coordinates. Necessary to calculate shape derivatives
    S = fd.VectorFunctionSpace(mesh, "CG", 1)
    s = fd.Function(S, name="deform")
    mesh.coordinates.assign(mesh.coordinates + s)

    # Initial level set function
    x, y = fd.SpatialCoordinate(mesh)
    PHI = fd.FunctionSpace(mesh, "CG", 1)
    lx = 2.0
    ly = 1.0
    phi_expr = (
        -cos(6.0 / lx * pi * x) * cos(4.0 * pi * y)
        - 0.6
        + max_value(200.0 * (0.01 - x ** 2 - (y - ly / 2) ** 2), 0.0)
        + max_value(100.0 * (x + y - lx - ly + 0.1), 0.0)
        + max_value(100.0 * (x - y - lx + 0.1), 0.0)
    )
    # Avoid recording the interpolation operation on the adjoint tape;
    # otherwise the shape derivatives will not be correct.
    with fda.stop_annotating():
        phi = fd.interpolate(phi_expr, PHI)
        phi.rename("LevelSet")
        fd.File(output_dir + "phi_initial.pvd").write(phi)

    # Physics. Elasticity
    rho_min = 1e-5
    beta = fd.Constant(200.0)

    def hs(phi, beta):
        return fd.Constant(1.0) / (
            fd.Constant(1.0) + exp(-beta * phi)
        ) + fd.Constant(rho_min)

    H1_elem = fd.VectorElement("CG", mesh.ufl_cell(), 1)
    W = fd.FunctionSpace(mesh, H1_elem)

    u = fd.TrialFunction(W)
    v = fd.TestFunction(W)

    # Elasticity parameters
    E, nu = 1.0, 0.3
    mu, lmbda = fd.Constant(E / (2 * (1 + nu))), fd.Constant(
        E * nu / ((1 + nu) * (1 - 2 * nu))
    )

    def epsilon(u):
        return sym(nabla_grad(u))

    def sigma(v):
        return 2.0 * mu * epsilon(v) + lmbda * tr(epsilon(v)) * Identity(2)

    a = inner(hs(-phi, beta) * sigma(u), nabla_grad(v)) * dx
    t = fd.Constant((0.0, -75.0))
    L = inner(t, v) * ds(2)

    bc = fd.DirichletBC(W, fd.Constant((0.0, 0.0)), 1)
    parameters = {
        "ksp_type": "preonly",
        "pc_type": "lu",
        "mat_type": "aij",
        "ksp_converged_reason": None,
        "pc_factor_mat_solver_type": "mumps",
    }
    u_sol = fd.Function(W)
    F = fd.action(a, u_sol) - L
    problem = fd.NonlinearVariationalProblem(F, u_sol, bcs=bc)
    solver = fd.NonlinearVariationalSolver(
        problem, solver_parameters=parameters
    )
    solver.solve()
    # fd.solve(
    #    a == L, u_sol, bcs=[bc], solver_parameters=parameters
    # )  # , nullspace=nullspace)
    with fda.stop_annotating():
        fd.File("u_sol.pvd").write(u_sol)

    # Cost function: Compliance
    J = fd.assemble(
        fd.Constant(1e-2)
        * inner(hs(-phi, beta) * sigma(u_sol), epsilon(u_sol))
        * dx
    )

    # Constraint: Volume
    with fda.stop_annotating():
        total_volume = fd.assemble(fd.Constant(1.0) * dx(domain=mesh))
    VolPen = fd.assemble(hs(-phi, beta) * dx)
    # Needed to track the value of the volume
    VolControl = fda.Control(VolPen)
    Vval = total_volume / 2.0

    phi_pvd = fd.File("phi_evolution.pvd", target_continuity=fd.H1)

    def deriv_cb(phi):
        with fda.stop_annotating():
            phi_pvd.write(phi[0])

    c = fda.Control(s)
    Jhat = LevelSetFunctional(J, c, phi, derivative_cb_pre=deriv_cb)
    Vhat = LevelSetFunctional(VolPen, c, phi)
    beta_param = 0.1
    # Boundary conditions for the shape derivatives:
    # the deformation velocity must vanish on the Dirichlet boundaries.
    bcs_vel = fd.DirichletBC(S, fd.Constant((0.0, 0.0)), (1, 2))
    # Regularize the shape derivatives
    reg_solver = RegularizationSolver(
        S,
        mesh,
        beta=beta_param,
        gamma=1.0e5,
        dx=dx,
        bcs=bcs_vel,
        output_dir=None,
    )
    # Hamilton-Jacobi equation to advect the level set
    dt = 0.05
    tol = 1e-5

    # Optimization problem
    vol_constraint = Constraint(Vhat, Vval, VolControl)
    problem = InfDimProblem(Jhat, reg_solver, ineqconstraints=vol_constraint)

    parameters = {
        "ksp_type": "preonly",
        "pc_type": "lu",
        "mat_type": "aij",
        "ksp_converged_reason": None,
        "pc_factor_mat_solver_type": "mumps",
    }

    params = {
        "alphaC": 3.0,
        "K": 0.1,
        "debug": 5,
        "alphaJ": 1.0,
        "dt": dt,
        "maxtrials": 10,
        "maxit": n_iters,
        "itnormalisation": 50,
        "tol": tol,
    }
    results = nlspace_solve(problem, params)

    return results