示例#1
0
def evaluate_pushforward(
    firedrake_output: BackendVariable,
    firedrake_inputs: Collection[BackendVariable],
    tape: pyadjoint.Tape,
    Δnumpy_inputs: Collection[np.array],
) -> Collection[np.array]:
    """Propagate derivative information forward from inputs to outputs.

    Evaluates a Jacobian-vector product via pyadjoint's tangent linear
    model, i.e. forward-mode automatic differentiation.

    Input:
        firedrake_output (AdjFloat or Function): Firedrake representation of the output from firedrake_function(*firedrake_inputs)
        firedrake_inputs (collection of BackendVariable): Firedrake representation of the input args
        tape (pyadjoint.Tape): pyadjoint's saved computational graph
        Δnumpy_inputs (collection of np.array): NumPy array representation of the tangent vector to multiply with Jacobian
    Output:
        dnumpy_output (np.array):
            NumPy array representation of the Jacobian of
            firedrake_function(*firedrake_inputs) applied to `Δnumpy_inputs`
    """
    # Clear any derivative state left over from a previous evaluation.
    tape.reset_variables()

    # Seed the tangent linear model: attach each tangent direction to the
    # block variable of the corresponding input on the tape.
    tangent_inputs = convert_all_to_backend(firedrake_inputs, *Δnumpy_inputs)
    for inp, tangent in zip(firedrake_inputs, tangent_inputs):
        inp.block_variable.tlm_value = tangent

    # Forward sweep over the tape propagates the tangents to the output.
    tape.evaluate_tlm()

    # Read off the output tangent and convert it back to a NumPy array.
    return to_numpy(firedrake_output.block_variable.tlm_value)
示例#2
0
def evaluate_vjp(
    dnumpy_output: np.array,
    fenics_output: FenicsVariable,
    fenics_inputs: Iterable[FenicsVariable],
    tape: pyadjoint.Tape,
) -> Tuple[np.array]:
    """Computes the gradients of the output with respect to the inputs.

    Evaluates a vector-Jacobian product via pyadjoint's adjoint model,
    i.e. reverse-mode automatic differentiation.

    Input:
        dnumpy_output (np.array): NumPy array representation of the tangent covector to multiply the transposed Jacobian with
        fenics_output (AdjFloat or Function): FEniCS representation of the output from fenics_function(*fenics_inputs)
        fenics_inputs (iterable of FenicsVariable): FEniCS representation of the input args
        tape (pyadjoint.Tape): pyadjoint's saved computational graph
    Output:
        dnumpy_inputs (tuple of np.array):
            NumPy array representation of `dnumpy_output` times the Jacobian
            of fenics_function(*fenics_inputs) wrt to every fenics_input;
            an entry is None when no adjoint value was computed for that input
    """
    # Convert tangent covector (adjoint variable) to a FEniCS variable
    Δfenics_output = numpy_to_fenics(dnumpy_output, fenics_output)
    # pyadjoint doesn't allow setting a Function as block_variable.adj_value,
    # so pass the underlying vector instead.
    if isinstance(Δfenics_output, (fenics.Function, fenics_adjoint.Function)):
        Δfenics_output = Δfenics_output.vector()

    tape.reset_variables()
    fenics_output.block_variable.adj_value = Δfenics_output
    # Restrict the adjoint sweep to the blocks that influence the inputs.
    with tape.marked_nodes(fenics_inputs):
        tape.evaluate_adj(markings=True)
    dfenics_inputs = (fi.block_variable.adj_value for fi in fenics_inputs)

    # Convert FEniCS gradients to NumPy array representation
    dnumpy_inputs = tuple(
        None if dfi is None else np.asarray(fenics_to_numpy(dfi))
        for dfi in dfenics_inputs)

    return dnumpy_inputs
示例#3
0
def vjp_fem_eval_impl(
    g: np.array,
    fenics_output: FenicsVariable,
    fenics_inputs: Iterable[FenicsVariable],
    tape: pyadjoint.Tape,
) -> Tuple[np.array]:
    """Computes the gradients of the output with respect to the inputs.

    Replays pyadjoint's tape in reverse, seeded on the output with the
    covector ``g``, and returns one gradient (or None) per input.
    """
    # The adjoint seed must be a FEniCS object matching the output variable.
    adj_value = numpy_to_fenics(g, fenics_output)
    if isinstance(adj_value, (fenics.Function, fenics_adjoint.Function)):
        # block_variable.adj_value cannot hold a Function; use its vector.
        adj_value = adj_value.vector()

    tape.reset_variables()
    fenics_output.block_variable.adj_value = adj_value
    with tape.marked_nodes(fenics_inputs):
        tape.evaluate_adj(markings=True)

    # Convert each FEniCS gradient to a jax-compatible array representation;
    # inputs with no adjoint contribution yield None.
    return tuple(
        None if grad is None else np.asarray(fenics_to_numpy(grad))
        for grad in (fi.block_variable.adj_value for fi in fenics_inputs)
    )
示例#4
0
def evaluate_pullback(
    firedrake_output: BackendVariable,
    firedrake_inputs: Collection[BackendVariable],
    tape: pyadjoint.Tape,
    Δnumpy_output: np.array,
) -> Collection[np.array]:
    """Pullback is a function to propagate the derivative information from outputs to inputs.
    It also corresponds to evaluating a Jacobian transpose vector product or vector Jacobian product.
    This is a reverse-mode automatic differentiation.
    Input:
        firedrake_output (AdjFloat or Function): Firedrake representation of the output from firedrake_function(*firedrake_inputs)
        firedrake_inputs (collection of BackendVariable): Firedrake representation of the input args
        tape (pyadjoint.Tape): pyadjoint's saved computational graph
        Δnumpy_output (np.array): NumPy array representation of the tangent covector to multiply transposed Jacobian with
    Output:
        dnumpy_inputs (collection of np.array):
            NumPy array representation of the `Δnumpy_output` times Jacobian
            of firedrake_function(*firedrake_inputs) wrt to every firedrake_input;
            an entry is None when no adjoint value was computed for that input
    """
    # Convert tangent covector (adjoint variable) to a backend variable
    Δfiredrake_output = from_numpy(Δnumpy_output, firedrake_output)

    # pyadjoint doesn't allow setting Functions to block_variable.adj_value,
    # so pass the underlying vector instead.
    # Use next(iter(...)) rather than subscripting: Collection guarantees
    # iteration but not indexing, so this also works for e.g. sets.
    backend = get_backend(next(iter(firedrake_inputs)))
    if isinstance(Δfiredrake_output, backend.Function):
        Δfiredrake_output = Δfiredrake_output.vector()

    tape.reset_variables()
    firedrake_output.block_variable.adj_value = Δfiredrake_output
    # Restrict the adjoint sweep to the blocks that influence the inputs.
    with tape.marked_nodes(firedrake_inputs):
        tape.evaluate_adj(markings=True)
    dfiredrake_inputs = (fi.block_variable.adj_value for fi in firedrake_inputs)

    # Convert Firedrake gradients to NumPy array representation
    dnumpy_inputs = tuple(
        None if dfi is None else np.asarray(to_numpy(dfi)) for dfi in dfiredrake_inputs
    )

    return dnumpy_inputs
示例#5
0
def pytest_runtest_setup(item):
    """Pytest hook executed before every individual test runs."""
    # Pin the RNG so tolerance-sensitive tests are reproducible across runs.
    numpy.random.seed(21)

    # Give every test a fresh, empty pyadjoint tape.
    set_working_tape(Tape())
示例#6
0
# Re-export the overloaded (annotation-aware) counterparts of the backend API.
from .assembly import assemble, assemble_system
from .solving import solve
from .projection import project
from .interpolation import interpolate
from .ufl_constraints import UFLEqualityConstraint, UFLInequalityConstraint
from .shapead_transformations import (transfer_from_boundary,
                                      transfer_to_boundary)
# These solver/type wrappers only exist for the non-Firedrake (FEniCS/dolfin)
# backend, so import them conditionally.
if backend.__name__ != "firedrake":
    from .newton_solver import NewtonSolver
    from .lu_solver import LUSolver
    from .krylov_solver import KrylovSolver
    from .petsc_krylov_solver import PETScKrylovSolver
    from .types import *
    from .refine import refine
    from .system_assembly import *

from .variational_solver import (NonlinearVariationalProblem,
                                 NonlinearVariationalSolver,
                                 LinearVariationalProblem,
                                 LinearVariationalSolver)
# Surface pyadjoint's public AD/optimization API at this package's top level.
from pyadjoint import (Tape, set_working_tape, get_working_tape,
                       pause_annotation, continue_annotation,
                       ReducedFunctional, taylor_test, taylor_to_dict,
                       compute_gradient, compute_hessian, AdjFloat, Control,
                       minimize, maximize, MinimizationProblem, IPOPTSolver,
                       ROLSolver, InequalityConstraint, EqualityConstraint,
                       MoolaOptimizationProblem, print_optimization_methods,
                       stop_annotating)

# Side effect on import: install a fresh global tape so annotation starts
# recording immediately.
set_working_tape(Tape())