Example #1
class Dot(MetaField2):
    "Compute the dot product between two fields"

    def compute(self, get):
        u1 = get(self.valuename1)
        u2 = get(self.valuename2)

        if u1 is None or u2 is None:
            return

        if not (isinstance(u1, GenericFunction)
                or isinstance(u2, GenericFunction)):
            return npdot(u1, u2)

        if not isinstance(u1, GenericFunction):
            u1 = Constant(u1)
            u1, u2 = u2, u1
        if not isinstance(u2, GenericFunction):
            u2 = Constant(u2)

        if isinstance(u2, Function):
            u1, u2 = u2, u1

        assert isinstance(u1, Function)
        assert isinstance(u2, GenericFunction)

        if u1.value_rank() == u2.value_rank():
            if u1.value_rank() == 0:
                V = u1.function_space()
            else:
                V = u1.function_space().sub(0).collapse()
        elif u1.value_rank() > u2.value_rank():
            assert u2.value_rank() == 0
            V = u1.function_space()
            u1, u2 = u2, u1
        else:
            assert isinstance(u2, Function)
            assert u1.value_rank() == 0
            V = u2.function_space()

        #N = max([u1.value_rank(), u2.value_rank()])

        if not hasattr(self, "u"):
            self.u = Function(V)

        if (isinstance(u1, Function) and isinstance(u2, Function)
                and u1.function_space().dim() == u2.function_space().dim()
                and u1.value_rank() == 0):
            self.u.vector()[:] = u1.vector().array() * u2.vector().array()
        elif u1.value_rank() == u2.value_rank():
            project(dot(u1, u2), function=self.u, V=self.u.function_space())
        else:
            assert u1.value_rank() == 0
            if isinstance(u1, Constant):
                self.u.vector()[:] = float(u1) * u2.vector().array()
            else:
                project(u1 * u2, function=self.u, V=self.u.function_space())

        return self.u
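The rank handling above ultimately reduces to projecting a UFL dot-expression onto a scalar subspace. A minimal standalone sketch of that core pattern, assuming legacy dolfin on a unit square mesh (all names below are illustrative):

from dolfin import (UnitSquareMesh, VectorFunctionSpace, Expression,
                    Constant, interpolate, project, dot)

mesh = UnitSquareMesh(8, 8)
V = VectorFunctionSpace(mesh, "CG", 1)
u = interpolate(Expression(("x[0]", "x[1]"), degree=1), V)

# Scalar target space, mirroring V = u1.function_space().sub(0).collapse()
S = V.sub(0).collapse()
u_dot_u = project(dot(u, u), S)                     # |u|^2 as a scalar CG1 Function
u_dot_e = project(dot(u, Constant((1.0, 0.0))), S)  # dot with a constant vector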
Example #2
    def update(self, timestep: int, time: float,
               data: dolfin.Function) -> None:
        """Update the data."""
        if not self.save_this_timestep(timestep, time):
            return

        if self._first_compute:
            self._first_compute = False
            if df.MPI.rank(df.MPI.comm_world) == 0:
                self._path.mkdir(parents=False, exist_ok=True)
            df.MPI.barrier(df.MPI.comm_world)

            # Update spec with element specifications
            spec_dict = self.spec._asdict()
            element = data.function_space().ufl_element()
            spec_dict["element_family"] = str(
                element.family())  # e.g. Lagrange
            spec_dict["element_degree"] = element.degree()

            store_metadata(
                self.path / "metadata_{name}.yaml".format(name=self.name),
                spec_dict)

        if "hdf5" in self.spec.save_as:
            self._store_field_hdf5(timestep, time, data)

        if "xdmf" in self.spec.save_as:
            self._store_field_xdmf(timestep, time, data)

        if "checkpoint" in self.spec.save_as:
            self._checkpoint(timestep, time, data)
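For reference, the element metadata gathered above can be reproduced in isolation. A small sketch assuming legacy dolfin; the dictionary keys mirror the ones written to the YAML file:

import dolfin as df

mesh = df.UnitSquareMesh(4, 4)
V = df.FunctionSpace(mesh, "CG", 2)
element = df.Function(V).function_space().ufl_element()

spec_dict = {
    "element_family": str(element.family()),  # "Lagrange"
    "element_degree": element.degree(),       # 2
}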
Example #3
        def restart_conditions(spaces, loadables):
            # loadables[restart_time0][solution_name] = [(t0, Lt0)] # will load Lt0
            # loadables[restart_time0][solution_name] = [(t0, Lt0), (t1, Lt1)] # will interpolate to restart_time
            functions = {}
            for t in loadables:
                functions[t] = dict()
                for solution_name in loadables[t]:
                    assert len(loadables[t][solution_name]) in [1, 2]

                    if len(loadables[t][solution_name]) == 1:
                        f = loadables[t][solution_name][0][1]()
                    elif len(loadables[t][solution_name]) == 2:
                        # Interpolate
                        t0, Lt0 = loadables[t][solution_name][0]
                        t1, Lt1 = loadables[t][solution_name][1]

                        assert t0 <= t <= t1
                        if Lt0.function is not None:

                            # Copying the Function directly raises a PETSc error
                            # in parallel, so accumulate with axpy instead
                            #f = Function(Lt0())
                            f0 = Lt0()
                            f = Function(f0.function_space())
                            f.vector().axpy(1.0, f0.vector())
                            del f0

                            dvec = Lt1().vector()
                            dvec.axpy(-1.0, f.vector())
                            f.vector().axpy((t - t0) / (t1 - t0), dvec)
                        else:
                            f0 = Lt0()
                            f1 = Lt1()
                            datatype = type(f0)
                            if not issubclass(datatype, Iterable):
                                f0 = [f0]
                                f1 = [f1]

                            f = []
                            for _f0, _f1 in zip(f0, f1):
                                val = _f0 + (t - t0) / (t1 - t0) * (_f1 - _f0)
                                f.append(val)

                            if not issubclass(datatype, Iterable):
                                f = f[0]
                            else:
                                f = datatype(f)

                    if solution_name in spaces:
                        space = spaces[solution_name]
                        if space != f.function_space():
                            #from fenicstools import interpolate_nonmatching_mesh
                            #f = interpolate_nonmatching_mesh(f, space)
                            try:
                                f = interpolate(f, space)
                            except:
                                f = project(f, space)

                    functions[t][solution_name] = f

            return functions
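The axpy branch above is plain linear interpolation in time, f = f0 + (t - t0) / (t1 - t0) * (f1 - f0), written so that no Function is copied directly. A minimal sketch of just that step, assuming legacy dolfin:

from dolfin import UnitIntervalMesh, FunctionSpace, Function

mesh = UnitIntervalMesh(4)
V = FunctionSpace(mesh, "CG", 1)
f0, f1 = Function(V), Function(V)
f0.vector()[:] = 1.0
f1.vector()[:] = 3.0

t0, t1, t = 0.0, 1.0, 0.25
f = Function(V)
f.vector().axpy(1.0, f0.vector())            # f <- f0
dvec = f1.vector().copy()
dvec.axpy(-1.0, f0.vector())                 # dvec <- f1 - f0
f.vector().axpy((t - t0) / (t1 - t0), dvec)  # f == 1.5 everywhere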
Example #4
def curvilinear_coordinate_1d(mb, p0=0, function=None):
    "Returns parametrization of a curve"

    # edge-to-vertex connectivity
    EE = np.zeros((mb.num_cells(), mb.num_vertices()), dtype=bool)
    for e in edges(mb):
        EE[e.index(), e.entities(0)] = True

    # vertex-to-vertex connectivity (via edges)
    PP = EE.T @ EE
    np.fill_diagonal(PP, False)
    mmap = -np.ones(PP.shape[0], dtype=int)

    # order vertices
    mmap[0] = p0
    for k in range(PP.shape[0] - 1):
        neig = np.where(PP[mmap[k], :])[0]
        mmap[k + 1] = neig[1] if neig[0] in mmap else neig[0]

    # cumulative length of edges
    l = np.linalg.norm(np.diff(mb.coordinates()[mmap, :], axis=0), axis=1)
    s = np.r_[0, np.cumsum(l)]

    if function is None:
        P1e = FiniteElement("CG", mb.ufl_cell(), 1)
        Ve = FunctionSpace(mb, P1e)
        function = Function(Ve)
        function.vector()[vertex_to_dof_map(Ve)[mmap]] = s
        return function
    else:
        Ve = function.function_space()
        function.vector()[vertex_to_dof_map(Ve)[mmap]] = s
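A hedged usage sketch: on a simple interval mesh the returned function is just the arc length measured from vertex p0 (this assumes the dolfin names used above are already imported):

from dolfin import UnitIntervalMesh

mesh_1d = UnitIntervalMesh(10)
s = curvilinear_coordinate_1d(mesh_1d, p0=0)
# s is a CG1 Function whose vertex values increase monotonically along the curve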
Example #5
    def before_first_compute(self, data: dolfin.Function) -> None:
        """Create probes."""
        function_space = data.function_space()
        fs_dim = function_space.mesh().geometry().dim()
        point_dim = self._points.shape[-1]
        msg = "Point of dimension {point_dim} != function space dimension {fs_dim}".format(
            point_dim=point_dim,
            fs_dim=fs_dim,
        )
        assert fs_dim == point_dim, msg

        if self._spec.sub_field_index is not None:
            function_space = data.function_space().sub(
                self._spec.sub_field_index)
        else:
            function_space = data.function_space()
        self._probes = self._ft.Probes(self._points.flatten(), function_space)
Example #6
def split_function(vs: df.Function,
                   dim: int) -> tp.Tuple[df.Function, df.Function]:
    """Split a function into the first component and the rest."""
    if vs.function_space().ufl_element().num_sub_elements() == dim:
        v = vs[0]
        if dim == 2:
            s = vs[1]
        else:
            s = df.as_vector([vs[i] for i in range(1, dim)])
    else:
        v, s = df.split(vs)
    return v, s
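A hedged usage sketch of split_function on a three-component mixed space (legacy dolfin assumed; dim is the total number of components):

import dolfin as df

mesh = df.UnitSquareMesh(4, 4)
P1 = df.FiniteElement("CG", mesh.ufl_cell(), 1)
W = df.FunctionSpace(mesh, df.MixedElement([P1, P1, P1]))
vs = df.Function(W)

v, s = split_function(vs, dim=3)
# v is vs[0]; s is as_vector([vs[1], vs[2]]) because num_sub_elements() == dim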
Example #7
    def plot(self, **kwargs):
        func = self._fefunc
        # fix a bug in the fenics plot function that appears when
        # the maximum difference between data values is very small
        # compared to the magnitude of the data
        values = func.vector().array()
        diff = max(values) - min(values)
        magnitude = max(abs(values))
        if diff < magnitude * 1e-8:
            logger.warning("PLOT: function values differ only by tiny amount -> plotting as constant")
            func = Function(func.function_space())
            func.vector()[:] = values[0]
        plot(func, **kwargs)
Example #8
def mplot_function(function: df.Function,
                   vmin=None,
                   vmax=None,
                   shading="gouraud",
                   colourbar=False,
                   colourbar_label=None) -> Tuple[plt.Figure, Any]:
    """Plot a function. The kind of plot depends on the function."""
    mesh = function.function_space().mesh()
    if mesh.geometry().dim() != 2:
        raise AttributeError("Mesh must be 2D")

    fig, ax = plt.subplots(1)

    tpc = None
    # DG0 cellwise function
    if function.vector().size() == mesh.num_cells():
        colour_array = function.vector().array()
        tpc = ax.tripcolor(mesh2triang(mesh),
                           colour_array,
                           vmin=vmin,
                           vmax=vmax)

    # Scalar function, interpolated to vertices
    elif function.value_rank() == 0:
        colour_array = function.compute_vertex_values(mesh)
        tpc = ax.tripcolor(mesh2triang(mesh),
                           colour_array,
                           shading=shading,
                           vmin=vmin,
                           vmax=vmax)

    # Vector function, interpolated to vertices
    elif function.value_rank() == 1:
        vertex_values = function.compute_vertex_values(mesh)
        if len(vertex_values) != 2 * mesh.num_vertices():
            raise AttributeError("Vector field must be 2D")

        X = mesh.coordinates()[:, 0]
        Y = mesh.coordinates()[:, 1]
        U = vertex_values[:mesh.num_vertices()]
        V = vertex_values[mesh.num_vertices():]
        tpc = ax.quiver(X, Y, U, V)

    if colourbar and tpc is not None:
        cb = fig.colorbar(tpc)
        if colourbar_label is not None:
            cb.set_label(colourbar_label)

    return fig, ax
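A hedged usage sketch, assuming mplot_function and its mesh2triang helper are importable and matplotlib is available:

from dolfin import UnitSquareMesh, FunctionSpace, Expression, interpolate

mesh = UnitSquareMesh(16, 16)
V = FunctionSpace(mesh, "CG", 1)
u = interpolate(Expression("sin(10*x[0])*x[1]", degree=2), V)

fig, ax = mplot_function(u, colourbar=True, colourbar_label="u")
fig.savefig("u.png")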
Example #9
def assign_restart_ic(receiving_function: df.Function,
                      assigning_func_iterator: Iterable[df.Function]) -> None:
    """Assign a seriess of functions to the `receiving_function`.

    This function is indended for use when restarting simulations, using previously computed
    solutions as initial conditions.
    """
    # Get receiving function space
    mixed_function_space = receiving_function.function_space()
    assigning_function_space = df.FunctionSpace(mixed_function_space.mesh(),
                                                "CG", 1)

    for subfunc_idx, assigning_sub_function in enumerate(
            assigning_func_iterator):
        assigner = df.FunctionAssigner(mixed_function_space.sub(subfunc_idx),
                                       assigning_function_space)
        assigner.assign(receiving_function.sub(subfunc_idx),
                        assigning_sub_function)
Example #10
    def update(self, timestep: int, time: float,
               data: dolfin.Function) -> None:
        """Update the data."""

        comm = df.MPI.comm_world
        rank = df.MPI.rank(comm)

        if not self.save_this_timestep(timestep, time):
            return

        if self.first_compute:  # Setup everything
            self.first_compute = False  # Do not do this again
            self.before_first_compute(data)

            # Update spec with element specifications
            spec_dict = self.spec._asdict()
            element = data.function_space().ufl_element()

            spec_dict["element_family"] = str(
                element.family())  # e.g. Lagrange
            spec_dict["element_degree"] = element.degree()

            plist = [tuple(map(float, p))
                     for p in self._points]  # TODO: Untested
            spec_dict["point"] = plist

            if rank == 0:
                self._path.mkdir(parents=False, exist_ok=True)
                store_metadata(
                    self.path / "metadata_{name}.yaml".format(name=self.name),
                    spec_dict)

        _data = self.compute(data)

        if rank == 0:
            with open(self.path / "probes_{name}.txt".format(name=self.name),
                      "a") as of_handle:
                if self._points.shape[0] == 1:
                    _data = (_data, )
                _data_format_str = ", ".join(("{}", ) * (len(_data) + 1))
                of_handle.write(_data_format_str.format(float(time), *_data))
                of_handle.write("\n")
Example #11
class MockVectorFunctionField(Field):
    def __init__(self, V, params=None):
        Field.__init__(self, params)
        self.f = Function(V)

    def before_first_compute(self, get):
        t = get('t')


        D = self.f.function_space().mesh().geometry().dim()
        if D == 2:
            self.expr = Expression(("1+x[0]*t", "3+x[1]*t"), degree=1, t=t)
        elif D == 3:
            self.expr = Expression(("1+x[0]*t", "3+x[1]*t", "10+x[2]*t"), degree=1, t=t)

    def compute(self, get):
        t = get('t')
        self.expr.t = t
        self.f.interpolate(self.expr)
        return self.f
Example #12
def new_assign_ic(receiving_function: df.Function,
                  ic_generator: NonuniformIC,
                  degree: int = 1) -> None:
    """
    Assign receiving_function(x, y) <- ic_function(x, y) for each point (x, y) in the mesh.

    Arguments:
        receiving_function: The function which is assigned the initial condition.
        ic_generator: Yields one python callable per subfunction of `receiving_function`;
            each callable returns the initial condition at a point.
        degree: Polynomial degree of the interpolating expression.
    """
    mixed_func_space = receiving_function.function_space()
    mesh = mixed_func_space.mesh()
    V = df.FunctionSpace(mesh, "CG", 1)  # TODO: infer this somehow

    class InitialConditionInterpolator(df.UserExpression):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self._ic_func = None

        def set_interpolator(self, interpolation_function):
            self._ic_func = interpolation_function

        def eval(self, value, x):
            value[0] = self._ic_func(x[0])  # TODO: 1D for now

    ic_interpolator = InitialConditionInterpolator(degree=degree)

    # Split into subfunctions to bound the loop below
    functions = receiving_function.split(deepcopy=True)

    for i, (_, ic_func) in enumerate(zip(functions, ic_generator())):
        ic_interpolator.set_interpolator(ic_func)
        assigner = df.FunctionAssigner(mixed_func_space.sub(i), V)
        assigner.assign(receiving_function.sub(i),
                        df.project(ic_interpolator, V))
Example #13
def assign_ic_subdomain(
        *,
        brain: CoupledBrainModel,
        vs_prev: df.Function,
        value: float,
        subdomain_id: int,
        subfunction_index: int
) -> None:
    """
    Compute a function with `value` in the subdomain corresponding to `subdomain_id`.
    Assign this function to subfunction `subfunction_index` of vs_prev.
    """
    mesh = brain._mesh
    cell_function = brain._cell_function

    dX = df.Measure("dx", domain=mesh, subdomain_data=cell_function)

    V = df.FunctionSpace(mesh, "DG", 0)
    u = df.TrialFunction(V)
    v = df.TestFunction(V)
    sol = df.Function(V)
    sol.vector().zero()     # Make sure it is initialised to zero

    F = -u*v*dX(subdomain_id) + df.Constant(value)*v*dX(subdomain_id)
    a = df.lhs(F)
    L = df.rhs(F)

    A = df.assemble(a, keep_diagonal=True)
    A.ident_zeros()
    b = df.assemble(L)
    solver = df.KrylovSolver("cg", "petsc_amg")
    solver.set_operator(A)
    solver.solve(sol.vector(), b)

    VCG = df.FunctionSpace(mesh, "CG", 1)
    v_new = df.Function(VCG)
    v_new.interpolate(sol)

    Vp = vs_prev.function_space().sub(subfunction_index)
    merger = df.FunctionAssigner(Vp, VCG)
    merger.assign(vs_prev.sub(subfunction_index), v_new)
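The core trick above (a DG0 projection restricted to the marked cells, with ident_zeros filling identity rows for the remaining cells) can be exercised without the CoupledBrainModel wrapper. A minimal sketch with an assumed cell marker, legacy dolfin:

import dolfin as df

mesh = df.UnitSquareMesh(8, 8)
cell_function = df.MeshFunction("size_t", mesh, mesh.topology().dim(), 0)
df.CompiledSubDomain("x[0] < 0.5 + DOLFIN_EPS").mark(cell_function, 1)

dX = df.Measure("dx", domain=mesh, subdomain_data=cell_function)
V = df.FunctionSpace(mesh, "DG", 0)
u, v = df.TrialFunction(V), df.TestFunction(V)
sol = df.Function(V)

value = 2.0
F = -u * v * dX(1) + df.Constant(value) * v * dX(1)
A = df.assemble(df.lhs(F), keep_diagonal=True)
A.ident_zeros()                  # identity rows for cells outside subdomain 1
b = df.assemble(df.rhs(F))
df.solve(A, sol.vector(), b)     # sol == value on subdomain 1, 0 elsewhere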
Example #14
def eikonal_1d(mb, p0=0, function=None):
    "Compute distance from p0 on set of edges"

    # edge-to-vertex connectivity
    EE = np.zeros((mb.num_cells(), mb.num_vertices()), dtype=bool)
    for e in edges(mb):
        EE[e.index(), e.entities(0)] = True

    # vertex-to-vertex connectivity (via edges)
    PP = EE.T @ EE
    np.fill_diagonal(PP, False)

    # initial solution is inf everywhere
    sol = np.empty(PP.shape[0])
    sol.fill(np.inf)

    # initial conditions
    active = deque([p0])
    sol[p0] = 0.0

    # fast marching on edges
    x = mb.coordinates()
    while active:
        curr = active.pop()
        neig = np.where(PP[curr, :])[0]
        ll = sol[curr] + np.linalg.norm(x[neig, :] - x[curr, :], axis=1)
        up = neig[ll < sol[neig]]
        active.extend(up)
        sol[neig] = np.minimum(sol[neig], ll)

    # return solution
    if function is None:
        P1e = FiniteElement("CG", mb.ufl_cell(), 1)
        Ve = FunctionSpace(mb, P1e)
        function = Function(Ve)
        function.vector()[vertex_to_dof_map(Ve)] = sol
        return function
    else:
        Ve = function.function_space()
        function.vector()[vertex_to_dof_map(Ve)] = sol
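Usage mirrors curvilinear_coordinate_1d above; a hedged sketch on an interval mesh, where the result is simply the distance from vertex p0:

from dolfin import UnitIntervalMesh

mesh_1d = UnitIntervalMesh(20)
d = eikonal_1d(mesh_1d, p0=0)  # CG1 Function holding the edge-graph distance from vertex 0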
Example #15
def interpolate_ic(time: Sequence[float],
                   data: np.ndarray,
                   receiving_function: df.Function,
                   boundaries: Iterable[np.ndarray],
                   wavespeed: float = 1.0) -> None:
    mixed_func_space = receiving_function.function_space()
    mesh = mixed_func_space.mesh()
    V = df.FunctionSpace(mesh, "CG", 1)  # TODO: infer this somehow

    class InitialConditionInterpolator(df.UserExpression):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self._ic_func = None

            self._nearest_edge_interpolator = NearestEdgeTree(boundaries)

        def set_interpolator(self, interpolation_function):
            self._ic_func = interpolation_function

        def eval(self, value, x):
            _, r = self._nearest_edge_interpolator.query(x)
            value[0] = self._ic_func(r / wavespeed)  # TODO: 1D for now
            # value[0] = r
            # value[0] = self._ic_func(x[0]/wavespeed)    # TODO: 1D for now

    ic_interpolator = InitialConditionInterpolator()

    # Copy functions to be able to assign to them
    subfunction_copy = receiving_function.split(deepcopy=True)
    for i, f in enumerate(subfunction_copy):
        # from IPython import embed; embed()
        # assert False
        ic_interpolator.set_interpolator(
            lambda x: np.interp(x, time, data[i, :]))
        assigner = df.FunctionAssigner(mixed_func_space.sub(i), V)
        assigner.assign(receiving_function.sub(i),
                        df.project(ic_interpolator, V))
Example #16
def function_extend_or_restrict(function,
                                function_components,
                                V,
                                V_components,
                                weight,
                                copy,
                                extended_or_restricted_function=None):
    function_V = function.function_space()
    if function_components is not None:
        assert isinstance(function_components, (int, str, tuple))
        assert not isinstance(
            function_components, list
        ), "dolfin does not yet handle the case of a list of components"
        if isinstance(function_components, str):
            function_components = function_V.component_to_index(
                function_components)
        if not isinstance(function_components, tuple):
            function_V_index = (function_components, )
        else:
            function_V_index = function_components
    else:
        function_V_index = None
    if V_components is not None:
        assert isinstance(V_components, (int, str, tuple))
        assert not isinstance(
            V_components, list
        ), "dolfin does not yet handle the case of a list of components"
        if isinstance(V_components, str):
            V_components = V.component_to_index(V_components)
        if not isinstance(V_components, tuple):
            V_index = (V_components, )
        else:
            V_index = V_components
    else:
        V_index = None

    V_to_function_V_mapping = dict()
    function_V_to_V_mapping = dict()

    if _function_spaces_eq(function_V, V, function_V_index, V_index):
        # Then function_V == V: no need to extend or restrict the input function
        # Example of use case: function is the solution of an elliptic problem, V is the truth space
        if not copy:
            assert function_components is None, "It is not possible to extract function components without copying the vector"
            assert V_components is None, "It is not possible to extract function components without copying the vector"
            assert weight is None, "It is not possible to weigh components without copying the vector"
            assert extended_or_restricted_function is None, "It is not possible to provide an output function without copying the vector"
            return function
        else:
            if extended_or_restricted_function is None:
                output = Function(V)  # zero by default
            else:
                output = extended_or_restricted_function
                assert output.function_space() == V
            assign(_sub_from_tuple(output, V_index),
                   _sub_from_tuple(function, function_V_index))
            if weight is not None:
                output.vector()[:] *= weight
            return output
    elif _function_spaces_lt(function_V, V, V_to_function_V_mapping,
                             function_V_index, V_index):
        # Then function_V < V: need to extend input function
        # Example of use case: function is the solution of the supremizer problem of a Stokes problem,
        # V is the mixed (velocity, pressure) space, and you are interested in storing an extended function
        # (i.e. extended to zero on pressure DOFs) when defining basis functions for enriched velocity space
        assert copy is True, "It is not possible to extend functions without copying the vector"
        if extended_or_restricted_function is None:
            extended_function = Function(V)  # zero by default
        else:
            extended_function = extended_or_restricted_function
            assert extended_function.function_space() == V
        for (index_V_as_tuple,
             index_function_V_as_tuple) in V_to_function_V_mapping.items():
            assign(_sub_from_tuple(extended_function, index_V_as_tuple),
                   _sub_from_tuple(function, index_function_V_as_tuple))
        if weight is not None:
            extended_function.vector()[:] *= weight
        return extended_function
    elif _function_spaces_gt(function_V, V, function_V_to_V_mapping,
                             function_V_index, V_index):
        # Then function_V > V: need to restrict input function
        # Example of use case: function = (y, u, p) is the solution of an elliptic optimal control problem,
        # V is the collapsed state (== adjoint) solution space, and you are
        # interested in storing snapshots of y or p components because of an aggregated approach
        assert copy is True, "It is not possible to restrict functions without copying the vector"
        if extended_or_restricted_function is None:
            restricted_function = Function(V)  # zero by default
        else:
            restricted_function = extended_or_restricted_function
            assert restricted_function.function_space() == V
        for (index_function_V_as_tuple,
             index_V_as_tuple) in function_V_to_V_mapping.items():
            assign(_sub_from_tuple(restricted_function, index_V_as_tuple),
                   _sub_from_tuple(function, index_function_V_as_tuple))
        if weight is not None:
            restricted_function.vector()[:] *= weight
        return restricted_function
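A hedged usage sketch of the simplest branch (function_V == V), assuming the private helpers used above (_function_spaces_eq, _sub_from_tuple, assign, ...) are in scope:

from dolfin import UnitSquareMesh, FunctionSpace, Function

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)
u = Function(V)
u.vector()[:] = 1.0

same = function_extend_or_restrict(u, None, V, None, weight=None, copy=False)
# returns u itself (no copy); with copy=True and a weight, a new scaled Function comes back
scaled = function_extend_or_restrict(u, None, V, None, weight=2.0, copy=True)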
Example #17
def get_mpi_comm(function: Function):
    return get_mpi_comm(function.function_space())
Example #18
def get_function_subspace(function: Function,
                          component: (int, list_of(str), str, tuple_of(int))):
    return get_function_subspace(function.function_space(), component)
Example #19
class ObjectiveFunctional(LinearOperator):
    """
    Provides data misfit, gradient and Hessian information for the data misfit
    part of a time-independent symmetric inverse problem.
    """
    __metaclass__ = abc.ABCMeta

    # Instantiation
    def __init__(self, V, Vm, bc, bcadj, \
    RHSinput=[], ObsOp=[], UD=[], Regul=[], Data=[], plot=False, \
    mycomm=None):
        # Define test, trial and all other functions
        self.trial = TrialFunction(V)
        self.test = TestFunction(V)
        self.mtrial = TrialFunction(Vm)
        self.mtest = TestFunction(Vm)
        self.rhs = Function(V)
        self.m = Function(Vm)
        self.mcopy = Function(Vm)
        self.srchdir = Function(Vm)
        self.delta_m = Function(Vm)
        self.MG = Function(Vm)
        self.Grad = Function(Vm)
        self.Gradnorm = 0.0
        self.lenm = len(self.m.vector().array())
        self.u = Function(V)
        self.ud = Function(V)
        self.diff = Function(V)
        self.p = Function(V)
        # Define weak forms to assemble A, C and E
        self._wkforma()
        self._wkformc()
        self._wkforme()
        # Store other info:
        self.ObsOp = ObsOp
        self.UD = UD
        self.reset()    # Initialize U, C and E to []
        self.Data = Data
        self.GN = 1.0   # GN = 0.0 => GN Hessian; = 1.0 => full Hessian
        # Operators and bc
        LinearOperator.__init__(self, self.delta_m.vector(), \
        self.delta_m.vector()) 
        self.bc = bc
        self.bcadj = bcadj
        self._assemble_solverM(Vm)
        self.assemble_A()
        self.assemble_RHS(RHSinput)
        self.Regul = Regul
        # Counters, tolerances and others
        self.nbPDEsolves = 0    # Updated when solve_A called
        self.nbfwdsolves = 0    # Counter for plots
        self.nbadjsolves = 0    # Counter for plots
        self._set_plots(plot)
        # MPI:
        self.mycomm = mycomm
        try:
            self.myrank = MPI.rank(self.mycomm)
        except:
            self.myrank = 0

    def copy(self):
        """Define a copy method"""
        V = self.trial.function_space()
        Vm = self.mtrial.function_space()
        newobj = self.__class__(V, Vm, self.bc, self.bcadj, [], self.ObsOp, \
        self.UD, self.Regul, self.Data, False)
        newobj.RHS = self.RHS
        newobj.update_m(self.m)
        return newobj

    def mult(self, mhat, y):
        """mult(self, mhat, y): do y = Hessian * mhat
        member self.GN sets full Hessian (=1.0) or GN Hessian (=0.0)"""
        N = self.Nbsrc # Number of sources
        y[:] = np.zeros(self.lenm)
        for C, E in zip(self.C, self.E):
            # Solve for uhat
            C.transpmult(mhat, self.rhs.vector())
            self.bcadj.apply(self.rhs.vector())
            self.solve_A(self.u.vector(), -self.rhs.vector())
            # Solve for phat
            E.transpmult(mhat, self.rhs.vector())
            Etmhat = self.rhs.vector().array()
            self.rhs.vector().axpy(1.0, self.ObsOp.incradj(self.u))
            self.bcadj.apply(self.rhs.vector())
            self.solve_A(self.p.vector(), -self.rhs.vector())
            # Compute Hessian*x:
            y.axpy(1.0/N, C * self.p.vector())
            y.axpy(self.GN/N, E * self.u.vector())
        y.axpy(1.0, self.Regul.hessian(mhat))

    # Getters
    def getm(self): return self.m
    def getmarray(self):    return self.m.vector().array()
    def getmcopyarray(self):    return self.mcopy.vector().array()
    def getVm(self):    return self.mtrial.function_space()
    def getMGarray(self):   return self.MG.vector().array()
    def getMGvec(self):   return self.MG.vector()
    def getGradarray(self):   return self.Grad.vector().array()
    def getGradnorm(self):  return self.Gradnorm
    def getsrchdirarray(self):    return self.srchdir.vector().array()
    def getsrchdirvec(self):    return self.srchdir.vector()
    def getsrchdirnorm(self):
        return np.sqrt((self.MM*self.getsrchdirvec()).inner(self.getsrchdirvec()))
    def getgradxdir(self): return self.gradxdir
    def getcost(self):  return self.cost, self.misfit, self.regul
    def getprecond(self):
        Prec = PETScKrylovSolver("richardson", "amg")
        Prec.parameters["maximum_iterations"] = 1
        Prec.parameters["error_on_nonconvergence"] = False
        Prec.parameters["nonzero_initial_guess"] = False
        Prec.set_operator(self.Regul.get_precond())
        return Prec
    def getMass(self):    return self.MM

    # Setters
    def setsrchdir(self, arr):  self.srchdir.vector()[:] = arr
    def setgradxdir(self, valueloc):   
        """Sum all local results for Grad . Srch_dir"""
        try:
            valueglob = MPI.sum(self.mycomm, valueloc)
        except:
            valueglob = valueloc
        self.gradxdir = valueglob

    # Solve
    def solvefwd(self, cost=False):
        """Solve fwd operators for given RHS"""
        self.nbfwdsolves += 1
        if self.ObsOp.noise:    self.noise = 0.0
        if self.plot:
            self.plotu = PlotFenics(self.plotoutdir)
            self.plotu.set_varname('u{0}'.format(self.nbfwdsolves))
        if cost:    self.misfit = 0.0
        for ii, rhs in enumerate(self.RHS):
            self.solve_A(self.u.vector(), rhs)
            if self.plot:   self.plotu.plot_vtk(self.u, ii)
            u_obs, noiselevel = self.ObsOp.obs(self.u)
            self.U.append(u_obs)
            if self.ObsOp.noise:    self.noise += noiselevel
            if cost:
                self.misfit += self.ObsOp.costfct(u_obs, self.UD[ii])
            self.C.append(assemble(self.c))
        if cost:
            self.misfit /= len(self.U)
            self.regul = self.Regul.cost(self.m)
            self.cost = self.misfit + self.regul
        if self.ObsOp.noise and self.myrank == 0:
            print('Total noise in data misfit={:.5e}\n'.format(
                self.noise * .5 / len(self.U)))
            self.ObsOp.noise = False    # Safety
        if self.plot:   self.plotu.gather_vtkplots()

    def solvefwd_cost(self):
        """Solve fwd operators for given RHS and compute cost fct"""
        self.solvefwd(True)

    def solveadj(self, grad=False):
        """Solve adj operators"""
        self.nbadjsolves += 1
        if self.plot:
            self.plotp = PlotFenics(self.plotoutdir)
            self.plotp.set_varname('p{0}'.format(self.nbadjsolves))
        self.Nbsrc = len(self.UD)
        if grad:    self.MG.vector()[:] = np.zeros(self.lenm)
        for ii, C in enumerate(self.C):
            self.ObsOp.assemble_rhsadj(self.U[ii], self.UD[ii], \
            self.rhs, self.bcadj)
            self.solve_A(self.p.vector(), self.rhs.vector())
            if self.plot:   self.plotp.plot_vtk(self.p, ii)
            self.E.append(assemble(self.e))
            if grad:    self.MG.vector().axpy(1.0/self.Nbsrc, \
                        C * self.p.vector())
        if grad:
            self.MG.vector().axpy(1.0, self.Regul.grad(self.m))
            self.solverM.solve(self.Grad.vector(), self.MG.vector())
            self.Gradnorm = np.sqrt(self.Grad.vector().inner(self.MG.vector()))
        if self.plot:   self.plotp.gather_vtkplots()

    def solveadj_constructgrad(self):
        """Solve adj operators and assemble gradient"""
        self.solveadj(True)

    # Assembler
    def assemble_A(self):
        """Assemble operator A(m)"""
        self.A = assemble(self.a)
        self.bc.apply(self.A)
        self.set_solver()

    def solve_A(self, b, f):
        """Solve system of the form A.b = f, 
        with b and f in form to be used in solver."""
        self.solver.solve(b, f)
        self.nbPDEsolves += 1

    def assemble_RHS(self, RHSin):
        """Assemble RHS for fwd solve"""
        if RHSin == []: self.RHS = None
        else:
            self.RHS = []
            for rhs in RHSin:
                if isinstance(rhs, Expression):
                    L = rhs*self.test*dx
                    b = assemble(L)
                    self.bc.apply(b)
                    self.RHS.append(b)
                else:   raise WrongInstanceError("rhs should be Expression")

    def _assemble_solverM(self, Vm):
        self.MM = assemble(inner(self.mtrial, self.mtest)*dx)
        self.solverM = LUSolver()
        self.solverM.parameters['reuse_factorization'] = True
        self.solverM.parameters['symmetric'] = True
        self.solverM.set_operator(self.MM)

    def _set_plots(self, plot):
        self.plot = plot
        if self.plot:
            filename, ext = splitext(sys.argv[0])
            self.plotoutdir = filename + '/Plots/'
            self.plotvarm = PlotFenics(self.plotoutdir)
            self.plotvarm.set_varname('m')

    def plotm(self, index):
        if self.plot:   self.plotvarm.plot_vtk(self.m, index)

    def gatherm(self):
        if self.plot:   self.plotvarm.gather_vtkplots()

    # Update param
    def update_Data(self, Data):
        """Update Data member"""
        self.Data = Data
        self.assemble_A()
        self.reset()

    def update_m(self, m):
        """Update values of parameter m"""
        if isinstance(m, np.ndarray):
            self.m.vector()[:] = m
        elif isinstance(m, Function):
            self.m.assign(m)
        elif isinstance(m, float):
            self.m.vector()[:] = m
        elif isinstance(m, int):
            self.m.vector()[:] = float(m)
        else:   raise WrongInstanceError('Format for m not accepted')
        self.assemble_A()
        self.reset()

    def backup_m(self):
        self.mcopy.assign(self.m)

    def restore_m(self):
        self.update_m(self.mcopy)

    def reset(self):
        """Reset U, C and E"""
        self.U = []
        self.C = []
        self.E = []

    def set_solver(self):
        """Reset solver for fwd operator"""
        self.solver = LUSolver()
        self.solver.parameters['reuse_factorization'] = True
        self.solver.set_operator(self.A)

    def addPDEcount(self, increment=1):
        """Increase 'nbPDEsolves' by 'increment'"""
        self.nbPDEsolves += increment

    def resetPDEsolves(self):
        self.nbPDEsolves = 0

    # Additional methods for compatibility with CG solver:
    def init_vector(self, x, dim):
        """Initialize vector x to be compatible with parameter
         Does not work in dolfin 1.3.0"""
        self.MM.init_vector(x, 0)

    def init_vector130(self):
        """Initialize vector x to be compatible with parameter"""
        return Vector(Function(self.mcopy.function_space()).vector())

    # Abstract methods
    @abc.abstractmethod
    def _wkforma(self): self.a = []

    @abc.abstractmethod
    def _wkformc(self): self.c = []

    @abc.abstractmethod
    def _wkforme(self): self.e = []
Example #20
def testA():
    print "TEST A"
    quadrature_degree_old = parameters["form_compiler"]["quadrature_degree"]
      
    # setup problem
    mesh1 = UnitSquare(5, 5)
    V1 = FunctionSpace(mesh1, 'CG', 1)
    mesh2 = UnitSquare(10, 10)
    V2 = FunctionSpace(mesh2, 'CG', 1)
    u1 = TrialFunction(V1)
    v1 = TestFunction(V1)
    u2 = TrialFunction(V2)
    v2 = TestFunction(V2)
    f = Constant(1.0)

    # define boundary conditions
#    bc = DirichletBC(V, 0.0, "near(x[0], 0.0) || near(x[0], 1.0)")
    def u0_boundary(x, on_boundary):
        return (x[0] <= DOLFIN_EPS - 1.0 or (x[0] >= 0.0 - DOLFIN_EPS and x[1] < 1.0 - DOLFIN_EPS)) and on_boundary
    #    return x[0] < DOLFIN_EPS or x[0] > 1.0 - DOLFIN_EPS
    u0 = Constant(0.0)
    bc1 = DirichletBC(V1, u0, u0_boundary)
    bc2 = DirichletBC(V2, u0, u0_boundary)

    # solution vector 
    u1_h = Function(V1)
    u2_h = Function(V2)

    # iterate expressions
    for run in range(2):
        if run == 1:
            u1_h.vector()[:] = np.ones(u1_h.function_space().dim())     # used for integration in form
        for j, ex in enumerate(EX):
            for qdegree in range(quadrature_degree):
                if qdegree == 0:
                    qdegree = -1
                parameters["form_compiler"]["quadrature_degree"] = qdegree
    
                # forms
    #            b1 = f * v1 * dx
    #            b2 = f * v2 * dx
    #            b1 = ex * v1 * dx
    #            b2 = ex * v2 * dx
                b1 = inner(nabla_grad(ex), nabla_grad(ex)) * v1 * dx
                b2 = inner(nabla_grad(ex), nabla_grad(ex)) * v2 * dx
                f1 = inner(nabla_grad(ex), nabla_grad(ex)) * u1_h * dx
                f2 = inner(nabla_grad(ex), nabla_grad(ex)) * u1_h * dx
    
                a1 = inner(nabla_grad(u1), nabla_grad(v1)) * dx
                a2 = inner(nabla_grad(u2), nabla_grad(v2)) * dx
    #            a1 = ex * inner(nabla_grad(u1), nabla_grad(v1)) * dx
    #            a2 = ex * inner(nabla_grad(u2), nabla_grad(v2)) * dx
    #            a1 = inner(nabla_grad(ex), nabla_grad(ex)) * inner(nabla_grad(u1), nabla_grad(v1)) * dx
    #            a2 = inner(nabla_grad(ex), nabla_grad(ex)) * inner(nabla_grad(u2), nabla_grad(v2)) * dx
    
                # compute solution
                if run == 0:
                    solve(a1 == b1, u1_h, bc1)
                    solve(a2 == b2, u2_h, bc2)
                    print "[V1] norms for quad=", qdegree, "ex", j, ":", norm(u1_h, 'L2'), norm(u1_h, 'H1')
                    print "[V2] norms for quad=", qdegree, "ex", j, ":", norm(u2_h, 'L2'), norm(u2_h, 'H1')
                                
                # compute norms
                if run == 1:
                    N1 = sqrt(assemble(f1))
                    N2 = sqrt(assemble(f2))
                    print "[V1] norm for quad=", qdegree, "ex", j, ":", N1
                    print "[V2] norm for quad=", qdegree, "ex", j, ":", N2
    
            print "-------------------------------------------------------------------------"
    parameters["form_compiler"]["quadrature_degree"] = quadrature_degree_old
Example #21
class ObjectiveFunctional(LinearOperator):
    """
    Provides data misfit, gradient and Hessian information for the data misfit
    part of a time-independent symmetric inverse problem.
    """
    __metaclass__ = abc.ABCMeta

    # Instantiation
    def __init__(self, V, Vm, bc, bcadj, \
    RHSinput=[], ObsOp=[], UD=[], Regul=[], Data=[], plot=False, \
    mycomm=None):
        # Define test, trial and all other functions
        self.trial = TrialFunction(V)
        self.test = TestFunction(V)
        self.mtrial = TrialFunction(Vm)
        self.mtest = TestFunction(Vm)
        self.rhs = Function(V)
        self.m = Function(Vm)
        self.mcopy = Function(Vm)
        self.srchdir = Function(Vm)
        self.delta_m = Function(Vm)
        self.MG = Function(Vm)
        self.MGv = self.MG.vector()
        self.Grad = Function(Vm)
        self.Gradnorm = 0.0
        self.lenm = len(self.m.vector().array())
        self.u = Function(V)
        self.ud = Function(V)
        self.diff = Function(V)
        self.p = Function(V)
        # Store other info:
        self.ObsOp = ObsOp
        self.UD = UD
        self.reset()  # Initialize U, C and E to []
        self.Data = Data
        self.GN = 1.0  # GN = 0.0 => GN Hessian; = 1.0 => full Hessian
        # Define weak forms to assemble A, C and E
        self._wkforma()
        self._wkformc()
        self._wkforme()
        # Operators and bc
        LinearOperator.__init__(self, self.delta_m.vector(), \
        self.delta_m.vector())
        self.bc = bc
        self.bcadj = bcadj
        self._assemble_solverM(Vm)
        self.assemble_A()
        self.assemble_RHS(RHSinput)
        self.Regul = Regul
        self.regparam = 1.0
        if Regul != []:
            self.PD = self.Regul.isPD()
        # Counters, tolerances and others
        self.nbPDEsolves = 0  # Updated when solve_A called
        self.nbfwdsolves = 0  # Counter for plots
        self.nbadjsolves = 0  # Counter for plots
        # MPI:
        self.mycomm = mycomm

    def copy(self):
        """Define a copy method"""

        V = self.trial.function_space()
        Vm = self.mtrial.function_space()
        newobj = self.__class__(V, Vm, self.bc, self.bcadj, [], self.ObsOp, \
        self.UD, self.Regul, self.Data, False)
        newobj.RHS = self.RHS
        newobj.update_m(self.m)
        return newobj

    def mult(self, mhat, y):
        """mult(self, mhat, y): do y = Hessian * mhat
        member self.GN sets full Hessian (=1.0) or GN Hessian (=0.0)"""

        N = self.Nbsrc  # Number of sources
        y[:] = np.zeros(self.lenm)

        for C, E in zip(self.C, self.E):
            C.transpmult(mhat, self.rhs.vector())
            if self.bcadj is not None:
                self.bcadj.apply(self.rhs.vector())
            self.solve_A(self.u.vector(), -self.rhs.vector())

            E.transpmult(mhat, self.rhs.vector())
            Etmhat = self.rhs.vector().array()
            self.rhs.vector().axpy(1.0, self.ObsOp.incradj(self.u))
            if self.bcadj is not None:
                self.bcadj.apply(self.rhs.vector())
            self.solve_A(self.p.vector(), -self.rhs.vector())

            y.axpy(1.0 / N, C * self.p.vector())
            y.axpy(self.GN / N, E * self.u.vector())

        y.axpy(self.regparam, self.Regul.hessian(mhat))

    # Getters
    def getm(self):
        return self.m

    def getmarray(self):
        return self.m.vector().array()

    def getmcopyarray(self):
        return self.mcopy.vector().array()

    def getVm(self):
        return self.mtrial.function_space()

    def getMGarray(self):
        return self.MG.vector().array()

    def getMGvec(self):
        return self.MGv

    def getGradarray(self):
        return self.Grad.vector().array()

    def getGradnorm(self):
        return self.Gradnorm

    def getsrchdirarray(self):
        return self.srchdir.vector().array()

    def getsrchdirvec(self):
        return self.srchdir.vector()

    def getsrchdirnorm(self):
        return np.sqrt(
            (self.MM * self.getsrchdirvec()).inner(self.getsrchdirvec()))

    def getgradxdir(self):
        return self.gradxdir

    def getcost(self):
        return self.cost, self.misfit, self.regul

    def getprecond(self):
        return self.Regul.getprecond()
#        Prec = PETScKrylovSolver("richardson", "amg")
#        Prec.parameters["maximum_iterations"] = 1
#        Prec.parameters["error_on_nonconvergence"] = False
#        Prec.parameters["nonzero_initial_guess"] = False
#        Prec.set_operator(self.Regul.get_precond())
#        return Prec

    def getMass(self):
        return self.MM

    # Setters
    def setsrchdir(self, arr):
        self.srchdir.vector()[:] = arr

    def setgradxdir(self, valueloc):
        """Sum all local results for Grad . Srch_dir"""
        try:
            valueglob = MPI.sum(self.mycomm, valueloc)
        except:
            valueglob = valueloc
        self.gradxdir = valueglob

    # Solve
    def solvefwd(self, cost=False):
        """Solve fwd operators for given RHS"""

        self.nbfwdsolves += 1
        if cost: self.misfit = 0.0
        self.U = []
        self.C = []
        for ii, rhs in enumerate(self.RHS):
            self.solve_A(self.u.vector(), rhs)
            u_obs, noiselevel = self.ObsOp.obs(self.u)
            self.U.append(u_obs)
            if cost:
                self.misfit += self.ObsOp.costfct(u_obs, self.UD[ii])
            self.C.append(assemble(self.c))
        if cost:
            self.misfit /= len(self.U)
            self.regul = self.Regul.cost(self.m)
            self.cost = self.misfit + self.regparam * self.regul

    def solvefwd_cost(self):
        """Solve fwd operators for given RHS and compute cost fct"""

        self.solvefwd(True)

    def solveadj(self, grad=False):
        """Solve adj operators"""

        self.nbadjsolves += 1
        self.Nbsrc = len(self.UD)
        if grad:
            self.MG.vector().zero()
        self.E = []

        for ii, C in enumerate(self.C):
            self.ObsOp.assemble_rhsadj(self.U[ii], self.UD[ii], \
            self.rhs, self.bcadj)
            self.solve_A(self.p.vector(), self.rhs.vector())
            self.E.append(assemble(self.e))
            if grad:
                self.MG.vector().axpy(1.0 / self.Nbsrc, C * self.p.vector())

        if grad:
            self.MG.vector().axpy(self.regparam, self.Regul.grad(self.m))
            self.solverM.solve(self.Grad.vector(), self.MG.vector())
            self.Gradnorm = np.sqrt(self.Grad.vector().inner(self.MG.vector()))

    def solveadj_constructgrad(self):
        """Solve adj operators and assemble gradient"""

        self.solveadj(True)

    # Assembler
    def assemble_A(self):
        """Assemble operator A(m)"""

        self.A = assemble(self.a)
        if self.bc is not None:
            self.bc.apply(self.A)
        compute_eigfenics(self.A, 'eigA.txt')
        self.set_solver()

    def solve_A(self, b, f):
        """Solve system of the form A.b = f, 
        with b and f in form to be used in solver."""

        self.solver.solve(b, f)
        self.nbPDEsolves += 1

    def assemble_RHS(self, RHSin):
        """Assemble RHS for fwd solve"""

        if RHSin == []: self.RHS = None
        else:
            self.RHS = []
            for rhs in RHSin:
                if isinstance(rhs, Expression):
                    L = rhs * self.test * dx
                    b = assemble(L)
                    if self.bc is not None:
                        self.bc.apply(b)
                    self.RHS.append(b)
                elif isinstance(rhs, GenericVector):
                    self.RHS.append(rhs)
                else:
                    raise WrongInstanceError(
                        "rhs should be an Expression or a GenericVector")

    def _assemble_solverM(self, Vm):

        self.MM = assemble(inner(self.mtrial, self.mtest) * dx)
        self.solverM = PETScKrylovSolver('cg', 'jacobi')
        self.solverM.parameters["maximum_iterations"] = 1000
        self.solverM.parameters["relative_tolerance"] = 1e-12
        self.solverM.parameters["error_on_nonconvergence"] = True
        self.solverM.parameters["nonzero_initial_guess"] = False
        #        self.solverM = LUSolver()
        #        self.solverM.parameters['reuse_factorization'] = True
        #        self.solverM.parameters['symmetric'] = True
        self.solverM.set_operator(self.MM)

    # Update param
    def update_Data(self, Data):
        """Update Data member"""

        self.Data = Data
        self.assemble_A()
        self.reset()

    def update_m(self, m):
        """Update values of parameter m"""

        if isinstance(m, np.ndarray):
            self.m.vector()[:] = m
        elif isinstance(m, Function):
            self.m.assign(m)
        elif isinstance(m, float):
            self.m.vector()[:] = m
        elif isinstance(m, int):
            self.m.vector()[:] = float(m)
        else:
            raise WrongInstanceError('Format for m not accepted')
        self.assemble_A()
        self.reset()

    def backup_m(self):
        self.mcopy.assign(self.m)

    def restore_m(self):
        self.update_m(self.mcopy)

    def reset(self):
        """Reset U, C and E"""
        self.U = []
        self.C = []
        self.E = []

    def set_solver(self):
        """Reset solver for fwd operator"""

        #self.solver = LUSolver()
        #self.solver.parameters['reuse_factorization'] = True
        self.solver = PETScKrylovSolver("cg", "amg")
        self.solver.parameters["maximum_iterations"] = 1000
        self.solver.parameters["relative_tolerance"] = 1e-12
        self.solver.parameters["error_on_nonconvergence"] = True
        self.solver.parameters["nonzero_initial_guess"] = False
        self.solver.set_operator(self.A)

    def addPDEcount(self, increment=1):
        """Increase 'nbPDEsolves' by 'increment'"""
        self.nbPDEsolves += increment

    def resetPDEsolves(self):
        self.nbPDEsolves = 0

    # Additional methods for compatibility with CG solver:
    def init_vector(self, x, dim):
        """Initialize vector x to be compatible with parameter
         Does not work in dolfin 1.3.0"""
        self.MM.init_vector(x, 0)

    def init_vector130(self):
        """Initialize vector x to be compatible with parameter"""
        return Vector(Function(self.mcopy.function_space()).vector())

    # Abstract methods
    @abc.abstractmethod
    def _wkforma(self):
        self.a = []

    @abc.abstractmethod
    def _wkformc(self):
        self.c = []

    @abc.abstractmethod
    def _wkforme(self):
        self.e = []


    def inversion(self, initial_medium, target_medium, mpicomm, \
    parameters_in=[], myplot=None):
        """ solve inverse problem with that objective function """

        parameters = {'tolgrad':1e-10, 'tolcost':1e-14, 'maxnbNewtiter':50, \
        'maxtolcg':0.5}
        parameters.update(parameters_in)
        maxnbNewtiter = parameters['maxnbNewtiter']
        tolgrad = parameters['tolgrad']
        tolcost = parameters['tolcost']
        tolcg = parameters['maxtolcg']
        mpirank = MPI.rank(mpicomm)

        self.update_m(initial_medium)
        self._plotm(myplot, 'init')

        if mpirank == 0:
            print('\t{:12s} {:10s} {:12s} {:12s} {:12s} {:10s} \t{:10s} {:12s} {:12s}'.format(
                'iter', 'cost', 'misfit', 'reg', '|G|', 'medmisf', 'a_ls', 'tol_cg', 'n_cg'))
        dtruenorm = np.sqrt(target_medium.vector().\
        inner(self.MM*target_medium.vector()))

        self.solvefwd_cost()
        for it in range(maxnbNewtiter):
            self.solveadj_constructgrad()  # compute gradient

            if it == 0: gradnorm0 = self.Gradnorm
            diff = self.m.vector() - target_medium.vector()
            medmisfit = np.sqrt(diff.inner(self.MM * diff))
            if mpirank == 0:
                print('{:12d} {:12.4e} {:12.2e} {:12.2e} {:11.4e} {:10.2e} ({:4.2f})'.format(
                    it, self.cost, self.misfit, self.regul,
                    self.Gradnorm, medmisfit, medmisfit / dtruenorm), end=' ')
            self._plotm(myplot, str(it))
            self._plotgrad(myplot, str(it))

            if self.Gradnorm < gradnorm0 * tolgrad or self.Gradnorm < 1e-12:
                if mpirank == 0:
                    print('\nGradient sufficiently reduced -- optimization stopped')
                break

            # Compute search direction:
            tolcg = min(tolcg, np.sqrt(self.Gradnorm / gradnorm0))
            self.assemble_hessian()  # for regularization
            cgiter, cgres, cgid, tolcg = compute_searchdirection(
                self, 'Newt', tolcg)
            self._plotsrchdir(myplot, str(it))

            # Line search:
            cost_old = self.cost
            statusLS, LScount, alpha = bcktrcklinesearch(self, 12)
            if mpirank == 0:
                print('{:11.3f} {:12.2e} {:10d}'.format(alpha, tolcg, cgiter))
            if self.PD: self.Regul.update_w(self.srchdir.vector(), alpha)

            if np.abs(self.cost - cost_old) / np.abs(cost_old) < tolcost:
                if tolcg < 1e-14:
                    if mpirank == 0:
                        print('Cost function stagnates -- optimization aborted')
                    break
                tolcg = 0.001 * tolcg

    def assemble_hessian(self):
        self.Regul.assemble_hessian(self.m)

    def _plotm(self, myplot, index):
        """ plot media during inversion """
        if myplot is not None:
            myplot.set_varname('m' + index)
            myplot.plot_vtk(self.m)

    def _plotgrad(self, myplot, index):
        """ plot grad during inversion """
        if myplot is not None:
            myplot.set_varname('Grad_m' + index)
            myplot.plot_vtk(self.Grad)

    def _plotsrchdir(self, myplot, index):
        """ plot srchdir during inversion """
        if myplot is not None:
            myplot.set_varname('srchdir_m' + index)
            myplot.plot_vtk(self.srchdir)
Example #22
class OSI(Field):

    @classmethod
    def default_params(cls):
        params = Field.default_params()
        params.update(
            finalize=True,
            )
        return params

    def add_fields(self):
        params = self.params.copy_recursive()
        params["save"] = False
        params["plot"] = False
        #params["callback"] = False
        #params.pop("finalize")

        fields = []
        #fields.append(WSS(params=params))
        #return fields

        f = TimeIntegral("WSS", params=params, label="OSI")
        fields.append(f)
        fields.append(Magnitude(f, params=params))

        f = Magnitude("WSS", params=params)
        fields.append(f)
        fields.append(TimeIntegral(f, params=params, label="OSI"))

        #f = TimeIntegral("WSS", label="OSI")
        #fields.append(f)
        #fields.append(Magnitude(f))

        #f = Magnitude("WSS")
        #fields.append(f)
        #fields.append(TimeIntegral(f, label="OSI"))

        return fields

    def before_first_compute(self, get):
        tau = get("WSS")
        self.osi = Function(tau.sub(0).function_space().collapse())

    def compute(self, get):
        # Requires the fields Magnitude(TimeIntegral("WSS", label="OSI")) and
        # TimeIntegral(Magnitude("WSS"), label="OSI")
        #self.mag_ta_wss = get("Magnitude_TimeIntegral_WSS_OSI")
        #self.ta_mag_wss = get("TimeIntegral_Magnitude_WSS_OSI")
        self.mag_ta_wss = get("Magnitude_TimeIntegral_WSS-OSI")
        self.ta_mag_wss = get("TimeIntegral_Magnitude_WSS-OSI")

        if self.params.finalize:
            return None
        elif self.mag_ta_wss is None or self.ta_mag_wss is None:
            return None
        else:
            expr = conditional(self.ta_mag_wss < 1e-15,
                               0.0,
                               0.5 * (1.0 - self.mag_ta_wss / self.ta_mag_wss))
            self.osi.assign(project(expr, self.osi.function_space()))
            return self.osi

    def after_last_compute(self, get):
        self.mag_ta_wss = get("Magnitude_TimeIntegral_WSS-OSI")
        self.ta_mag_wss = get("TimeIntegral_Magnitude_WSS-OSI")
        #print self.name, " Calling after_last_compute"

        expr = conditional(self.ta_mag_wss < 1e-15,
                           0.0,
                           0.5 * (1.0 - self.mag_ta_wss / self.ta_mag_wss))
        self.osi.assign(project(expr, self.osi.function_space()))

        return self.osi
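The expression assembled in compute/after_last_compute is the standard oscillatory shear index, OSI = 0.5 * (1 - |time integral of tau| / time integral of |tau|). A plain-numpy sketch of the same pointwise formula (names are illustrative only):

import numpy as np

def osi_pointwise(mag_of_time_integral, time_integral_of_mag, eps=1e-15):
    """0.5 * (1 - |integral of tau dt| / integral of |tau| dt), guarded against a zero denominator."""
    num = np.asarray(mag_of_time_integral, dtype=float)
    denom = np.asarray(time_integral_of_mag, dtype=float)
    safe = np.where(denom < eps, 1.0, denom)
    return np.where(denom < eps, 0.0, 0.5 * (1.0 - num / safe))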
Example #23
class BlendedAlgebraicVofModel(VOFMixin, MultiPhaseModel):
    description = 'A blended algebraic VOF scheme implementing HRIC/CICSAM type schemes'

    def __init__(self, simulation):
        """
        A blended algebraic VOF scheme works by using a specific
        convection scheme in the advection of the colour function
        that ensures a sharp interface.

        * The convection scheme should be the name of a convection
          scheme that is tailored for advection of the colour
          function, i.e "HRIC", "MHRIC", "RHRIC" etc,
        * The velocity field should be divergence free

        The colour function is unity when rho=rho0 and nu=nu0 and
        zero when rho=rho1 and nu=nu1
        """
        self.simulation = simulation
        simulation.log.info('Creating blended VOF multiphase model')

        # Define function space and solution function
        V = simulation.data['Vc']
        self.degree = V.ufl_element().degree()
        simulation.data['c'] = Function(V)
        simulation.data['cp'] = Function(V)
        simulation.data['cpp'] = Function(V)

        # The projected density and viscosity functions for the new time step can be made continuous
        self.continuous_fields = simulation.input.get_value(
            'multiphase_solver/continuous_fields', CONTINUOUS_FIELDS, 'bool')
        if self.continuous_fields:
            simulation.log.info('    Using continuous rho and nu fields')
            mesh = simulation.data['mesh']
            V_cont = dolfin.FunctionSpace(mesh, 'CG', self.degree + 1)
            self.continuous_c = dolfin.Function(V_cont)
            self.continuous_c_old = dolfin.Function(V_cont)
            self.continuous_c_oldold = dolfin.Function(V_cont)

        self.force_bounded = simulation.input.get_value(
            'multiphase_solver/force_bounded', FORCE_BOUNDED, 'bool')
        self.force_sharp = simulation.input.get_value(
            'multiphase_solver/force_sharp', FORCE_SHARP, 'bool')

        # Calculate mu from rho and nu (i.e. mu is quadratic in c) or directly from c (linear in c)
        self.calculate_mu_directly_from_colour_function = simulation.input.get_value(
            'multiphase_solver/calculate_mu_directly_from_colour_function',
            CALCULATE_MU_DIRECTLY_FROM_COLOUR_FUNCTION,
            'bool',
        )
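        # (presumably this means mu = rho * nu, quadratic in c when rho and nu are
        # both linear blends, versus a direct linear blend of mu in c)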

        # Get the physical properties
        self.set_physical_properties(read_input=True)

        # The convection blending function that counteracts numerical diffusion
        scheme = simulation.input.get_value('convection/c/convection_scheme',
                                            CONVECTION_SCHEME, 'string')
        simulation.log.info(
            '    Using convection scheme %s for the colour function' % scheme)
        scheme_class = get_convection_scheme(scheme)
        self.convection_scheme = scheme_class(simulation, 'c')
        self.need_gradient = scheme_class.need_alpha_gradient

        # Create the equations when the simulation starts
        simulation.hooks.add_pre_simulation_hook(
            self.on_simulation_start,
            'BlendedAlgebraicVofModel setup equations')

        # Update the rho and nu fields before each time step
        simulation.hooks.add_pre_timestep_hook(
            self.update, 'BlendedAlgebraicVofModel - update colour field')
        simulation.hooks.register_custom_hook_point('MultiPhaseModelUpdated')

        # Linear solver
        # Creating the solver here causes the MPI unit tests to fail in "random" places for some reason
        # Quick fix: lazy loading of the solver
        LAZY_LOAD_SOLVER = True
        if LAZY_LOAD_SOLVER:
            self.solver = None
        else:
            self.solver = linear_solver_from_input(
                self.simulation, 'solver/c', default_parameters=SOLVER_OPTIONS)
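        # With lazy loading the solver is created on first use in update() below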

        # Subcycle the VOF calculation multiple times per Navier-Stokes time step
        self.num_subcycles = simulation.input.get_value(
            'multiphase_solver/num_subcycles', NUM_SUBCYCLES, 'int')
        if self.num_subcycles < 1:
            self.num_subcycles = 1

        # Time stepping based on the subcycled values
        if self.num_subcycles == 1:
            self.cp = simulation.data['cp']
            self.cpp = simulation.data['cpp']
        else:
            self.cp = dolfin.Function(V)
            self.cpp = dolfin.Function(V)

        # Plot density and viscosity fields for visualization
        self.plot_fields = simulation.input.get_value(
            'multiphase_solver/plot_fields', PLOT_FIELDS, 'bool')
        if self.plot_fields:
            V_plot = V if not self.continuous_fields else V_cont
            self.rho_for_plot = Function(V_plot)
            self.nu_for_plot = Function(V_plot)
            self.rho_for_plot.rename('rho', 'Density')
            self.nu_for_plot.rename('nu', 'Kinematic viscosity')
            simulation.io.add_extra_output_function(self.rho_for_plot)
            simulation.io.add_extra_output_function(self.nu_for_plot)

        # Slope limiter in case we are using DG1, not DG0
        self.slope_limiter = SlopeLimiter(simulation, 'c',
                                          simulation.data['c'])
        simulation.log.info('    Using slope limiter: %s' %
                            self.slope_limiter.limiter_method)
        self.is_first_timestep = True

    def on_simulation_start(self):
        """
        This runs when the simulation starts. It does not run in __init__
        since the solver needs the density and viscosity we define, and
        we need the velocity that is defined by the solver
        """
        sim = self.simulation
        beta = self.convection_scheme.blending_function

        # The time step (real value to be supplied later)
        self.dt = Constant(sim.dt / self.num_subcycles)

        # Setup the equation to solve
        c = sim.data['c']
        cp = self.cp
        cpp = self.cpp
        dirichlet_bcs = sim.data['dirichlet_bcs'].get('c', [])

        # Use backward Euler (BDF1) for timestep 1
        self.time_coeffs = Constant([1, -1, 0])
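        # The coefficients weight the c^n, c^{n-1} and c^{n-2} terms of the time
        # derivative: BDF1 is [1, -1, 0] and BDF2 is [3/2, -2, 1/2]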

        if dolfin.norm(cpp.vector()) > 0 and self.num_subcycles == 1:
            # Use BDF2 from the start
            self.time_coeffs.assign(Constant([3 / 2, -2, 1 / 2]))
            sim.log.info(
                'Using second order timestepping from the start in BlendedAlgebraicVOF'
            )

        # Make sure the convection scheme has something useful in the first iteration
        c.assign(sim.data['cp'])

        if self.num_subcycles > 1:
            cp.assign(sim.data['cp'])

        # Plot density and viscosity
        self.update_plot_fields()

        # Define equation for advection of the colour function
        #    ∂c/∂t +  ∇⋅(c u) = 0
        Vc = sim.data['Vc']
        project_dgt0 = sim.input.get_value(
            'multiphase_solver/project_uconv_dgt0', True, 'bool')
        if self.degree == 0 and project_dgt0:
            self.vel_dgt0_projector = VelocityDGT0Projector(
                sim, sim.data['u_conv'])
            self.u_conv = self.vel_dgt0_projector.velocity
        else:
            self.u_conv = sim.data['u_conv']
        forcing_zones = sim.data['forcing_zones'].get('c', [])
        self.eq = AdvectionEquation(
            sim,
            Vc,
            cp,
            cpp,
            self.u_conv,
            beta,
            time_coeffs=self.time_coeffs,
            dirichlet_bcs=dirichlet_bcs,
            forcing_zones=forcing_zones,
            dt=self.dt,
        )

        if self.need_gradient:
            # Reconstruct the gradient from the colour function DG0 field
            self.convection_scheme.initialize_gradient()

        # Notify listeners that the initial values are available
        sim.hooks.run_custom_hook('MultiPhaseModelUpdated')

    def get_colour_function(self, k):
        """
        Return the colour function on timestep t^{n+k}
        """
        if k == 0:
            if self.continuous_fields:
                c = self.continuous_c
            else:
                c = self.simulation.data['c']
        elif k == -1:
            if self.continuous_fields:
                c = self.continuous_c_old
            else:
                c = self.simulation.data['cp']
        elif k == -2:
            if self.continuous_fields:
                c = self.continuous_c_oldold
            else:
                c = self.simulation.data['cpp']

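        # Optionally clamp c to [0, 1] (force_bounded) and/or round it to the
        # nearest pure phase (force_sharp) before returning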
        if self.force_bounded:
            c = dolfin.max_value(dolfin.min_value(c, Constant(1.0)),
                                 Constant(0.0))

        if self.force_sharp:
            c = dolfin.conditional(dolfin.ge(c, 0.5), Constant(1.0),
                                   Constant(0.0))

        return c

    def update_plot_fields(self):
        """
        These fields are only needed to visualise the rho and nu fields
        in xdmf format for Paraview or similar
        """
        if not self.plot_fields:
            return
        V = self.rho_for_plot.function_space()
        dolfin.project(self.get_density(0), V, function=self.rho_for_plot)
        dolfin.project(self.get_laminar_kinematic_viscosity(0),
                       V,
                       function=self.nu_for_plot)

    def update(self, timestep_number, t, dt):
        """
        Update the VOF field by advecting it for a time dt
        using the given divergence free velocity field
        """
        timer = dolfin.Timer('Ocellaris update VOF')
        sim = self.simulation

        # Get the functions
        c = sim.data['c']
        cp = sim.data['cp']
        cpp = sim.data['cpp']

        # Stop early if the free surface is forced to stay still
        force_static = sim.input.get_value('multiphase_solver/force_static',
                                           FORCE_STATIC, 'bool')
        if force_static:
            c.assign(cp)
            cpp.assign(cp)
            timer.stop()  # Stop timer before hook
            sim.hooks.run_custom_hook('MultiPhaseModelUpdated')
            self.is_first_timestep = False
            return

        if timestep_number != 1:
            # Update the previous values
            cpp.assign(cp)
            cp.assign(c)

            if self.degree == 0:
                self.vel_dgt0_projector.update()

        # Reconstruct the gradients
        if self.need_gradient:
            self.convection_scheme.gradient_reconstructor.reconstruct()

        # Update the convection blending factors
        is_static = isinstance(self.convection_scheme, StaticScheme)
        if not is_static:
            self.convection_scheme.update(dt / self.num_subcycles, self.u_conv)

        # Update global bounds in slope limiter
        if self.is_first_timestep:
            lo, hi = self.slope_limiter.set_global_bounds(lo=0.0, hi=1.0)
            if self.slope_limiter.has_global_bounds:
                sim.log.info(
                    'Setting global bounds [%r, %r] in BlendedAlgebraicVofModel'
                    % (lo, hi))

        # Solve the advection equations for the colour field
        if timestep_number == 1 or is_static:
            c.assign(cp)
        else:
            if self.solver is None:
                sim.log.info('Creating colour function solver', flush=True)
                self.solver = linear_solver_from_input(
                    self.simulation,
                    'solver/c',
                    default_parameters=SOLVER_OPTIONS)

            # Solve the advection equation
            A = self.eq.assemble_lhs()
            for _ in range(self.num_subcycles):
                b = self.eq.assemble_rhs()
                self.solver.inner_solve(A, c.vector(), b, 1, 0)
                self.slope_limiter.run()
                if self.num_subcycles > 1:
                    self.cpp.assign(self.cp)
                    self.cp.assign(c)

        # Optionally use a continuous predicted colour field
        if self.continuous_fields:
            Vcg = self.continuous_c.function_space()
            dolfin.project(c, Vcg, function=self.continuous_c)
            dolfin.project(cp, Vcg, function=self.continuous_c_old)
            dolfin.project(cpp, Vcg, function=self.continuous_c_oldold)

        # Report properties of the colour field
        sim.reporting.report_timestep_value('min(c)', c.vector().min())
        sim.reporting.report_timestep_value('max(c)', c.vector().max())

        # The next update should use the dt from this time step of the
        # main Navier-Stokes solver. The update just computed above uses
        # data from the previous Navier-Stokes solve with the previous dt
        self.dt.assign(dt / self.num_subcycles)

        if dt != sim.dt_prev:
            # Temporary switch to first order timestepping for the next
            # time step. This code is run before the Navier-Stokes solver
            # in each time step
            sim.log.info(
                'VOF solver is first order this time step due to change in dt')
            self.time_coeffs.assign(Constant([1.0, -1.0, 0.0]))
        else:
            # Use second order backward time difference next time step
            self.time_coeffs.assign(Constant([3 / 2, -2.0, 1 / 2]))

        self.update_plot_fields()
        timer.stop()  # Stop timer before hook
        sim.hooks.run_custom_hook('MultiPhaseModelUpdated')
        self.is_first_timestep = False
Example #24
0
Rb = Constant(1.0)
eta_top = Constant(1.0)
eta_bottom = Constant(0.01)
eta = eta_bottom + phi * (eta_top - eta_bottom)
forms_stokes = FormsStokes(mesh, mixedL, mixedG, alpha) \
    .forms_steady(eta, Rb * phi * Constant((0, -1, 0)))
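# eta varies linearly from eta_bottom (phi = 0) to eta_top (phi = 1), and the body
# force is a composition-dependent buoyancy term Rb * phi acting in the negative
# y-direction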

ssc = StokesStaticCondensation(mesh,
                               forms_stokes['A_S'], forms_stokes['G_S'],
                               forms_stokes['B_S'],
                               forms_stokes['Q_S'], forms_stokes['S_S'])

# Particle advector
C_CFL = 0.5
hmin = MPI.min(comm, mesh.hmin())
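# C_CFL and hmin would typically bound the advective time step, e.g.
# dt = C_CFL * hmin / u_max (a sketch; the actual time-step computation is not shown here)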
ap = advect_rk3(ptcls, u_vec.function_space(), u_vec, "closed")


# Write particles and their values to XDMF file
particles_directory = "./particles/"
points_list = list(Point(*pp) for pp in ptcls.positions())
particles_values = ptcls.get_property(property_idx)
XDMFFile(os.path.join(particles_directory, "step%.4d.xdmf" % 0)) \
    .write(points_list, particles_values)

n_particles = MPI.sum(comm, len(points_list))
info("Solving with %d particles" % n_particles)

# Write the initial composition field to XDMF file
XDMFFile("composition.xdmf").write_checkpoint(phi, "composition", float(t), append=False)
conservation0 = assemble(phi * dx)
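# conservation0 stores the initial integral of phi; later time steps can presumably
# compare assemble(phi * dx) against it to monitor mass conservation (not shown here)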
Example #25
0
        print('Estimated model parameter values:')
        print(m0 + dm)
        print('Perturbed model parameter values:')
        print(m1)
        print()

        # Reset reference model state
        dT_msr_noise.assign(dolfin.Constant([0, 0, 0]))
        inverse_solver.assign_model_parameters(m0)
        inverse_solver.solve_inverse_problem()

    if TEST_SENSITIVITY_DISPLACEMENT_MEASUREMENTS:
        logger.info('Test displacement measurement sensitivity')

        # Uniform perturbation of all displacements
        perturb_u_msr = np.full((u.function_space().dim(), ), 0.1 * uxD_max)

        m0 = np.array(inverse_solver.view_model_parameter_values())

        dm = sum(
            inverse_solver.observe_dmdu_msr(t)[i_msr_u]
            for t in inverse_solver.observation_times).dot(perturb_u_msr)
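        # dm is the first-order prediction of the change in the model parameters due
        # to the measurement perturbation; it is compared with the re-estimated values m1 below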

        du_msr_noise.vector().set_local(perturb_u_msr)

        n, b = inverse_solver.solve_inverse_problem()  # Default times
        if not b:
            logger.error('Inverse solver did not converge')

        m1 = np.array(inverse_solver.view_model_parameter_values())

        passed_test_sensitivity_displacements = \