Example #1
class EmbeddedDGAdvection(DGAdvection):

    def __init__(self, state, V, Vdg=None, continuity=False):

        if Vdg is None:
            Vdg_elt = BrokenElement(V.ufl_element())
            Vdg = FunctionSpace(state.mesh, Vdg_elt)

        super(EmbeddedDGAdvection, self).__init__(state, Vdg, continuity)

        self.xdg_in = Function(Vdg)
        self.xdg_out = Function(Vdg)

        self.x_projected = Function(V)
        pparameters = {'ksp_type':'cg',
                       'pc_type':'bjacobi',
                       'sub_pc_type':'ilu'}
        self.Projector = Projector(self.xdg_out, self.x_projected,
                                   solver_parameters=pparameters)

    def apply(self, x_in, x_out):

        self.xdg_in.interpolate(x_in)
        super(EmbeddedDGAdvection, self).apply(self.xdg_in, self.xdg_out)
        self.Projector.project()
        x_out.assign(self.x_projected)
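
The wrapper above relies on a plain Firedrake pattern: interpolate the field into a fully discontinuous copy of its space, operate there, then project back with a CG solve. A minimal standalone sketch of that round trip (the mesh, spaces and field names are illustrative, not taken from the class above):

from firedrake import (UnitSquareMesh, FunctionSpace, BrokenElement,
                       Function, SpatialCoordinate, Projector, sin, pi)

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)                            # original space
Vdg = FunctionSpace(mesh, BrokenElement(V.ufl_element()))   # broken copy of V

x, y = SpatialCoordinate(mesh)
q = Function(V)
q.interpolate(sin(pi * x))        # field to be advected

q_dg = Function(Vdg)
q_dg.interpolate(q)               # move into the broken space
# ... advection would act on q_dg here ...

q_out = Function(V)
# project back to V; CG with bjacobi/ilu mirrors the parameters used above
Projector(q_dg, q_out,
          solver_parameters={'ksp_type': 'cg',
                             'pc_type': 'bjacobi',
                             'sub_pc_type': 'ilu'}).project()

EmbeddedDGAdvection packages exactly this interpolate/advect/project sequence behind its apply method.
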
Example #2
    def _setup(self, state, field, options):

        if options.name in ["embedded_dg", "recovered"]:
            self.fs = options.embedding_space
            self.xdg_in = Function(self.fs)
            self.xdg_out = Function(self.fs)
            self.x_projected = Function(field.function_space())
            parameters = {'ksp_type': 'cg',
                          'pc_type': 'bjacobi',
                          'sub_pc_type': 'ilu'}
            self.Projector = Projector(self.xdg_out, self.x_projected,
                                       solver_parameters=parameters)

        if options.name == "recovered":
            # set up the necessary functions
            self.x_in = Function(field.function_space())
            x_rec = Function(options.recovered_space)
            x_brok = Function(options.broken_space)

            # set up interpolators and projectors
            self.x_rec_projector = Recoverer(self.x_in, x_rec, VDG=self.fs, boundary_method=options.boundary_method)  # recovered function
            self.x_brok_projector = Projector(x_rec, x_brok)  # function projected back
            self.xdg_interpolator = Interpolator(self.x_in + x_rec - x_brok, self.xdg_in)
            if self.limiter is not None:
                self.x_brok_interpolator = Interpolator(self.xdg_out, x_brok)
                self.x_out_projector = Recoverer(x_brok, self.x_projected)
Example #3
    def _prepare_output(self, function, cg):
        from firedrake import FunctionSpace, VectorFunctionSpace, \
            TensorFunctionSpace, Function, Projector, Interpolator

        name = function.name()

        # Need to project/interpolate?
        # If space is linear and continuity of output space matches
        # continuity of current space, then we can just use the
        # input function.
        if is_linear(function.function_space()) and \
           is_dg(function.function_space()) == (not cg) and \
           is_cg(function.function_space()) == cg:
            return OFunction(array=get_array(function),
                             name=name, function=function)

        # OK, let's go and do it.
        if cg:
            family = "Lagrange"
        else:
            family = "Discontinuous Lagrange"

        output = self._output_functions.get(function)
        if output is None:
            # Build appropriate space for output function.
            shape = function.ufl_shape
            if len(shape) == 0:
                V = FunctionSpace(function.ufl_domain(), family, 1)
            elif len(shape) == 1:
                if numpy.prod(shape) > 3:
                    raise ValueError("Can't write vectors with more than 3 components")
                V = VectorFunctionSpace(function.ufl_domain(), family, 1,
                                        dim=shape[0])
            elif len(shape) == 2:
                if numpy.prod(shape) > 9:
                    raise ValueError("Can't write tensors with more than 9 components")
                V = TensorFunctionSpace(function.ufl_domain(), family, 1,
                                        shape=shape)
            else:
                raise ValueError("Unsupported shape %s" % (shape, ))
            output = Function(V)
            self._output_functions[function] = output

        if self.project:
            projector = self._mappers.get(function)
            if projector is None:
                projector = Projector(function, output)
                self._mappers[function] = projector
            projector.project()
        else:
            interpolator = self._mappers.get(function)
            if interpolator is None:
                interpolator = Interpolator(function, output)
                self._mappers[function] = interpolator
            interpolator.interpolate()

        return OFunction(array=get_array(output), name=name, function=output)
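
A detail worth noting in _prepare_output is that the Projector or Interpolator is built once per field and cached in self._mappers, so repeated output only re-runs .project() or .interpolate() rather than rebuilding anything. A reduced sketch of that reuse pattern (the spaces and the update loop below are illustrative):

from firedrake import (UnitSquareMesh, FunctionSpace, Function,
                       SpatialCoordinate, Projector)

mesh = UnitSquareMesh(4, 4)
V_in = FunctionSpace(mesh, "DG", 2)    # space the field actually lives in
V_out = FunctionSpace(mesh, "CG", 1)   # P1 space expected by the output format

x, y = SpatialCoordinate(mesh)
f = Function(V_in)
f.interpolate(x * y)

output = Function(V_out)
projector = Projector(f, output)       # assembled once and cached per field

for dump in range(3):                  # e.g. once per output time
    # ... the model updates f in place here ...
    projector.project()                # cheap: reuses the cached solver
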
Example #4
    def __init__(self, state, problem, physics_list=None,
                 prescribed_transporting_velocity=None):

        super().__init__(state, problem,
                         physics_list=physics_list)

        if prescribed_transporting_velocity is not None:
            self.velocity_projection = Projector(prescribed_transporting_velocity(self.state.t),
                                                 self.state.fields('u'))
        else:
            self.velocity_projection = None
Example #5
    def _prepare_output(self, function, max_elem):
        from firedrake import FunctionSpace, VectorFunctionSpace, \
            TensorFunctionSpace, Function, Projector, Interpolator

        name = function.name()
        # Need to project/interpolate?
        # If the function's element already matches max_elem, we can
        # just use the input function.
        if function.ufl_element() == max_elem:
            return OFunction(array=get_array(function),
                             name=name,
                             function=function)
        #  OK, let's go and do it.
        shape = function.ufl_shape
        output = self._output_functions.get(function)
        if output is None:
            # Build appropriate space for output function.
            shape = function.ufl_shape
            if len(shape) == 0:
                V = FunctionSpace(function.ufl_domain(), max_elem)
            elif len(shape) == 1:
                if numpy.prod(shape) > 3:
                    raise ValueError(
                        "Can't write vectors with more than 3 components")
                V = VectorFunctionSpace(function.ufl_domain(),
                                        max_elem,
                                        dim=shape[0])
            elif len(shape) == 2:
                if numpy.prod(shape) > 9:
                    raise ValueError(
                        "Can't write tensors with more than 9 components")
                V = TensorFunctionSpace(function.ufl_domain(),
                                        max_elem,
                                        shape=shape)
            else:
                raise ValueError("Unsupported shape %s" % (shape, ))
            output = Function(V)
            self._output_functions[function] = output
        if self.project:
            projector = self._mappers.get(function)
            if projector is None:
                projector = Projector(function, output)
                self._mappers[function] = projector
            projector.project()
        else:
            interpolator = self._mappers.get(function)
            if interpolator is None:
                interpolator = Interpolator(function, output)
                self._mappers[function] = interpolator
            interpolator.interpolate()

        return OFunction(array=get_array(output), name=name, function=output)
Example #6
class PrescribedTransport(Timestepper):

    def __init__(self, state, problem, physics_list=None,
                 prescribed_transporting_velocity=None):

        super().__init__(state, problem,
                         physics_list=physics_list)

        if prescribed_transporting_velocity is not None:
            self.velocity_projection = Projector(prescribed_transporting_velocity(self.state.t),
                                                 self.state.fields('u'))
        else:
            self.velocity_projection = None

    @property
    def transporting_velocity(self):
        return self.state.fields('u')

    def timestep(self):
        if self.velocity_projection is not None:
            self.velocity_projection.project()

        super().timestep()
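
PrescribedTransport keeps a single Projector from the prescribed velocity expression onto the 'u' field and re-runs it at the start of every timestep. The same mechanism can be sketched with plain Firedrake by letting a Constant carry the time (the mesh, space and expression below are placeholders):

from firedrake import (UnitSquareMesh, VectorFunctionSpace, Function, Constant,
                       SpatialCoordinate, Projector, as_vector, cos, pi)

mesh = UnitSquareMesh(8, 8)
Vu = VectorFunctionSpace(mesh, "CG", 1)
u = Function(Vu, name="u")

t = Constant(0.0)
x, y = SpatialCoordinate(mesh)
# prescribed transporting velocity as a UFL expression of space and time
u_expr = as_vector([cos(2 * pi * t) * y, 0.0])

velocity_projection = Projector(u_expr, u)   # built once, as in the class above

dt = 0.1
t_val = 0.0
for step in range(5):
    t_val += dt
    t.assign(t_val)
    velocity_projection.project()            # refresh u before the transport solve

Because the projection's right-hand side is reassembled on every project() call, updating the Constant is enough to refresh the velocity field.
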
Example #7
    def initialize(self, pc):
        """Set up the problem context. Take the original
        mixed problem and reformulate the problem as a
        hybridized mixed system.

        A KSP is created for the Lagrange multiplier system.
        """
        from ufl.algorithms.map_integrands import map_integrand_dags
        from firedrake import (FunctionSpace, TrialFunction, TrialFunctions,
                               TestFunction, Function, BrokenElement,
                               MixedElement, FacetNormal, Constant,
                               DirichletBC, Projector)
        from firedrake.assemble import (allocate_matrix,
                                        create_assembly_callable)
        from firedrake.formmanipulation import ArgumentReplacer, split_form

        # Extract the problem context
        prefix = pc.getOptionsPrefix()
        _, P = pc.getOperators()
        context = P.getPythonContext()
        test, trial = context.a.arguments()

        V = test.function_space()
        if V.mesh().cell_set._extruded:
            # TODO: Merge FIAT branch to support TPC trace elements
            raise NotImplementedError("Not implemented on extruded meshes.")

        # Break the function spaces and define fully discontinuous spaces
        broken_elements = [BrokenElement(Vi.ufl_element()) for Vi in V]
        elem = MixedElement(broken_elements)
        V_d = FunctionSpace(V.mesh(), elem)
        arg_map = {test: TestFunction(V_d), trial: TrialFunction(V_d)}

        # Replace the problem's arguments with arguments defined
        # on the new discontinuous spaces
        replacer = ArgumentReplacer(arg_map)
        new_form = map_integrand_dags(replacer, context.a)

        # Create the space of approximate traces.
        # The vector function space will have a non-empty value_shape
        W = next(v for v in V if bool(v.ufl_element().value_shape()))
        if W.ufl_element().family() in ["Raviart-Thomas", "RTCF"]:
            tdegree = W.ufl_element().degree() - 1

        else:
            tdegree = W.ufl_element().degree()

        # NOTE: Once extruded is ready, we will need to be aware of this
        # and construct the appropriate trace space for the HDiv element
        TraceSpace = FunctionSpace(V.mesh(), "HDiv Trace", tdegree)

        # NOTE: For extruded, we will need to add "on_top" and "on_bottom"
        trace_conditions = [
            DirichletBC(TraceSpace, Constant(0.0), "on_boundary")
        ]

        # Set up the functions for the original, hybridized
        # and schur complement systems
        self.broken_solution = Function(V_d)
        self.broken_rhs = Function(V_d)
        self.trace_solution = Function(TraceSpace)
        self.unbroken_solution = Function(V)
        self.unbroken_rhs = Function(V)

        # Create the symbolic Schur-reduction
        Atilde = Tensor(new_form)
        gammar = TestFunction(TraceSpace)
        n = FacetNormal(V.mesh())

        # Vector trial function will have a non-empty ufl_shape
        sigma = next(f for f in TrialFunctions(V_d) if bool(f.ufl_shape))

        # NOTE: Once extruded is ready, this will change slightly
        # to include both horizontal and vertical interior facets
        K = Tensor(gammar('+') * ufl.dot(sigma, n) * ufl.dS)

        # Assemble the Schur complement operator and right-hand side
        self.schur_rhs = Function(TraceSpace)
        self._assemble_Srhs = create_assembly_callable(
            K * Atilde.inv * self.broken_rhs,
            tensor=self.schur_rhs,
            form_compiler_parameters=context.fc_params)

        schur_comp = K * Atilde.inv * K.T
        self.S = allocate_matrix(schur_comp,
                                 bcs=trace_conditions,
                                 form_compiler_parameters=context.fc_params)
        self._assemble_S = create_assembly_callable(
            schur_comp,
            tensor=self.S,
            bcs=trace_conditions,
            form_compiler_parameters=context.fc_params)

        self._assemble_S()
        self.S.force_evaluation()
        Smat = self.S.petscmat

        # Nullspace for the multiplier problem
        nullsp = P.getNullSpace()
        if nullsp.handle != 0:
            new_vecs = get_trace_nullspace_vecs(K * Atilde.inv, nullsp, V, V_d,
                                                TraceSpace)
            tr_nullsp = PETSc.NullSpace().create(vectors=new_vecs,
                                                 comm=pc.comm)
            Smat.setNullSpace(tr_nullsp)

        # Set up the KSP for the system of Lagrange multipliers
        ksp = PETSc.KSP().create(comm=pc.comm)
        ksp.setOptionsPrefix(prefix + "trace_")
        ksp.setTolerances(rtol=1e-13)
        ksp.setOperators(Smat)
        ksp.setUp()
        ksp.setFromOptions()
        self.ksp = ksp

        # Now we construct the local tensors for the reconstruction stage
        # TODO: Add support for mixed tensors and these variables
        # become unnecessary
        split_forms = split_form(new_form)
        A = Tensor(next(sf.form for sf in split_forms if sf.indices == (0, 0)))
        B = Tensor(next(sf.form for sf in split_forms if sf.indices == (1, 0)))
        C = Tensor(next(sf.form for sf in split_forms if sf.indices == (1, 1)))
        trial = TrialFunction(
            FunctionSpace(V.mesh(), BrokenElement(W.ufl_element())))
        K_local = Tensor(gammar('+') * ufl.dot(trial, n) * ufl.dS)

        # Split functions and reconstruct each bit separately
        sigma_h, u_h = self.broken_solution.split()
        g, f = self.broken_rhs.split()

        # Pressure reconstruction
        M = B * A.inv * B.T + C
        u_sol = M.inv * f + M.inv * (
            B * A.inv * K_local.T * self.trace_solution - B * A.inv * g)
        self._assemble_pressure = create_assembly_callable(
            u_sol, tensor=u_h, form_compiler_parameters=context.fc_params)

        # Velocity reconstruction
        sigma_sol = A.inv * g + A.inv * (B.T * u_h -
                                         K_local.T * self.trace_solution)
        self._assemble_velocity = create_assembly_callable(
            sigma_sol,
            tensor=sigma_h,
            form_compiler_parameters=context.fc_params)

        # Set up the projector for projecting the broken solution
        # into the unbroken finite element spaces
        # NOTE: Tolerance here matters!
        sigma_b, _ = self.broken_solution.split()
        sigma_u, _ = self.unbroken_solution.split()
        self.projector = Projector(sigma_b,
                                   sigma_u,
                                   solver_parameters={
                                       "ksp_type": "cg",
                                       "ksp_rtol": 1e-13
                                   })
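
The last step of initialize, projecting the broken H(div) velocity back into the conforming space with a very tight CG tolerance, can be isolated as follows (an RT1 space on a toy mesh is assumed here; it is not taken from the preconditioner itself):

from firedrake import (UnitSquareMesh, FunctionSpace, BrokenElement,
                       Function, Projector)

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "RT", 1)                            # conforming H(div) space
V_d = FunctionSpace(mesh, BrokenElement(V.ufl_element()))   # its broken counterpart

sigma_b = Function(V_d)   # broken flux, e.g. reconstructed element by element
sigma_u = Function(V)     # conforming target

# as the NOTE in the source stresses, the tolerance here matters
Projector(sigma_b, sigma_u,
          solver_parameters={"ksp_type": "cg",
                             "ksp_rtol": 1e-13}).project()
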
Example #8
    def __init__(self, v_in, v_out, VDG=None, boundary_method=None):

        # check if v_in is valid
        if isinstance(v_in, expression.Expression) or not isinstance(
                v_in, (ufl.core.expr.Expr, function.Function)):
            raise ValueError(
                "Can only recover UFL expression or Functions not '%s'" %
                type(v_in))

        self.v_in = v_in
        self.v_out = v_out
        self.V = v_out.function_space()
        if VDG is not None:
            self.v = Function(VDG)
            self.interpolator = Interpolator(v_in, self.v)
        else:
            self.v = v_in
            self.interpolator = None

        self.VDG = VDG
        self.boundary_method = boundary_method
        self.averager = Averager(self.v, self.v_out)

        # check boundary method options are valid
        if boundary_method is not None:
            if boundary_method != Boundary_Method.dynamics and boundary_method != Boundary_Method.physics:
                raise ValueError(
                    "Boundary method must be a Boundary_Method Enum object.")
            if VDG is None:
                raise ValueError(
                    "If boundary_method is specified, VDG also needs specifying."
                )

            # now specify things that we'll need if we are doing boundary recovery
            if boundary_method == Boundary_Method.physics:
                # check dimensions
                if self.V.value_size != 1:
                    raise ValueError(
                        'This method only works for scalar functions.')
                self.boundary_recoverer = Boundary_Recoverer(
                    self.v_out, self.v, method=Boundary_Method.physics)
            else:

                mesh = self.V.mesh()
                # this ensures we get the pure function space, not an indexed function space
                V0 = FunctionSpace(mesh,
                                   self.v_in.function_space().ufl_element())
                VCG1 = FunctionSpace(mesh, "CG", 1)
                if V0.extruded:
                    cell = mesh._base_mesh.ufl_cell().cellname()
                    DG1_hori_elt = FiniteElement("DG",
                                                 cell,
                                                 1,
                                                 variant="equispaced")
                    DG1_vert_elt = FiniteElement("DG",
                                                 interval,
                                                 1,
                                                 variant="equispaced")
                    DG1_element = TensorProductElement(DG1_hori_elt,
                                                       DG1_vert_elt)
                else:
                    cell = mesh.ufl_cell().cellname()
                    DG1_element = FiniteElement("DG",
                                                cell,
                                                1,
                                                variant="equispaced")
                VDG1 = FunctionSpace(mesh, DG1_element)

                if self.V.value_size == 1:
                    coords_to_adjust = find_coords_to_adjust(V0, VDG1)

                    self.boundary_recoverer = Boundary_Recoverer(
                        self.v_out,
                        self.v,
                        coords_to_adjust=coords_to_adjust,
                        method=Boundary_Method.dynamics)
                else:
                    VuDG1 = VectorFunctionSpace(mesh, DG1_element)
                    coords_to_adjust = find_coords_to_adjust(V0, VuDG1)

                    # now, break the problem down into components
                    v_scalars = []
                    v_out_scalars = []
                    self.boundary_recoverers = []
                    self.project_to_scalars_CG = []
                    self.extra_averagers = []
                    coords_to_adjust_list = []
                    for i in range(self.V.value_size):
                        v_scalars.append(Function(VDG1))
                        v_out_scalars.append(Function(VCG1))
                        coords_to_adjust_list.append(
                            Function(VDG1).project(coords_to_adjust[i]))
                        self.project_to_scalars_CG.append(
                            Projector(self.v_out[i], v_out_scalars[i]))
                        self.boundary_recoverers.append(
                            Boundary_Recoverer(
                                v_out_scalars[i],
                                v_scalars[i],
                                method=Boundary_Method.dynamics,
                                coords_to_adjust=coords_to_adjust_list[i]))
                        # need an extra averager that works on the scalar fields rather than the vector one
                        self.extra_averagers.append(
                            Averager(v_scalars[i], v_out_scalars[i]))

                    # the boundary recovery needs to be done on scalar fields,
                    # so extract each component and restore the vector after the boundary recovery is done
                    self.interpolate_to_vector = Interpolator(
                        as_vector(v_out_scalars), self.v_out)
Example #9
    def __init__(self, state, field, equation=None, *, solver_parameters=None,
                 limiter=None):

        if equation is not None:

            self.state = state
            self.field = field
            self.equation = equation
            # get ubar from the equation class
            self.ubar = self.equation.ubar
            self.dt = self.state.timestepping.dt

            # get default solver options if none passed in
            if solver_parameters is None:
                self.solver_parameters = equation.solver_parameters
            else:
                self.solver_parameters = solver_parameters
                if state.output.log_level == DEBUG:
                    self.solver_parameters["ksp_monitor_true_residual"] = True

            self.limiter = limiter

        # check to see if we are using an embedded DG method - if we are then
        # the projector and output function will have been set up in the
        # equation class and we can get the correct function space from
        # the output function.
        if isinstance(equation, EmbeddedDGAdvection):
            # check that the field and the equation are compatible
            if equation.V0 != field.function_space():
                raise ValueError('The field to be advected is not compatible with the equation used.')
            self.embedded_dg = True
            fs = equation.space
            self.xdg_in = Function(fs)
            self.xdg_out = Function(fs)
            self.x_projected = Function(field.function_space())
            parameters = {'ksp_type': 'cg',
                          'pc_type': 'bjacobi',
                          'sub_pc_type': 'ilu'}
            self.Projector = Projector(self.xdg_out, self.x_projected,
                                       solver_parameters=parameters)
            self.recovered = equation.recovered
            if self.recovered:
                # set up the necessary functions
                self.x_in = Function(field.function_space())
                x_adv = Function(fs)
                x_rec = Function(equation.V_rec)
                x_brok = Function(equation.V_brok)

                # set up interpolators and projectors
                self.x_adv_interpolator = Interpolator(self.x_in, x_adv)  # interpolate before recovery
                self.x_rec_projector = Recoverer(x_adv, x_rec)  # recovered function
                # when the "average" method comes into firedrake master, this will be
                # self.x_rec_projector = Projector(self.x_in, equation.Vrec, method="average")
                self.x_brok_projector = Projector(x_rec, x_brok)  # function projected back
                self.xdg_interpolator = Interpolator(self.x_in + x_rec - x_brok, self.xdg_in)
                if self.limiter is not None:
                    self.x_brok_interpolator = Interpolator(self.xdg_out, x_brok)
                    self.x_out_projector = Recoverer(x_brok, self.x_projected)
                    # when the "average" method comes into firedrake master, this will be
                    # self.x_out_projector = Projector(x_brok, self.x_projected, method="average")
        else:
            self.embedded_dg = False
            fs = field.function_space()

        # setup required functions
        self.fs = fs
        self.dq = Function(fs)
        self.q1 = Function(fs)
Example #10
class Advection(object, metaclass=ABCMeta):
    """
    Base class for advection schemes.

    :arg state: :class:`.State` object.
    :arg field: field to be advected
    :arg equation: :class:`.Equation` object, specifying the equation
    that field satisfies
    :arg solver_parameters: solver_parameters
    :arg limiter: :class:`.Limiter` object.
    :arg options: :class:`.AdvectionOptions` object
    """

    def __init__(self, state, field, equation=None, *, solver_parameters=None,
                 limiter=None):

        if equation is not None:

            self.state = state
            self.field = field
            self.equation = equation
            # get ubar from the equation class
            self.ubar = self.equation.ubar
            self.dt = self.state.timestepping.dt

            # get default solver options if none passed in
            if solver_parameters is None:
                self.solver_parameters = equation.solver_parameters
            else:
                self.solver_parameters = solver_parameters
                if logger.isEnabledFor(DEBUG):
                    self.solver_parameters["ksp_monitor_true_residual"] = True

            self.limiter = limiter

            if hasattr(equation, "options"):
                self.discretisation_option = equation.options.name
                self._setup(state, field, equation.options)
            else:
                self.discretisation_option = None
                self.fs = field.function_space()

            # setup required functions
            self.dq = Function(self.fs)
            self.q1 = Function(self.fs)

    def _setup(self, state, field, options):

        if options.name in ["embedded_dg", "recovered"]:
            self.fs = options.embedding_space
            self.xdg_in = Function(self.fs)
            self.xdg_out = Function(self.fs)
            self.x_projected = Function(field.function_space())
            parameters = {'ksp_type': 'cg',
                          'pc_type': 'bjacobi',
                          'sub_pc_type': 'ilu'}
            self.Projector = Projector(self.xdg_out, self.x_projected,
                                       solver_parameters=parameters)

        if options.name == "recovered":
            # set up the necessary functions
            self.x_in = Function(field.function_space())
            x_rec = Function(options.recovered_space)
            x_brok = Function(options.broken_space)

            # set up interpolators and projectors
            self.x_rec_projector = Recoverer(self.x_in, x_rec, VDG=self.fs, boundary_method=options.boundary_method)  # recovered function
            self.x_brok_projector = Projector(x_rec, x_brok)  # function projected back
            self.xdg_interpolator = Interpolator(self.x_in + x_rec - x_brok, self.xdg_in)
            if self.limiter is not None:
                self.x_brok_interpolator = Interpolator(self.xdg_out, x_brok)
                self.x_out_projector = Recoverer(x_brok, self.x_projected)

    def pre_apply(self, x_in, discretisation_option):
        """
        Extra steps taken before advection when using an embedded method,
        which might be either the plain embedded method or the
        recovered space advection scheme.

        :arg x_in: the input set of prognostic fields.
        :arg discretisation_option: string specifying which scheme to use.
        """
        if discretisation_option == "embedded_dg":
            try:
                self.xdg_in.interpolate(x_in)
            except NotImplementedError:
                self.xdg_in.project(x_in)

        elif discretisation_option == "recovered":
            self.x_in.assign(x_in)
            self.x_rec_projector.project()
            self.x_brok_projector.project()
            self.xdg_interpolator.interpolate()

    def post_apply(self, x_out, discretisation_option):
        """
        The projection steps, returning a field to its original space
        for an embedded DG advection scheme. For the case of the
        recovered scheme, there are two options dependent on whether
        the scheme is limited or not.

        :arg x_out: the outgoing field.
        :arg discretisation_option: string specifying which option to use.
        """
        if discretisation_option == "embedded_dg":
            self.Projector.project()

        elif discretisation_option == "recovered":
            if self.limiter is not None:
                self.x_brok_interpolator.interpolate()
                self.x_out_projector.project()
            else:
                self.Projector.project()
        x_out.assign(self.x_projected)

    @abstractproperty
    def lhs(self):
        return self.equation.mass_term(self.equation.trial)

    @abstractproperty
    def rhs(self):
        return self.equation.mass_term(self.q1) - self.dt*self.equation.advection_term(self.q1)

    def update_ubar(self, xn, xnp1, alpha):
        un = xn.split()[0]
        unp1 = xnp1.split()[0]
        self.ubar.assign(un + alpha*(unp1-un))

    @cached_property
    def solver(self):
        # setup solver using lhs and rhs defined in derived class
        problem = LinearVariationalProblem(self.lhs, self.rhs, self.dq)
        solver_name = self.field.name()+self.equation.__class__.__name__+self.__class__.__name__
        return LinearVariationalSolver(problem, solver_parameters=self.solver_parameters, options_prefix=solver_name)

    @abstractmethod
    def apply(self, x_in, x_out):
        """
        Function takes x as input, computes L(x) as defined by the equation,
        and returns x_out as output.

        :arg x: :class:`.Function` object, the input Function.
        :arg x_out: :class:`.Function` object, the output Function.
        """
        pass
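
The pre_apply hook above relies on one small Firedrake idiom: try to interpolate into the embedding space, and fall back to a Galerkin projection if the target element does not support interpolation. In isolation that looks like the following sketch (the spaces are illustrative):

from firedrake import UnitSquareMesh, FunctionSpace, BrokenElement, Function

mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, "CG", 1)
Vdg = FunctionSpace(mesh, BrokenElement(V.ufl_element()))

x_in = Function(V)
xdg_in = Function(Vdg)

# mirror pre_apply: interpolation is cheaper, but not every element
# supports it, so fall back to a projection when it raises
try:
    xdg_in.interpolate(x_in)
except NotImplementedError:
    xdg_in.project(x_in)
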
Example #11
    def __init__(self, state, moments=AdvectedMoments.M3, limit=True):
        super().__init__(state)

        # function spaces
        Vt = state.fields('rain').function_space()
        Vu = state.fields('u').function_space()

        # declare properties of class
        self.state = state
        self.moments = moments
        self.rain = state.fields('rain')
        self.v = state.fields('rainfall_velocity', Vu)
        self.limit = limit

        if moments == AdvectedMoments.M0:
            # all rain falls at terminal velocity
            terminal_velocity = Constant(5)  # in m/s
            if state.mesh.geometric_dimension() == 2:
                self.v.project(as_vector([0, -terminal_velocity]))
            elif state.mesh.geometric_dimension() == 3:
                self.v.project(as_vector([0, 0, -terminal_velocity]))
        elif moments == AdvectedMoments.M3:
            # this advects the third moment M3 of the raindrop
            # distribution, which corresponds to the mean mass
            rho = state.fields('rho')
            rho_w = Constant(1000.0)  # density of liquid water
            # assume n(D) = n_0 * D^mu * exp(-Lambda*D)
            # n_0 = N_r * Lambda^(1+mu) / gamma(1 + mu)
            N_r = Constant(10**5)  # number of rain droplets per m^3
            mu = 0.0  # shape constant of droplet gamma distribution
            # assume V(D) = a * D^b * exp(-f*D) * (rho_0 / rho)^g
            # take f = 0
            a = Constant(362.)  # intercept for velocity distr. in log space
            b = 0.65  # inverse scale parameter for velocity distr.
            rho0 = Constant(1.22)  # reference density in kg/m^3
            g = Constant(0.5)  # scaling of density correction
            # we keep mu in the expressions even though mu = 0
            threshold = Constant(10**-10)  # only do rainfall for r > threshold
            Lambda = (N_r * pi * rho_w * gamma(4 + mu) /
                      (6 * gamma(1 + mu) * rho * self.rain))**(1. / 3)
            Lambda0 = (N_r * pi * rho_w * gamma(4 + mu) /
                       (6 * gamma(1 + mu) * rho * threshold))**(1. / 3)
            v_expression = conditional(
                self.rain > threshold,
                (a * gamma(4 + b + mu) / (gamma(4 + mu) * Lambda**b) *
                 (rho0 / rho)**g),
                (a * gamma(4 + b + mu) / (gamma(4 + mu) * Lambda0**b) *
                 (rho0 / rho)**g))
        else:
            raise NotImplementedError(
                'Currently we only have implementations for zero and one moment schemes for rainfall. Valid options are AdvectedMoments.M0 and AdvectedMoments.M3'
            )

        if moments != AdvectedMoments.M0:
            if state.mesh.geometric_dimension() == 2:
                self.determine_v = Projector(as_vector([0, -v_expression]),
                                             self.v)
            elif state.mesh.geometric_dimension() == 3:
                self.determine_v = Projector(as_vector([0, 0, -v_expression]),
                                             self.v)

        # determine whether to do recovered space advection scheme
        # if horizontal and vertical degrees are 0 do recovered space
        if state.horizontal_degree == 0 and state.vertical_degree == 0:
            VDG1 = FunctionSpace(Vt.mesh(), "DG", 1)
            VCG1 = FunctionSpace(Vt.mesh(), "CG", 1)
            Vbrok = FunctionSpace(Vt.mesh(), BrokenElement(Vt.ufl_element()))
            boundary_method = Boundary_Method.dynamics
            advect_options = RecoveredOptions(embedding_space=VDG1,
                                              recovered_space=VCG1,
                                              broken_space=Vbrok,
                                              boundary_method=boundary_method)
        else:
            advect_options = EmbeddedDGOptions()

        # need to define advection equation before limiter (as it is needed for the ThetaLimiter)
        advection_equation = EmbeddedDGAdvection(state,
                                                 Vt,
                                                 equation_form="advective",
                                                 outflow=True,
                                                 options=advect_options)

        # decide which limiter to use
        if self.limit:
            if state.horizontal_degree == 0 and state.vertical_degree == 0:
                limiter = VertexBasedLimiter(VDG1)
            elif state.horizontal_degree == 1 and state.vertical_degree == 1:
                limiter = ThetaLimiter(Vt)
            else:
                logger.warning(
                    "There is no limiter yet implemented for the spaces used. NoLimiter() is being used for the rainfall in this case."
                )
                limiter = NoLimiter()
        else:
            limiter = None

        # sedimentation will happen using a full advection method
        self.advection_method = SSPRK3(state,
                                       self.rain,
                                       advection_equation,
                                       limiter=limiter)
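
The moment-based branch above hinges on projecting a conditional UFL expression into the velocity space, which Projector accepts directly. A stripped-down sketch of just that mechanism (the threshold and fall speed below are placeholders, not the rain physics above):

from firedrake import (UnitSquareMesh, VectorFunctionSpace, FunctionSpace,
                       Function, Constant, Projector, as_vector, conditional)

mesh = UnitSquareMesh(8, 8)
Vu = VectorFunctionSpace(mesh, "CG", 1)
Vr = FunctionSpace(mesh, "DG", 1)

rain = Function(Vr)
v = Function(Vu, name="rainfall_velocity")

threshold = Constant(1e-10)
# placeholder fall speed: 5 m/s wherever there is rain, zero otherwise
fall_speed = conditional(rain > threshold, Constant(5.0), Constant(0.0))

determine_v = Projector(as_vector([0, -fall_speed]), v)
determine_v.project()   # re-run whenever the rain field has been updated
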
Example #12
    def setup(self, equation, uadv=None, apply_bcs=True, *active_labels):

        self.residual = equation.residual

        if self.field_name is not None:
            self.idx = equation.field_names.index(self.field_name)
            self.fs = self.state.fields(self.field_name).function_space()
            self.residual = self.residual.label_map(
                lambda t: t.get(prognostic) == self.field_name,
                lambda t: Term(
                    split_form(t.form)[self.idx].form,
                    t.labels),
                drop)
            bcs = equation.bcs[self.field_name]

        else:
            self.field_name = equation.field_name
            self.fs = equation.function_space
            self.idx = None
            if type(self.fs.ufl_element()) is MixedElement:
                bcs = [bc for _, bcs in equation.bcs.items() for bc in bcs]
            else:
                bcs = equation.bcs[self.field_name]

        if len(active_labels) > 0:
            self.residual = self.residual.label_map(
                lambda t: any(t.has_label(time_derivative, *active_labels)),
                map_if_false=drop)

        options = self.options

        # -------------------------------------------------------------------- #
        # Routines relating to transport
        # -------------------------------------------------------------------- #

        if hasattr(self.options, 'ibp'):
            self.replace_transport_term()
        self.replace_transporting_velocity(uadv)

        # -------------------------------------------------------------------- #
        # Wrappers for embedded / recovery methods
        # -------------------------------------------------------------------- #

        if self.discretisation_option in ["embedded_dg", "recovered"]:
            # construct the embedding space if not specified
            if options.embedding_space is None:
                V_elt = BrokenElement(self.fs.ufl_element())
                self.fs = FunctionSpace(self.state.mesh, V_elt)
            else:
                self.fs = options.embedding_space
            self.xdg_in = Function(self.fs)
            self.xdg_out = Function(self.fs)
            if self.idx is None:
                self.x_projected = Function(equation.function_space)
            else:
                self.x_projected = Function(self.state.fields(self.field_name).function_space())
            new_test = TestFunction(self.fs)
            parameters = {'ksp_type': 'cg',
                          'pc_type': 'bjacobi',
                          'sub_pc_type': 'ilu'}

        # -------------------------------------------------------------------- #
        # Make boundary conditions
        # -------------------------------------------------------------------- #

        if not apply_bcs:
            self.bcs = None
        elif self.discretisation_option in ["embedded_dg", "recovered"]:
            # Transfer boundary conditions onto test function space
            self.bcs = [DirichletBC(self.fs, bc.function_arg, bc.sub_domain) for bc in bcs]
        else:
            self.bcs = bcs

        # -------------------------------------------------------------------- #
        # Modify test function for SUPG methods
        # -------------------------------------------------------------------- #

        if self.discretisation_option == "supg":
            # construct tau, if it is not specified
            dim = self.state.mesh.topological_dimension()
            if options.tau is not None:
                # if tau is provided, check that it has the right size
                tau = options.tau
                assert as_ufl(tau).ufl_shape == (dim, dim), "Provided tau has incorrect shape!"
            else:
                # create tuple of default values of size dim
                default_vals = [options.default*self.dt]*dim
                # check for directions in which the space is discontinuous
                # so that we don't apply SUPG in that direction
                if is_cg(self.fs):
                    vals = default_vals
                else:
                    space = self.fs.ufl_element().sobolev_space()
                    if space.name in ["HDiv", "DirectionalH"]:
                        vals = [default_vals[i] if space[i].name == "H1"
                                else 0. for i in range(dim)]
                    else:
                        raise ValueError("I don't know what to do with space %s" % space)
                tau = Constant(tuple([
                    tuple(
                        [vals[j] if i == j else 0. for i, v in enumerate(vals)]
                    ) for j in range(dim)])
                )
                self.solver_parameters = {'ksp_type': 'gmres',
                                          'pc_type': 'bjacobi',
                                          'sub_pc_type': 'ilu'}

            test = TestFunction(self.fs)
            new_test = test + dot(dot(uadv, tau), grad(test))

        if self.discretisation_option is not None:
            # replace the original test function with one defined on
            # the embedding space, as this is the space where
            # the problem will be solved
            self.residual = self.residual.label_map(
                all_terms,
                map_if_true=replace_test_function(new_test))

        if self.discretisation_option == "embedded_dg":
            if self.limiter is None:
                self.x_out_projector = Projector(self.xdg_out, self.x_projected,
                                                 solver_parameters=parameters)
            else:
                self.x_out_projector = Recoverer(self.xdg_out, self.x_projected)

        if self.discretisation_option == "recovered":
            # set up the necessary functions
            self.x_in = Function(self.state.fields(self.field_name).function_space())
            x_rec = Function(options.recovered_space)
            x_brok = Function(options.broken_space)

            # set up interpolators and projectors
            self.x_rec_projector = Recoverer(self.x_in, x_rec, VDG=self.fs, boundary_method=options.boundary_method)  # recovered function
            self.x_brok_projector = Projector(x_rec, x_brok)  # function projected back
            self.xdg_interpolator = Interpolator(self.x_in + x_rec - x_brok, self.xdg_in)
            if self.limiter is not None:
                self.x_brok_interpolator = Interpolator(self.xdg_out, x_brok)
                self.x_out_projector = Recoverer(x_brok, self.x_projected)
            else:
                self.x_out_projector = Projector(self.xdg_out, self.x_projected)

        # setup required functions
        self.dq = Function(self.fs)
        self.q1 = Function(self.fs)
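
The SUPG branch builds a diagonal tau tensor scaled by the timestep and uses it to perturb the test function along the transporting velocity. That modification on its own looks like the following sketch (the mesh, the 0.5*dt scaling and the uniform velocity are illustrative choices, not values from the class above):

from firedrake import (UnitSquareMesh, FunctionSpace, VectorFunctionSpace,
                       Function, TestFunction, Constant, as_vector, dot, grad)

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)
Vu = VectorFunctionSpace(mesh, "CG", 1)

uadv = Function(Vu)
uadv.interpolate(as_vector([1.0, 0.0]))   # transporting velocity

dt = 0.01
dim = mesh.topological_dimension()
# diagonal tau with one 0.5*dt entry per (continuous) direction
tau = Constant([[0.5 * dt if i == j else 0.0 for i in range(dim)]
                for j in range(dim)])

test = TestFunction(V)
new_test = test + dot(dot(uadv, tau), grad(test))   # SUPG-modified test function
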
Example #13
class TimeDiscretisation(object, metaclass=ABCMeta):
    """
    Base class for time discretisation schemes.

    :arg state: :class:`.State` object.
    :arg field: field to be evolved
    :arg equation: :class:`.Equation` object, specifying the equation
    that field satisfies
    :arg solver_parameters: solver_parameters
    :arg limiter: :class:`.Limiter` object.
    :arg options: :class:`.DiscretisationOptions` object
    """

    def __init__(self, state, field_name=None, solver_parameters=None,
                 limiter=None, options=None):

        self.state = state
        self.field_name = field_name

        self.dt = self.state.dt

        self.limiter = limiter

        self.options = options
        if options is not None:
            self.discretisation_option = options.name
        else:
            self.discretisation_option = None

        # get default solver options if none passed in
        if solver_parameters is None:
            self.solver_parameters = {'ksp_type': 'cg',
                                      'pc_type': 'bjacobi',
                                      'sub_pc_type': 'ilu'}
        else:
            self.solver_parameters = solver_parameters
            if logger.isEnabledFor(DEBUG):
                self.solver_parameters["ksp_monitor_true_residual"] = None

    def setup(self, equation, uadv=None, apply_bcs=True, *active_labels):

        self.residual = equation.residual

        if self.field_name is not None:
            self.idx = equation.field_names.index(self.field_name)
            self.fs = self.state.fields(self.field_name).function_space()
            self.residual = self.residual.label_map(
                lambda t: t.get(prognostic) == self.field_name,
                lambda t: Term(
                    split_form(t.form)[self.idx].form,
                    t.labels),
                drop)
            bcs = equation.bcs[self.field_name]

        else:
            self.field_name = equation.field_name
            self.fs = equation.function_space
            self.idx = None
            if type(self.fs.ufl_element()) is MixedElement:
                bcs = [bc for _, bcs in equation.bcs.items() for bc in bcs]
            else:
                bcs = equation.bcs[self.field_name]

        if len(active_labels) > 0:
            self.residual = self.residual.label_map(
                lambda t: any(t.has_label(time_derivative, *active_labels)),
                map_if_false=drop)

        options = self.options

        # -------------------------------------------------------------------- #
        # Routines relating to transport
        # -------------------------------------------------------------------- #

        if hasattr(self.options, 'ibp'):
            self.replace_transport_term()
        self.replace_transporting_velocity(uadv)

        # -------------------------------------------------------------------- #
        # Wrappers for embedded / recovery methods
        # -------------------------------------------------------------------- #

        if self.discretisation_option in ["embedded_dg", "recovered"]:
            # construct the embedding space if not specified
            if options.embedding_space is None:
                V_elt = BrokenElement(self.fs.ufl_element())
                self.fs = FunctionSpace(self.state.mesh, V_elt)
            else:
                self.fs = options.embedding_space
            self.xdg_in = Function(self.fs)
            self.xdg_out = Function(self.fs)
            if self.idx is None:
                self.x_projected = Function(equation.function_space)
            else:
                self.x_projected = Function(self.state.fields(self.field_name).function_space())
            new_test = TestFunction(self.fs)
            parameters = {'ksp_type': 'cg',
                          'pc_type': 'bjacobi',
                          'sub_pc_type': 'ilu'}

        # -------------------------------------------------------------------- #
        # Make boundary conditions
        # -------------------------------------------------------------------- #

        if not apply_bcs:
            self.bcs = None
        elif self.discretisation_option in ["embedded_dg", "recovered"]:
            # Transfer boundary conditions onto test function space
            self.bcs = [DirichletBC(self.fs, bc.function_arg, bc.sub_domain) for bc in bcs]
        else:
            self.bcs = bcs

        # -------------------------------------------------------------------- #
        # Modify test function for SUPG methods
        # -------------------------------------------------------------------- #

        if self.discretisation_option == "supg":
            # construct tau, if it is not specified
            dim = self.state.mesh.topological_dimension()
            if options.tau is not None:
                # if tau is provided, check that it has the right size
                tau = options.tau
                assert as_ufl(tau).ufl_shape == (dim, dim), "Provided tau has incorrect shape!"
            else:
                # create tuple of default values of size dim
                default_vals = [options.default*self.dt]*dim
                # check for directions in which the space is discontinuous
                # so that we don't apply SUPG in that direction
                if is_cg(self.fs):
                    vals = default_vals
                else:
                    space = self.fs.ufl_element().sobolev_space()
                    if space.name in ["HDiv", "DirectionalH"]:
                        vals = [default_vals[i] if space[i].name == "H1"
                                else 0. for i in range(dim)]
                    else:
                        raise ValueError("I don't know what to do with space %s" % space)
                tau = Constant(tuple([
                    tuple(
                        [vals[j] if i == j else 0. for i, v in enumerate(vals)]
                    ) for j in range(dim)])
                )
                self.solver_parameters = {'ksp_type': 'gmres',
                                          'pc_type': 'bjacobi',
                                          'sub_pc_type': 'ilu'}

            test = TestFunction(self.fs)
            new_test = test + dot(dot(uadv, tau), grad(test))

        if self.discretisation_option is not None:
            # replace the original test function with one defined on
            # the embedding space, as this is the space where
            # the problem will be solved
            self.residual = self.residual.label_map(
                all_terms,
                map_if_true=replace_test_function(new_test))

        if self.discretisation_option == "embedded_dg":
            if self.limiter is None:
                self.x_out_projector = Projector(self.xdg_out, self.x_projected,
                                                 solver_parameters=parameters)
            else:
                self.x_out_projector = Recoverer(self.xdg_out, self.x_projected)

        if self.discretisation_option == "recovered":
            # set up the necessary functions
            self.x_in = Function(self.state.fields(self.field_name).function_space())
            x_rec = Function(options.recovered_space)
            x_brok = Function(options.broken_space)

            # set up interpolators and projectors
            self.x_rec_projector = Recoverer(self.x_in, x_rec, VDG=self.fs, boundary_method=options.boundary_method)  # recovered function
            self.x_brok_projector = Projector(x_rec, x_brok)  # function projected back
            self.xdg_interpolator = Interpolator(self.x_in + x_rec - x_brok, self.xdg_in)
            if self.limiter is not None:
                self.x_brok_interpolator = Interpolator(self.xdg_out, x_brok)
                self.x_out_projector = Recoverer(x_brok, self.x_projected)
            else:
                self.x_out_projector = Projector(self.xdg_out, self.x_projected)

        # setup required functions
        self.dq = Function(self.fs)
        self.q1 = Function(self.fs)

    def pre_apply(self, x_in, discretisation_option):
        """
        Extra steps taken before the time discretisation when using an
        embedded method, which might be either the plain embedded method
        or the recovered space scheme.

        :arg x_in: the input set of prognostic fields.
        :arg discretisation_option: string specifying which scheme to use.
        """
        if discretisation_option == "embedded_dg":
            try:
                self.xdg_in.interpolate(x_in)
            except NotImplementedError:
                self.xdg_in.project(x_in)

        elif discretisation_option == "recovered":
            self.x_in.assign(x_in)
            self.x_rec_projector.project()
            self.x_brok_projector.project()
            self.xdg_interpolator.interpolate()

    def post_apply(self, x_out, discretisation_option):
        """
        The projection steps, returning a field to its original space
        for an embedded DG scheme. For the case of the
        recovered scheme, there are two options dependent on whether
        the scheme is limited or not.

        :arg x_out: the outgoing field.
        :arg discretisation_option: string specifying which option to use.
        """
        if discretisation_option == "recovered" and self.limiter is not None:
            self.x_brok_interpolator.interpolate()
        self.x_out_projector.project()
        x_out.assign(self.x_projected)

    @abstractproperty
    def lhs(self):
        l = self.residual.label_map(
            lambda t: t.has_label(time_derivative),
            map_if_true=replace_subject(self.dq, self.idx),
            map_if_false=drop)

        return l.form

    @abstractproperty
    def rhs(self):
        r = self.residual.label_map(
            all_terms,
            map_if_true=replace_subject(self.q1, self.idx))

        r = r.label_map(
            lambda t: t.has_label(time_derivative),
            map_if_false=lambda t: -self.dt*t)

        return r.form

    def replace_transport_term(self):
        """
        This routine allows the default transport term to be replaced with a
        different one, specified through the transport options.

        This is necessary because when the prognostic equations are declared,
        the whole transport term is set up with its default options, and a
        particular time discretisation may need to use a different form.
        """

        # Extract transport term of equation
        old_transport_term_list = self.residual.label_map(
            lambda t: t.has_label(transport), map_if_false=drop)

        # Replacing the transport term is only supported if there is a single one
        if len(old_transport_term_list.terms) > 1:
            raise NotImplementedError('Cannot replace transport terms when there is more than one')

        # Then we should only have one transport term
        old_transport_term = old_transport_term_list.terms[0]

        # If the transport term has an ibp label, then it could be replaced
        if old_transport_term.has_label(ibp_label) and hasattr(self.options, 'ibp'):
            # Do the options specify a different ibp to the old transport term?
            if old_transport_term.labels['ibp'] != self.options.ibp:
                # Set up a new transport term
                field = self.state.fields(self.field_name)
                test = TestFunction(self.fs)

                # Set up new transport term (depending on the type of transport equation)
                if old_transport_term.labels['transport'] == TransportEquationType.advective:
                    new_transport_term = advection_form(self.state, test, field, ibp=self.options.ibp)
                elif old_transport_term.labels['transport'] == TransportEquationType.conservative:
                    new_transport_term = continuity_form(self.state, test, field, ibp=self.options.ibp)
                else:
                    raise NotImplementedError(f'Replacement of transport term not implemented yet for {old_transport_term.labels["transport"]}')

                # Finally, drop the old transport term and add the new one
                self.residual = self.residual.label_map(
                    lambda t: t.has_label(transport), map_if_true=drop)
                self.residual += subject(new_transport_term, field)

    def replace_transporting_velocity(self, uadv):
        # replace the transporting velocity in any terms that contain it
        if any([t.has_label(transporting_velocity) for t in self.residual]):
            assert uadv is not None
            if uadv == "prognostic":
                self.residual = self.residual.label_map(
                    lambda t: t.has_label(transporting_velocity),
                    map_if_true=lambda t: Term(ufl.replace(
                        t.form, {t.get(transporting_velocity): split(t.get(subject))[0]}), t.labels)
                )
            else:
                self.residual = self.residual.label_map(
                    lambda t: t.has_label(transporting_velocity),
                    map_if_true=lambda t: Term(ufl.replace(
                        t.form, {t.get(transporting_velocity): uadv}), t.labels)
                )
            self.residual = transporting_velocity.update_value(self.residual, uadv)

    @cached_property
    def solver(self):
        # setup solver using lhs and rhs defined in derived class
        problem = NonlinearVariationalProblem(self.lhs-self.rhs, self.dq, bcs=self.bcs)
        solver_name = self.field_name+self.__class__.__name__
        return NonlinearVariationalSolver(problem, solver_parameters=self.solver_parameters, options_prefix=solver_name)

    @abstractmethod
    def apply(self, x_in, x_out):
        """
        Function takes x as input, computes L(x) as defined by the equation,
        and returns x_out as output.

        :arg x: :class:`.Function` object, the input Function.
        :arg x_out: :class:`.Function` object, the output Function.
        """
        pass
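
A minimal sketch (not from the source) of how a concrete scheme could flesh out this
interface; the base-class name is hypothetical, and lhs/rhs simply restate the default
bodies shown above.

class ExplicitEulerSketch(TimeDiscretisation):  # hypothetical base-class name

    @cached_property
    def lhs(self):
        # mass term acting on the unknown increment dq (default body above)
        l = self.residual.label_map(
            lambda t: t.has_label(time_derivative),
            map_if_true=replace_subject(self.dq, self.idx),
            map_if_false=drop)
        return l.form

    @cached_property
    def rhs(self):
        # time term evaluated at q1 minus dt times the remaining terms (default body above)
        r = self.residual.label_map(
            all_terms,
            map_if_true=replace_subject(self.q1, self.idx))
        r = r.label_map(
            lambda t: t.has_label(time_derivative),
            map_if_false=lambda t: -self.dt*t)
        return r.form

    def apply(self, x_in, x_out):
        # one explicit step: solve <test, dq> = <test, q1> - dt*L(q1) for dq
        self.q1.assign(x_in)
        self.solver.solve()
        x_out.assign(self.dq)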
Example #15
0
class Recoverer(object):
    """
    An object that 'recovers' a field from a low order space
    (e.g. DG0) into a higher order space (e.g. CG1). This encompasses
    the process of interpolating first to a the right space before
    using the :class:`Averager` object, and also automates the
    boundary recovery process. If no boundary method is specified,
    this simply performs the action of the :class: `Averager`.

    :arg v_in: the :class:`ufl.Expr` or
         :class:`.Function` to project. (e.g. a VDG0 function)
    :arg v_out: :class:`.Function` to put the result in. (e.g. a CG1 function)
    :arg VDG: optional :class:`.FunctionSpace`. If not None, v_in is interpolated
         to this space first before recovery happens.
    :arg boundary_method: an Enum object, .
    """
    def __init__(self, v_in, v_out, VDG=None, boundary_method=None):

        # check if v_in is valid
        if isinstance(v_in, expression.Expression) or not isinstance(
                v_in, (ufl.core.expr.Expr, function.Function)):
            raise ValueError(
                "Can only recover UFL expression or Functions not '%s'" %
                type(v_in))

        self.v_in = v_in
        self.v_out = v_out
        self.V = v_out.function_space()
        if VDG is not None:
            self.v = Function(VDG)
            self.interpolator = Interpolator(v_in, self.v)
        else:
            self.v = v_in
            self.interpolator = None

        self.VDG = VDG
        self.boundary_method = boundary_method
        self.averager = Averager(self.v, self.v_out)

        # check boundary method options are valid
        if boundary_method is not None:
            if boundary_method != Boundary_Method.dynamics and boundary_method != Boundary_Method.physics:
                raise ValueError(
                    "Boundary method must be a Boundary_Method Enum object.")
            if VDG is None:
                raise ValueError(
                    "If boundary_method is specified, VDG also needs specifying."
                )

            # now specify things that we'll need if we are doing boundary recovery
            if boundary_method == Boundary_Method.physics:
                # check dimensions
                if self.V.value_size != 1:
                    raise ValueError(
                        'This method only works for scalar functions.')
                self.boundary_recoverer = Boundary_Recoverer(
                    self.v_out, self.v, method=Boundary_Method.physics)
            else:

                mesh = self.V.mesh()
                # this ensures we get the pure function space, not an indexed function space
                V0 = FunctionSpace(mesh,
                                   self.v_in.function_space().ufl_element())
                VDG1 = FunctionSpace(mesh, "DG", 1)
                VCG1 = FunctionSpace(mesh, "CG", 1)

                if self.V.value_size == 1:
                    coords_to_adjust = find_coords_to_adjust(V0, VDG1)

                    self.boundary_recoverer = Boundary_Recoverer(
                        self.v_out,
                        self.v,
                        coords_to_adjust=coords_to_adjust,
                        method=Boundary_Method.dynamics)
                else:
                    VuDG1 = VectorFunctionSpace(mesh, "DG", 1)
                    coords_to_adjust = find_coords_to_adjust(V0, VuDG1)

                    # now, break the problem down into components
                    v_scalars = []
                    v_out_scalars = []
                    self.boundary_recoverers = []
                    self.project_to_scalars_CG = []
                    self.extra_averagers = []
                    coords_to_adjust_list = []
                    for i in range(self.V.value_size):
                        v_scalars.append(Function(VDG1))
                        v_out_scalars.append(Function(VCG1))
                        coords_to_adjust_list.append(
                            Function(VDG1).project(coords_to_adjust[i]))
                        self.project_to_scalars_CG.append(
                            Projector(self.v_out[i], v_out_scalars[i]))
                        self.boundary_recoverers.append(
                            Boundary_Recoverer(
                                v_out_scalars[i],
                                v_scalars[i],
                                method=Boundary_Method.dynamics,
                                coords_to_adjust=coords_to_adjust_list[i]))
                        # need an extra averager that works on the scalar fields rather than the vector one
                        self.extra_averagers.append(
                            Averager(v_scalars[i], v_out_scalars[i]))

                    # the boundary recovery needs to be done on scalar fields,
                    # so each component is extracted and the vector restored afterwards
                    self.project_to_vector = Projector(
                        as_vector(v_out_scalars), self.v_out)

    def project(self):
        """
        Perform the fully specified recovery.
        """

        if self.interpolator is not None:
            self.interpolator.interpolate()
        self.averager.project()
        if self.boundary_method is not None:
            if self.V.value_size > 1:
                for i in range(self.V.value_size):
                    self.project_to_scalars_CG[i].project()
                    self.boundary_recoverers[i].apply()
                    self.extra_averagers[i].project()
                self.project_to_vector.project()
            else:
                self.boundary_recoverer.apply()
                self.averager.project()
        return self.v_out
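
A short usage sketch (not from the source; the mesh and field names are assumed):
recovering a lowest-order DG field into continuous linears, with the intermediate
interpolation to DG1 and the boundary correction handled by the Recoverer itself.

mesh = UnitSquareMesh(10, 10)          # assumed mesh; any suitable mesh works
VDG0 = FunctionSpace(mesh, "DG", 0)
VDG1 = FunctionSpace(mesh, "DG", 1)
VCG1 = FunctionSpace(mesh, "CG", 1)

rho_dg0 = Function(VDG0)               # low-order field to be recovered
rho_cg1 = Function(VCG1)               # target for the recovered field

recoverer = Recoverer(rho_dg0, rho_cg1, VDG=VDG1,
                      boundary_method=Boundary_Method.dynamics)
recoverer.project()                    # rho_cg1 now holds the recovered field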
Example #16
0
    def __init__(self, diagnostic_variables, prognostic_variables, outputting,
                 simulation_parameters):

        self.diagnostic_variables = diagnostic_variables
        self.prognostic_variables = prognostic_variables
        self.outputting = outputting
        self.simulation_parameters = simulation_parameters
        Dt = Constant(simulation_parameters['dt'][-1])
        Ld = simulation_parameters['Ld'][-1]
        u = self.prognostic_variables.u
        Xi = self.prognostic_variables.dXi
        Vu = u.function_space()
        vector_u = isinstance(Vu.ufl_element(), VectorElement)
        ones = Function(
            VectorFunctionSpace(self.prognostic_variables.mesh, "CG",
                                1)).project(as_vector([Constant(1.0)]))
        self.to_update_constants = False
        self.interpolators = []
        self.projectors = []
        self.solvers = []

        mesh = u.function_space().mesh()
        x, = SpatialCoordinate(mesh)
        alphasq = simulation_parameters['alphasq'][-1]
        periodic = simulation_parameters['periodic'][-1]

        # do peakon data checks here
        true_peakon_data = simulation_parameters['true_peakon_data'][-1]
        if true_peakon_data is not None:
            self.true_peakon_file = Dataset(
                'results/' + true_peakon_data + '/data.nc', 'r')
            # check length of file is correct
            ndump = simulation_parameters['ndump'][-1]
            tmax = simulation_parameters['tmax'][-1]
            dt = simulation_parameters['dt'][-1]
            if len(self.true_peakon_file['time'][:]) != int(tmax /
                                                            (ndump * dt)) + 1:
                raise ValueError(
                    'If reading in true peakon data, the dump frequency must be the same as that used for the true peakon data.'
                    +
                    ' Length of true peakon data is %i, but proposed length is %i'
                    % (len(self.true_peakon_file['time'][:]),
                       int(tmax / (ndump * dt)) + 1))
            if self.true_peakon_file['p'][:].shape != (int(tmax /
                                                           (ndump * dt)) +
                                                       1, ):
                raise ValueError(
                    'Proposed data shape %s must be the same as the true peakon data shape %s'
                    % ((int(tmax / (ndump * dt)) + 1, ),
                       self.true_peakon_file['p'][:].shape))

        # do peakon data checks here
        true_mean_peakon_data = simulation_parameters['true_mean_peakon_data'][
            -1]
        if true_mean_peakon_data is not None:
            self.true_mean_peakon_file = Dataset(
                'results/' + true_mean_peakon_data + '/data.nc', 'r')
            # check length of file is correct
            ndump = simulation_parameters['ndump'][-1]
            tmax = simulation_parameters['tmax'][-1]
            dt = simulation_parameters['dt'][-1]
            if len(self.true_mean_peakon_file['time'][:]) != int(tmax /
                                                                 (ndump * dt)):
                raise ValueError(
                    'If reading in true peakon data, the dump frequency must be the same as that used for the true peakon data.'
                )
            if self.true_mean_peakon_file['p'][:].shape != (int(
                    tmax / (ndump * dt)), ):
                raise ValueError(
                    'True peakon data must have same shape as proposed data!')

        for key, value in self.diagnostic_variables.fields.items():

            if key == 'uscalar':
                uscalar = self.diagnostic_variables.fields['uscalar']
                u_interpolator = Interpolator(dot(ones, u), uscalar)
                self.interpolators.append(u_interpolator)

            elif key == 'Euscalar':
                Eu = self.prognostic_variables.Eu
                Euscalar = self.diagnostic_variables.fields['Euscalar']
                Eu_interpolator = Interpolator(dot(ones, Eu), Euscalar)
                self.interpolators.append(Eu_interpolator)

            elif key == 'Xiscalar':
                Xi = self.prognostic_variables.dXi
                Xiscalar = self.diagnostic_variables.fields['Xiscalar']
                Xi_interpolator = Interpolator(dot(ones, Xi), Xiscalar)
                self.interpolators.append(Xi_interpolator)

            elif key == 'du':
                if type(u.function_space().ufl_element()) == VectorElement:
                    u_to_project = self.diagnostic_variables.fields['uscalar']
                else:
                    u_to_project = u
                du = self.diagnostic_variables.fields['du']
                du_projector = Projector(u_to_project.dx(0), du)
                self.projectors.append(du_projector)

            elif key == 'jump_du':
                du = self.diagnostic_variables.fields['du']
                jump_du = self.diagnostic_variables.fields['jump_du']
                V = jump_du.function_space()
                jtrial = TrialFunction(V)
                psi = TestFunction(V)
                Lj = psi('+') * abs(jump(du)) * dS
                aj = psi('+') * jtrial('+') * dS
                jprob = LinearVariationalProblem(aj, Lj, jump_du)
                jsolver = LinearVariationalSolver(jprob)
                self.solvers.append(jsolver)
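                # Note (added; not in the original source): this facet-wise mass solve
                # sets jump_du on each interior facet to |jump(du)|, the magnitude of
                # the inter-element jump in the derivative.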

            elif key == 'du_smooth':
                du = self.diagnostic_variables.fields['du']
                du_smooth = self.diagnostic_variables.fields['du_smooth']
                projector = Projector(du, du_smooth)
                self.projectors.append(projector)

            elif key == 'u2_flux':
                gamma = simulation_parameters['gamma'][-1]
                u2_flux = self.diagnostic_variables.fields['u2_flux']
                xis = self.prognostic_variables.pure_xi_list
                xis_x = []
                xis_xxx = []
                CG1 = FunctionSpace(mesh, "CG", 1)
                psi = TestFunction(CG1)
                for xi in xis:
                    xis_x.append(Function(CG1).project(xi.dx(0)))
                for xi_x in xis_x:
                    xi_xxx = Function(CG1)
                    form = (psi * xi_xxx + psi.dx(0) * xi_x.dx(0)) * dx
                    prob = NonlinearVariationalProblem(form, xi_xxx)
                    solver = NonlinearVariationalSolver(prob)
                    solver.solve()
                    xis_xxx.append(xi_xxx)
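                # Note (added; not in the original source): each xi_xxx is computed as
                # a weak second derivative of xi_x, i.e. an approximation to the third
                # derivative of xi (boundary terms assumed to vanish).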

                flux_expr = 0.0 * x
                for xi, xi_x, xi_xxx in zip(xis, xis_x, xis_xxx):
                    flux_expr += (6 * u.dx(0) * xi + 12 * u * xi_x + gamma *
                                  xi_xxx) * (6 * u.dx(0) * xi + 24 * u * xi_x +
                                             gamma * xi_xxx)
                projector = Projector(flux_expr, u2_flux)
                self.projectors.append(projector)

            elif key == 'a':
                # find  6 * u_x * Xi + gamma * Xi_xxx
                mesh = u.function_space().mesh()
                gamma = simulation_parameters['gamma'][-1]
                a_flux = self.diagnostic_variables.fields['a']
                xis = self.prognostic_variables.pure_xis
                xis_x = []
                xis_xxx = []
                CG1 = FunctionSpace(mesh, "CG", 1)
                psi = TestFunction(CG1)
                for xi in xis:
                    xis_x.append(Function(CG1).project(xi.dx(0)))
                for xi_x in xis_x:
                    xi_xxx = Function(CG1)
                    form = (psi * xi_xxx + psi.dx(0) * xi_x.dx(0)) * dx
                    prob = NonlinearVariationalProblem(form, xi_xxx)
                    solver = NonlinearVariationalSolver(prob)
                    solver.solve()
                    xis_xxx.append(xi_xxx)

                x, = SpatialCoordinate(mesh)
                a_expr = 0.0 * x
                for xi, xi_x, xi_xxx in zip(xis, xis_x, xis_xxx):
                    a_expr += 6 * u.dx(0) * xi + gamma * xi_xxx
                projector = Projector(a_expr, a_flux)
                self.projectors.append(projector)

            elif key == 'b':
                # find 12 * u * Xi_x
                mesh = u.function_space().mesh()
                gamma = simulation_parameters['gamma'][-1]
                b_flux = self.diagnostic_variables.fields['b']
                xis = self.prognostic_variables.pure_xis

                x, = SpatialCoordinate(mesh)
                b_expr = 0.0 * x
                for xi in xis:
                    b_expr += 12 * u * xi.dx(0)
                projector = Projector(b_expr, b_flux)
                self.projectors.append(projector)

            elif key == 'kdv_1':
                # find the first part of the kdv form
                u0 = prognostic_variables.u0
                uh = (u + u0) / 2
                us = Dt * uh + sqrt(Dt) * Xi
                psi = TestFunction(Vu)
                du_1 = self.diagnostic_variables.fields['kdv_1']

                eqn = psi * du_1 * dx - 6 * psi.dx(0) * uh * us * dx
                prob = NonlinearVariationalProblem(eqn, du_1)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)

            elif key == 'kdv_2':
                # find the second part of the kdv form
                u0 = prognostic_variables.u0
                uh = (u + u0) / 2
                us = Dt * uh + sqrt(Dt) * Xi
                psi = TestFunction(Vu)
                du_2 = self.diagnostic_variables.fields['kdv_2']

                eqn = psi * du_2 * dx + 6 * psi * uh * us.dx(0) * dx
                prob = NonlinearVariationalProblem(eqn, du_2)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)

            elif key == 'kdv_3':
                # find the third part of the kdv form
                u0 = prognostic_variables.u0
                uh = (u + u0) / 2
                us = Dt * uh + sqrt(Dt) * Xi
                du_3 = self.diagnostic_variables.fields['kdv_3']
                gamma = simulation_parameters['gamma'][-1]

                phi = TestFunction(Vu)
                F = Function(Vu)

                eqn = (phi * F * dx + phi.dx(0) * us.dx(0) * dx)
                prob = NonlinearVariationalProblem(eqn, F)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)

                self.projectors.append(Projector(-gamma * F.dx(0), du_3))
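                # Note (added; not in the original source): the auxiliary solve above
                # gives F as a weak second derivative of us, so du_3 approximates the
                # dispersive term -gamma * d^3(us)/dx^3.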

                # nu = TestFunction(Vu)
                # back_eqn = nu * du_3 * dx - gamma * nu.dx(0) * F * dx
                # back_prob = NonlinearVariationalProblem(back_eqn, du_3)
                # back_solver = NonlinearVariationalSolver(back_prob)
                # self.solvers.append(solver)

            elif key == 'm':

                m = self.diagnostic_variables.fields['m']
                phi = TestFunction(Vu)
                eqn = phi * m * dx - phi * u * dx - alphasq * phi.dx(0) * u.dx(
                    0) * dx
                prob = NonlinearVariationalProblem(eqn, m)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)
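                # Note (added; not in the original source): this is the weak form of
                # the momentum-like variable m = u - alphasq * u_xx, with the second
                # derivative integrated by parts (boundary terms assumed to vanish,
                # e.g. on a periodic domain).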

            elif key == 'u_xx':

                u_xx = self.diagnostic_variables.fields['u_xx']
                phi = TestFunction(Vu)
                eqn = phi * u_xx * dx + phi.dx(0) * u_xx.dx(0) * dx
                prob = NonlinearVariationalProblem(eqn, u_xx)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)

            elif key == 'u_sde':
                self.to_update_constants = True
                self.Ld = Ld
                self.alphasq = alphasq
                self.p = Constant(1.0 * 0.5 * (1 + exp(-Ld / sqrt(alphasq))) /
                                  (1 - exp(-Ld / sqrt(alphasq))))
                self.q = Constant(Ld / 2)

                u_sde = self.diagnostic_variables.fields['u_sde']
                if periodic:
                    expr = conditional(
                        x < self.q - Ld / 2,
                        self.p * ((exp(-(x - self.q + Ld) / sqrt(alphasq)) +
                                   exp(-Ld / sqrt(alphasq)) * exp(
                                       (x - self.q + Ld) / sqrt(alphasq))) /
                                  (1 - exp(-Ld / sqrt(alphasq)))),
                        conditional(
                            x < self.q + Ld / 2,
                            self.p * ((exp(-sqrt((self.q - x)**2 / alphasq)) +
                                       exp(-Ld / sqrt(alphasq)) *
                                       exp(sqrt((self.q - x)**2 / alphasq))) /
                                      (1 - exp(-Ld / sqrt(alphasq)))),
                            self.p *
                            ((exp(-(self.q + Ld - x) / sqrt(alphasq)) +
                              exp(-Ld / sqrt(alphasq)) * exp(
                                  (self.q + Ld - x) / sqrt(alphasq))) /
                             (1 - exp(-Ld / sqrt(alphasq))))))
                else:
                    expr = conditional(
                        x < self.q - Ld / 2,
                        self.p * exp(-(x - self.q + Ld) / sqrt(alphasq)),
                        conditional(
                            x < self.q + Ld / 2,
                            self.p * exp(-sqrt((self.q - x)**2 / alphasq)),
                            self.p * exp(-(self.q + Ld - x) / sqrt(alphasq))))

                self.interpolators.append(Interpolator(expr, u_sde))
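                # Note (added; not in the original source): both branches evaluate a
                # peakon profile p * exp(-|x - q| / sqrt(alphasq)) centred at q,
                # measured to the nearest periodic image of q; the `periodic` branch
                # uses the fully periodised profile, summing the exponential tails that
                # wrap around the domain of length Ld and normalising by
                # 1 - exp(-Ld / sqrt(alphasq)).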

            elif key == 'u_sde_weak':
                u_sde = self.diagnostic_variables.fields['u_sde']
                u_sde_weak = self.diagnostic_variables.fields['u_sde_weak']
                psi = TestFunction(Vu)

                eqn = psi * u_sde_weak * dx - psi * (u - u_sde) * dx
                prob = NonlinearVariationalProblem(eqn, u_sde_weak)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)

            elif key == 'u_sde_mean':
                self.to_update_constants = True
                self.p = Constant(1.0)
                self.q = Constant(Ld / 2)

                if periodic:
                    raise NotImplementedError(
                        'u_sde_mean not yet implemented for periodic peakon')

                u_sde = self.diagnostic_variables.fields['u_sde_mean']
                expr = conditional(
                    x < self.q - Ld / 2,
                    self.p * exp(-(x - self.q + Ld) / sqrt(alphasq)),
                    conditional(
                        x < self.q + Ld / 2,
                        self.p * exp(-sqrt((self.q - x)**2 / alphasq)),
                        self.p * exp(-(self.q + Ld - x) / sqrt(alphasq))))
                self.interpolators.append(Interpolator(expr, u_sde))

            elif key == 'u_sde_weak_mean':
                u_sde = self.diagnostic_variables.fields['u_sde_mean']
                u_sde_weak = self.diagnostic_variables.fields[
                    'u_sde_weak_mean']
                psi = TestFunction(Vu)

                eqn = psi * u_sde_weak * dx - psi * (u - u_sde) * dx
                prob = NonlinearVariationalProblem(eqn, u_sde_weak)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)

            elif key == 'pure_xi':
                pure_xi = 0.0 * x
                for xi in self.prognostic_variables.pure_xi_list:
                    if vector_u:
                        pure_xi += dot(ones, xi)
                    else:
                        pure_xi += xi
                Xiscalar = self.diagnostic_variables.fields['pure_xi']
                Xi_interpolator = Interpolator(pure_xi, Xiscalar)
                self.interpolators.append(Xi_interpolator)

            elif key == 'pure_xi_x':
                pure_xi_x = 0.0 * x
                for xix in self.prognostic_variables.pure_xi_x_list:
                    if vector_u:
                        pure_xi_x += dot(ones, xix)
                    else:
                        pure_xi_x += xix
                Xiscalar = self.diagnostic_variables.fields['pure_xi_x']
                Xi_interpolator = Interpolator(pure_xi_x, Xiscalar)
                self.interpolators.append(Xi_interpolator)

            elif key == 'pure_xi_xx':
                pure_xi_xx = 0.0 * x
                for xixx in self.prognostic_variables.pure_xi_xx_list:
                    if vector_u:
                        pure_xi_xx += dot(ones, xixx)
                    else:
                        pure_xi_xx += xixx
                Xiscalar = self.diagnostic_variables.fields['pure_xi_xx']
                Xi_interpolator = Interpolator(pure_xi_xx, Xiscalar)
                self.interpolators.append(Xi_interpolator)

            elif key == 'pure_xi_xxx':
                pure_xi_xxx = 0.0 * x
                for xixxx in self.prognostic_variables.pure_xi_xxx_list:
                    if vector_u:
                        pure_xi_xxx += dot(ones, xixxx)
                    else:
                        pure_xi_xxx += xixxx
                Xiscalar = self.diagnostic_variables.fields['pure_xi_xxx']
                Xi_interpolator = Interpolator(pure_xi_xxx, Xiscalar)
                self.interpolators.append(Xi_interpolator)

            elif key == 'pure_xi_xxxx':
                pure_xi_xxxx = 0.0 * x
                # assumes the prognostic variables provide a pure_xi_xxxx_list,
                # following the naming pattern of the other pure_xi_* lists above
                for xixxxx in self.prognostic_variables.pure_xi_xxxx_list:
                    if vector_u:
                        pure_xi_xxxx += dot(ones, xixxxx)
                    else:
                        pure_xi_xxxx += xixxxx
                Xiscalar = self.diagnostic_variables.fields['pure_xi_xxxx']
                Xi_interpolator = Interpolator(pure_xi_xxxx, Xiscalar)
                self.interpolators.append(Xi_interpolator)

            else:
                raise NotImplementedError('Diagnostic %s not yet implemented' %
                                          key)