Example #1
    def _setup(self, state, field, options):

        if options.name in ["embedded_dg", "recovered"]:
            self.fs = options.embedding_space
            self.xdg_in = Function(self.fs)
            self.xdg_out = Function(self.fs)
            self.x_projected = Function(field.function_space())
            parameters = {'ksp_type': 'cg',
                          'pc_type': 'bjacobi',
                          'sub_pc_type': 'ilu'}
            self.Projector = Projector(self.xdg_out, self.x_projected,
                                       solver_parameters=parameters)

        if options.name == "recovered":
            # set up the necessary functions
            self.x_in = Function(field.function_space())
            x_rec = Function(options.recovered_space)
            x_brok = Function(options.broken_space)

            # set up interpolators and projectors
            self.x_rec_projector = Recoverer(self.x_in, x_rec, VDG=self.fs, boundary_method=options.boundary_method)  # recovered function
            self.x_brok_projector = Projector(x_rec, x_brok)  # function projected back
            self.xdg_interpolator = Interpolator(self.x_in + x_rec - x_brok, self.xdg_in)
            if self.limiter is not None:
                self.x_brok_interpolator = Interpolator(self.xdg_out, x_brok)
                self.x_out_projector = Recoverer(x_brok, self.x_projected)
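The attributes set up here are consumed around each advection solve: the field is lifted into the embedding space beforehand and projected back afterwards. A minimal sketch of that round trip, mirroring the pre_apply/post_apply methods shown in Example #11 (the scheme object is assumed to be an instance carrying the attributes built above; the advection solve itself is elided):

def advect_recovered(scheme, x_in, x_out):
    # lift: recover into the higher-order space, project back to the broken
    # space, and form xdg_in = x_in + x_rec - x_brok in the embedding space
    scheme.x_in.assign(x_in)
    scheme.x_rec_projector.project()
    scheme.x_brok_projector.project()
    scheme.xdg_interpolator.interpolate()

    # ... advection solve reading scheme.xdg_in and writing scheme.xdg_out ...

    # restore: either limit-and-recover, or project straight back to the
    # original space
    if scheme.limiter is not None:
        scheme.x_brok_interpolator.interpolate()
        scheme.x_out_projector.project()
    else:
        scheme.Projector.project()
    x_out.assign(scheme.x_projected)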
Example #2
    def _prepare_output(self, function, cg):
        from firedrake import FunctionSpace, VectorFunctionSpace, \
            TensorFunctionSpace, Function, Projector, Interpolator

        name = function.name()

        # Need to project/interpolate?
        # If space is linear and continuity of output space matches
        # continuity of current space, then we can just use the
        # input function.
        if is_linear(function.function_space()) and \
           is_dg(function.function_space()) == (not cg) and \
           is_cg(function.function_space()) == cg:
            return OFunction(array=get_array(function),
                             name=name, function=function)

        # OK, let's go and do it.
        if cg:
            family = "Lagrange"
        else:
            family = "Discontinuous Lagrange"

        output = self._output_functions.get(function)
        if output is None:
            # Build appropriate space for output function.
            shape = function.ufl_shape
            if len(shape) == 0:
                V = FunctionSpace(function.ufl_domain(), family, 1)
            elif len(shape) == 1:
                if numpy.prod(shape) > 3:
                    raise ValueError("Can't write vectors with more than 3 components")
                V = VectorFunctionSpace(function.ufl_domain(), family, 1,
                                        dim=shape[0])
            elif len(shape) == 2:
                if numpy.prod(shape) > 9:
                    raise ValueError("Can't write tensors with more than 9 components")
                V = TensorFunctionSpace(function.ufl_domain(), family, 1,
                                        shape=shape)
            else:
                raise ValueError("Unsupported shape %s" % (shape, ))
            output = Function(V)
            self._output_functions[function] = output

        if self.project:
            projector = self._mappers.get(function)
            if projector is None:
                projector = Projector(function, output)
                self._mappers[function] = projector
            projector.project()
        else:
            interpolator = self._mappers.get(function)
            if interpolator is None:
                interpolator = Interpolator(function, output)
                self._mappers[function] = interpolator
            interpolator.interpolate()

        return OFunction(array=get_array(output), name=name, function=output)
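The dispatch above builds the P1 output Function and its Projector/Interpolator at most once per input function and reuses them on every subsequent write. A minimal standalone sketch of that cache-and-reuse pattern (assumes a working Firedrake installation and the same Interpolator API used in the snippet; names are illustrative):

from firedrake import (UnitSquareMesh, FunctionSpace, Function, Interpolator,
                       SpatialCoordinate, sin, pi)

mesh = UnitSquareMesh(4, 4)
V_in = FunctionSpace(mesh, "CG", 2)    # space the output format cannot represent
V_out = FunctionSpace(mesh, "CG", 1)   # P1 output space

x, y = SpatialCoordinate(mesh)
f = Function(V_in).interpolate(sin(pi*x)*sin(pi*y))

_output_functions = {}
_mappers = {}

def prepare(function):
    # build the output Function and its Interpolator once, then reuse them
    output = _output_functions.get(function)
    if output is None:
        output = Function(V_out)
        _output_functions[function] = output
    mapper = _mappers.get(function)
    if mapper is None:
        mapper = Interpolator(function, output)
        _mappers[function] = mapper
    mapper.interpolate()   # re-evaluates using the function's current values
    return output

out = prepare(f)   # first call builds and caches the mapper
out = prepare(f)   # later calls reuse it without rebuilding anything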
Example #4
    def _prepare_output(self, function, max_elem):
        from firedrake import FunctionSpace, VectorFunctionSpace, \
            TensorFunctionSpace, Function, Projector, Interpolator

        name = function.name()
        # Need to project/interpolate?
        # If the function already uses the max element, we can just
        # use the input function.
        if function.ufl_element() == max_elem:
            return OFunction(array=get_array(function),
                             name=name,
                             function=function)
        # OK, let's go and do it.
        output = self._output_functions.get(function)
        if output is None:
            # Build appropriate space for output function.
            shape = function.ufl_shape
            if len(shape) == 0:
                V = FunctionSpace(function.ufl_domain(), max_elem)
            elif len(shape) == 1:
                if numpy.prod(shape) > 3:
                    raise ValueError(
                        "Can't write vectors with more than 3 components")
                V = VectorFunctionSpace(function.ufl_domain(),
                                        max_elem,
                                        dim=shape[0])
            elif len(shape) == 2:
                if numpy.prod(shape) > 9:
                    raise ValueError(
                        "Can't write tensors with more than 9 components")
                V = TensorFunctionSpace(function.ufl_domain(),
                                        max_elem,
                                        shape=shape)
            else:
                raise ValueError("Unsupported shape %s" % (shape, ))
            output = Function(V)
            self._output_functions[function] = output
        if self.project:
            projector = self._mappers.get(function)
            if projector is None:
                projector = Projector(function, output)
                self._mappers[function] = projector
            projector.project()
        else:
            interpolator = self._mappers.get(function)
            if interpolator is None:
                interpolator = Interpolator(function, output)
                self._mappers[function] = interpolator
            interpolator.interpolate()

        return OFunction(array=get_array(output), name=name, function=output)
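Note that ufl_element is a method, so the early-return comparison above must call it; comparing the bound method object to max_elem is always False. A small illustrative sketch (not from the source) of building matching scalar and vector output spaces from a single target element, as this method does:

from firedrake import (UnitSquareMesh, FiniteElement, FunctionSpace,
                       VectorFunctionSpace, Function)

mesh = UnitSquareMesh(2, 2)
max_elem = FiniteElement("DG", mesh.ufl_cell(), 1)   # an assumed target element

V_scalar = FunctionSpace(mesh, max_elem)             # scalar space on that element
V_vector = VectorFunctionSpace(mesh, max_elem, dim=2)

f = Function(V_scalar)
print(f.ufl_element() == max_elem)   # expected True once ufl_element() is called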
Example #5
    def __init__(self, state, accretion=True, accumulation=True):
        super().__init__(state)

        # obtain our fields
        self.water_c = state.fields('water_c')
        self.rain = state.fields('rain')

        # declare function space
        Vt = self.water_c.function_space()

        # define some parameters as attributes
        dt = state.timestepping.dt
        k_1 = Constant(0.001)  # accretion rate in 1/s
        k_2 = Constant(2.2)  # accumulation rate in 1/s
        a = Constant(0.001)  # min cloud conc in kg/kg
        b = Constant(0.875)  # power for rain in accumulation

        # make default rates to be zero
        accr_rate = Constant(0.0)
        accu_rate = Constant(0.0)

        if accretion:
            accr_rate = k_1 * (self.water_c - a)
        if accumulation:
            accu_rate = k_2 * self.water_c * self.rain**b

        # make the coalescence rate function; it needs to be the same for all updates in one time step
        coalesce_rate = Function(Vt)

        # limit the coalescence rate using min_value so that negative cloud concentration doesn't occur
        self.lim_coalesce_rate = Interpolator(
            conditional(
                self.rain < 0.0,  # if rain is negative do only accretion
                conditional(accr_rate < 0.0, 0.0,
                            min_value(accr_rate, self.water_c / dt)),
                # don't turn rain back into cloud
                conditional(
                    accr_rate + accu_rate < 0.0,
                    0.0,
                    # if accretion rate is negative do only accumulation
                    conditional(
                        accr_rate < 0.0, min_value(accu_rate,
                                                   self.water_c / dt),
                        min_value(accr_rate + accu_rate, self.water_c / dt)))),
            coalesce_rate)

        # tell the prognostic fields what to update to
        self.water_c_new = Interpolator(self.water_c - dt * coalesce_rate, Vt)
        self.rain_new = Interpolator(self.rain + dt * coalesce_rate, Vt)
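The nested conditional/min_value expression above caps the coalescence rate so that no more cloud water can be removed in one step than is actually available. A minimal standalone sketch of the same limiting pattern on a toy DG0 field (illustrative only, not the Gusto class; assumes a working Firedrake installation):

from firedrake import (UnitIntervalMesh, FunctionSpace, Function, Constant,
                       Interpolator, conditional, min_value)

mesh = UnitIntervalMesh(4)
V = FunctionSpace(mesh, "DG", 0)

dt = Constant(2.0)                   # time step in s
water_c = Function(V).assign(0.01)   # available cloud water in kg/kg
raw_rate = Constant(0.02)            # unlimited removal rate in kg/kg/s

limited = Function(V)
# never remove a negative amount, and never more than water_c/dt in one step
lim = Interpolator(conditional(raw_rate < 0.0, 0.0,
                               min_value(raw_rate, water_c / dt)), limited)
lim.interpolate()
# limited now holds min(0.02, 0.01/2.0) = 0.005 everywhere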
class StochasticFunctions(object):
    """
    The stochastic functions object.

    :arg prognostic_variables: a PrognosticVariables object.
    :arg simulation_parameters: a dictionary of the simulation parameters.
    """

    def __init__(self, prognostic_variables, simulation_parameters):

        mesh = simulation_parameters['mesh'][-1]
        x, = SpatialCoordinate(mesh)
        Ld = simulation_parameters['Ld'][-1]
        self.scheme = simulation_parameters['scheme'][-1]

        self.dt = simulation_parameters['dt'][-1]
        self.num_Xis = simulation_parameters['num_Xis'][-1]
        self.Xi_family = simulation_parameters['Xi_family'][-1]
        self.dXi = prognostic_variables.dXi
        self.dWs = [Constant(0.0) for dw in range(self.num_Xis)]
        self.dW_nums = prognostic_variables.dW_nums
        self.Xi_functions = []
        self.nXi_updates = simulation_parameters['nXi_updates'][-1]
        self.smooth_t = simulation_parameters['smooth_t'][-1]
        self.fixed_dW = simulation_parameters['fixed_dW'][-1]

        if self.smooth_t is not None and self.nXi_updates > 1:
            raise ValueError('Prescribing forcing and including multiple Xi updates are not compatible.')

        if self.smooth_t is not None or self.fixed_dW is not None:
            print('WARNING: Remember to change sigma to sigma * sqrt(dt) with the prescribed forcing option or the fixed_dW option.')


        seed = simulation_parameters['seed'][-1]
        np.random.seed(seed)

        # make sure sigma is a Constant
        if self.num_Xis != 0:
            if isinstance(simulation_parameters['sigma'][-1], Constant):
                self.sigma = simulation_parameters['sigma'][-1]
            else:
                self.sigma = Constant(simulation_parameters['sigma'][-1])
        else:
            self.sigma = Constant(0.0)

        self.pure_xi_list = prognostic_variables.pure_xi_list
        self.pure_xi_x_list = prognostic_variables.pure_xi_x_list
        self.pure_xi_xx_list = prognostic_variables.pure_xi_xx_list
        self.pure_xi_xxx_list = prognostic_variables.pure_xi_xxx_list
        self.pure_xi_xxxx_list = prognostic_variables.pure_xi_xxxx_list
        for xi in range(self.num_Xis):
            self.pure_xi_list.append(Function(self.dXi.function_space()))
            self.pure_xi_x_list.append(Function(self.dXi.function_space()))
            self.pure_xi_xx_list.append(Function(self.dXi.function_space()))
            self.pure_xi_xxx_list.append(Function(self.dXi.function_space()))
            self.pure_xi_xxxx_list.append(Function(self.dXi.function_space()))


        if self.Xi_family == 'sines':
            for n in range(self.num_Xis):
                if (n+1) % 2 == 1:
                    self.Xi_functions.append(self.sigma * sin(2*(n+1)*pi*x/Ld))
                else:
                    self.Xi_functions.append(self.sigma * cos(2*(n+1)*pi*x/Ld))

        elif self.Xi_family == 'double_sines':
            for n in range(self.num_Xis):
                if (n+1) % 2 == 1:
                    self.Xi_functions.append(self.sigma * sin(4*(n+1)*pi*x/Ld))
                else:
                    self.Xi_functions.append(self.sigma * cos(4*(n+1)*pi*x/Ld))

        elif self.Xi_family == 'high_freq_sines':
            for n in range(self.num_Xis):
                if (n+1) % 2 == 1:
                    self.Xi_functions.append(self.sigma * sin((2*(n+1)+10)*pi*x/Ld))
                else:
                    self.Xi_functions.append(self.sigma * cos((2*(n+1)+10)*pi*x/Ld))

        elif self.Xi_family == 'gaussians':
            for n in range(self.num_Xis):
                self.Xi_functions.append(self.sigma * 0.5*self.num_Xis*exp(-((x-Ld*(n+1)/(self.num_Xis +1.0))/2.)**2))

        elif self.Xi_family == 'quadratic':
            if self.num_Xis > 1:
                raise NotImplementedError('Quadratic Xi not yet implemented for more than one Xi')
            else:
                self.Xi_functions.append(32/(Ld*Ld)*conditional(x > Ld/4,
                                                     conditional(x > 3*Ld/8,
                                                                 conditional(x > 5*Ld/8,
                                                                             conditional(x < 3*Ld/4,
                                                                                         self.sigma * (x - 3*Ld/4)**2,
                                                                                         0.0),
                                                                             (x-Ld/2)**2+Ld**2/32),
                                                                 (x-Ld/4)**2),
                                                     0.0))
        elif self.Xi_family == 'proper_peak':
            if self.num_Xis > 1:
                raise NotImplementedError('Proper peak Xi not yet implemented for more than one Xi')
            else:
                self.Xi_functions.append(self.sigma * 0.5*2/(exp(x-Ld/2)+exp(-x+Ld/2)))

        elif self.Xi_family == 'constant':
            if self.num_Xis > 1:
                raise NotImplementedError('Constant Xi not yet implemented for more than one Xi')
            else:
                self.Xi_functions.append(self.sigma * (sin(0*pi*x/Ld)+1))


        else:
            raise NotImplementedError('Xi_family %s not implemented' % self.Xi_family)

        # make lists of functions for xi_x, xi_xx and xi_xxx
        if self.scheme in ['hydrodynamic', 'LASCH_hydrodynamic']:
            self.dXi_x = prognostic_variables.dXi_x
            self.dXi_xx = prognostic_variables.dXi_xx

            self.Xi_x_functions = []
            self.Xi_xx_functions = []

            for Xi_expr in self.Xi_functions:
                Xi_x_function = Function(self.dXi_x.function_space())
                Xi_xx_function = Function(self.dXi_xx.function_space())

                phi_x = TestFunction(self.dXi_x.function_space())
                phi_xx = TestFunction(self.dXi_xx.function_space())

                Xi_x_eqn = phi_x * Xi_x_function * dx + phi_x.dx(0) * Xi_expr * dx
                Xi_xx_eqn = phi_xx * Xi_xx_function * dx + phi_xx.dx(0) * Xi_x_function * dx

                Xi_x_problem = NonlinearVariationalProblem(Xi_x_eqn, Xi_x_function)
                Xi_xx_problem = NonlinearVariationalProblem(Xi_xx_eqn, Xi_xx_function)

                Xi_x_solver = NonlinearVariationalSolver(Xi_x_problem)
                Xi_xx_solver = NonlinearVariationalSolver(Xi_xx_problem)

                # for some reason these solvers don't work for constant Xi functions
                # so just manually make the derivatives be zero
                if self.Xi_family == 'constant':
                    Xi_x_function.interpolate(0.0*x)
                    Xi_xx_function.interpolate(0.0*x)
                else:
                    Xi_x_solver.solve()
                    Xi_xx_solver.solve()

                self.Xi_x_functions.append(Xi_x_function)
                self.Xi_xx_functions.append(Xi_xx_function)

        # now make a master xi
        Xi_expr = 0.0*x

        for dW, Xi_function, pure_xi, pure_xi_x, pure_xi_xx, pure_xi_xxx, pure_xi_xxxx in zip(self.dWs, self.Xi_functions, self.pure_xi_list, self.pure_xi_x_list, self.pure_xi_xx_list, self.pure_xi_xxx_list, self.pure_xi_xxxx_list):
            Xi_expr += dW * Xi_function
            if self.scheme in ['upwind', 'LASCH']:
                pure_xi.interpolate(as_vector([Xi_function]))
                pure_xi_x.project(as_vector([Xi_function.dx(0)]))

                CG1 = FunctionSpace(mesh, "CG", 1)
                psi = TestFunction(CG1)
                xixx_scalar = Function(CG1)
                xixx_eqn = psi * xixx_scalar * dx + psi.dx(0) * Xi_function.dx(0) * dx
                prob = NonlinearVariationalProblem(xixx_eqn, xixx_scalar)
                solver = NonlinearVariationalSolver(prob)
                solver.solve()
                pure_xi_xx.interpolate(as_vector([xixx_scalar]))

            else:
                pure_xi.interpolate(Xi_function)

                # I guess we can't take the gradient of constants
                if self.Xi_family != 'constant':
                    pure_xi_x.project(Xi_function.dx(0))
                    pure_xi_xx.project(pure_xi_x.dx(0))
                    pure_xi_xxx.project(pure_xi_xx.dx(0))
                    pure_xi_xxxx.project(pure_xi_xxx.dx(0))

        if self.scheme in ['upwind', 'LASCH']:
            self.dXi_interpolator = Interpolator(as_vector([Xi_expr]), self.dXi)
        else:
            self.dXi_interpolator = Interpolator(Xi_expr, self.dXi)

        if self.scheme in ['hydrodynamic', 'LASCH_hydrodynamic']:

            # initialise blank expressions
            Xi_x_expr = 0.0*x
            Xi_xx_expr = 0.0*x

            # make full expressions by adding all dW * Xi_xs
            for dW, Xi_x_function, Xi_xx_function in zip(self.dWs, self.Xi_x_functions, self.Xi_xx_functions):
                Xi_x_expr += dW * Xi_x_function
                Xi_xx_expr += dW * Xi_xx_function

            self.dXi_x_interpolator = Interpolator(Xi_x_expr, self.dXi_x)
            self.dXi_xx_interpolator = Interpolator(Xi_xx_expr, self.dXi_xx)


    def update(self, t):
        """
        Updates the Xi function for the next time step.
        """
        if self.num_Xis > 0:
            # Try to calculate dW numbers separately

            # For nXi_updates > 1 then the ordering of calls to np.random.randn
            # needs to be equivalent to that for the corresponding smaller dt
            if self.nXi_updates > 1:
                self.dW_nums[:] = 0.0
                for j in range(self.nXi_updates):
                    for i in range(self.num_Xis):
                        self.dW_nums[i] += np.random.randn() * np.sqrt(self.dt/self.nXi_updates)

            else:
                for i in range(self.num_Xis):
                    if self.smooth_t is not None:
                        self.dW_nums[i] = self.smooth_t(t)
                    elif self.fixed_dW is not None:
                        self.dW_nums[i] = self.fixed_dW * np.sqrt(self.dt)
                    else:
                        self.dW_nums[i] = np.random.randn() * np.sqrt(self.dt)

            # This is to ensure we stick close to what the original code did
            for dw, dw_num in zip(self.dWs, self.dW_nums):
                dw.assign(dw_num)
            self.dXi_interpolator.interpolate()

            if self.scheme in ['hydrodynamic', 'LASCH_hydrodynamic']:
                self.dXi_x_interpolator.interpolate()
                self.dXi_xx_interpolator.interpolate()
        else:
            pass
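The increments above are Brownian: each dW is drawn as randn()*sqrt(dt), so that it has variance dt, and when nXi_updates > 1 the step is assembled from that many sub-increments of variance dt/nXi_updates, whose sum has the same distribution as a single increment. A small numpy check of that equivalence (illustrative only):

import numpy as np

rng = np.random.default_rng(0)
dt, n_sub, n_samples = 0.01, 4, 200000

# one increment per step vs. the sum of n_sub finer sub-increments
single = rng.standard_normal(n_samples) * np.sqrt(dt)
split = rng.standard_normal((n_samples, n_sub)).sum(axis=1) * np.sqrt(dt / n_sub)

print(single.var(), split.var())   # both close to dt = 0.01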
Example #7
class Recoverer(object):
    """
    An object that 'recovers' a field from a low order space
    (e.g. DG0) into a higher order space (e.g. CG1). This encompasses
    the process of interpolating first to a the right space before
    using the :class:`Averager` object, and also automates the
    boundary recovery process. If no boundary method is specified,
    this simply performs the action of the :class: `Averager`.

    :arg v_in: the :class:`ufl.Expr` or
         :class:`.Function` to project. (e.g. a VDG0 function)
    :arg v_out: :class:`.Function` to put the result in. (e.g. a CG1 function)
    :arg VDG: optional :class:`.FunctionSpace`. If not None, v_in is interpolated
         to this space first before recovery happens.
    :arg boundary_method: an Enum object, .
    """
    def __init__(self, v_in, v_out, VDG=None, boundary_method=None):

        # check if v_in is valid
        if isinstance(v_in, expression.Expression) or not isinstance(
                v_in, (ufl.core.expr.Expr, function.Function)):
            raise ValueError(
                "Can only recover UFL expression or Functions not '%s'" %
                type(v_in))

        self.v_in = v_in
        self.v_out = v_out
        self.V = v_out.function_space()
        if VDG is not None:
            self.v = Function(VDG)
            self.interpolator = Interpolator(v_in, self.v)
        else:
            self.v = v_in
            self.interpolator = None

        self.VDG = VDG
        self.boundary_method = boundary_method
        self.averager = Averager(self.v, self.v_out)

        # check boundary method options are valid
        if boundary_method is not None:
            if boundary_method != Boundary_Method.dynamics and boundary_method != Boundary_Method.physics:
                raise ValueError(
                    "Boundary method must be a Boundary_Method Enum object.")
            if VDG is None:
                raise ValueError(
                    "If boundary_method is specified, VDG also needs specifying."
                )

            # now specify things that we'll need if we are doing boundary recovery
            if boundary_method == Boundary_Method.physics:
                # check dimensions
                if self.V.value_size != 1:
                    raise ValueError(
                        'This method only works for scalar functions.')
                self.boundary_recoverer = Boundary_Recoverer(
                    self.v_out, self.v, method=Boundary_Method.physics)
            else:

                mesh = self.V.mesh()
                # this ensures we get the pure function space, not an indexed function space
                V0 = FunctionSpace(mesh,
                                   self.v_in.function_space().ufl_element())
                VCG1 = FunctionSpace(mesh, "CG", 1)
                if V0.extruded:
                    cell = mesh._base_mesh.ufl_cell().cellname()
                    DG1_hori_elt = FiniteElement("DG",
                                                 cell,
                                                 1,
                                                 variant="equispaced")
                    DG1_vert_elt = FiniteElement("DG",
                                                 interval,
                                                 1,
                                                 variant="equispaced")
                    DG1_element = TensorProductElement(DG1_hori_elt,
                                                       DG1_vert_elt)
                else:
                    cell = mesh.ufl_cell().cellname()
                    DG1_element = FiniteElement("DG",
                                                cell,
                                                1,
                                                variant="equispaced")
                VDG1 = FunctionSpace(mesh, DG1_element)

                if self.V.value_size == 1:
                    coords_to_adjust = find_coords_to_adjust(V0, VDG1)

                    self.boundary_recoverer = Boundary_Recoverer(
                        self.v_out,
                        self.v,
                        coords_to_adjust=coords_to_adjust,
                        method=Boundary_Method.dynamics)
                else:
                    VuDG1 = VectorFunctionSpace(mesh, DG1_element)
                    coords_to_adjust = find_coords_to_adjust(V0, VuDG1)

                    # now, break the problem down into components
                    v_scalars = []
                    v_out_scalars = []
                    self.boundary_recoverers = []
                    self.project_to_scalars_CG = []
                    self.extra_averagers = []
                    coords_to_adjust_list = []
                    for i in range(self.V.value_size):
                        v_scalars.append(Function(VDG1))
                        v_out_scalars.append(Function(VCG1))
                        coords_to_adjust_list.append(
                            Function(VDG1).project(coords_to_adjust[i]))
                        self.project_to_scalars_CG.append(
                            Projector(self.v_out[i], v_out_scalars[i]))
                        self.boundary_recoverers.append(
                            Boundary_Recoverer(
                                v_out_scalars[i],
                                v_scalars[i],
                                method=Boundary_Method.dynamics,
                                coords_to_adjust=coords_to_adjust_list[i]))
                        # need an extra averager that works on the scalar fields rather than the vector one
                        self.extra_averagers.append(
                            Averager(v_scalars[i], v_out_scalars[i]))

                    # the boundary recovery needs to be done on scalar fields,
                    # so we extract each component and restore the vector after the boundary recovery is done
                    self.interpolate_to_vector = Interpolator(
                        as_vector(v_out_scalars), self.v_out)

    def project(self):
        """
        Perform the fully specified recovery.
        """

        if self.interpolator is not None:
            self.interpolator.interpolate()
        self.averager.project()
        if self.boundary_method is not None:
            if self.V.value_size > 1:
                for i in range(self.V.value_size):
                    self.project_to_scalars_CG[i].project()
                    self.boundary_recoverers[i].apply()
                    self.extra_averagers[i].project()
                self.interpolate_to_vector.interpolate()
            else:
                self.boundary_recoverer.apply()
                self.averager.project()
        return self.v_out
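A minimal usage sketch of the class above, recovering a DG0 field into CG1 by first interpolating into DG1 (illustrative only; it assumes the surrounding module, including its Averager helper, is importable as in the snippet):

from firedrake import UnitSquareMesh, FunctionSpace, Function, SpatialCoordinate

mesh = UnitSquareMesh(8, 8)
V_DG0 = FunctionSpace(mesh, "DG", 0)
V_DG1 = FunctionSpace(mesh, "DG", 1)
V_CG1 = FunctionSpace(mesh, "CG", 1)

x, y = SpatialCoordinate(mesh)
rho_low = Function(V_DG0).interpolate(x + y)   # low-order input field
rho_high = Function(V_CG1)                     # recovered output field

# interpolate into VDG=V_DG1 first, then average into CG1; no boundary correction
recoverer = Recoverer(rho_low, rho_high, VDG=V_DG1)
recoverer.project()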
Example #8
class Evaporation(Physics):
    """
    The process of evaporation of rain into water vapour
    with the associated latent heat change. This
    parametrization comes from Klemp and Wilhelmson (1978).

    :arg state: :class:`.State.` object.
    """

    def __init__(self, state):
        super().__init__(state)

        # obtain our fields
        self.theta = state.fields('theta')
        self.water_v = state.fields('vapour_mixing_ratio')
        self.rain = state.fields('rain_mixing_ratio')
        rho = state.fields('rho')
        try:
            water_c = state.fields('cloud_liquid_mixing_ratio')
            water_l = self.rain + water_c
        except NotImplementedError:
            water_l = self.rain

        # declare function space
        Vt = self.theta.function_space()

        # make rho variables
        # we recover rho into theta space
        h_deg = rho.function_space().ufl_element().degree()[0]
        v_deg = rho.function_space().ufl_element().degree()[1]
        if v_deg == 0 and h_deg == 0:
            boundary_method = Boundary_Method.physics
        else:
            boundary_method = None
        Vt_broken = FunctionSpace(state.mesh, BrokenElement(Vt.ufl_element()))
        rho_averaged = Function(Vt)
        self.rho_recoverer = Recoverer(rho, rho_averaged, VDG=Vt_broken, boundary_method=boundary_method)

        # define some parameters as attributes
        dt = state.dt
        R_d = state.parameters.R_d
        cp = state.parameters.cp
        cv = state.parameters.cv
        c_pv = state.parameters.c_pv
        c_pl = state.parameters.c_pl
        c_vv = state.parameters.c_vv
        R_v = state.parameters.R_v

        # make useful fields
        exner = thermodynamics.exner_pressure(state.parameters, rho_averaged, self.theta)
        T = thermodynamics.T(state.parameters, self.theta, exner, r_v=self.water_v)
        p = thermodynamics.p(state.parameters, exner)
        L_v = thermodynamics.Lv(state.parameters, T)
        R_m = R_d + R_v * self.water_v
        c_pml = cp + c_pv * self.water_v + c_pl * water_l
        c_vml = cv + c_vv * self.water_v + c_pl * water_l

        # use Tetens' formula to calculate w_sat
        w_sat = thermodynamics.r_sat(state.parameters, T, p)

        # expression for ventilation factor
        a = Constant(1.6)
        b = Constant(124.9)
        c = Constant(0.2046)
        C = a + b * (rho_averaged * self.rain) ** c

        # make appropriate condensation rate
        f = Constant(5.4e5)
        g = Constant(2.55e6)
        h = Constant(0.525)
        dot_r_evap = (((1 - self.water_v / w_sat) * C * (rho_averaged * self.rain) ** h)
                      / (rho_averaged * (f + g / (p * w_sat))))

        # make the evap_rate function; it needs to be the same for all updates in one time step
        evap_rate = Function(Vt)

        # adjust evap rate so negative rain doesn't occur
        self.lim_evap_rate = Interpolator(conditional(dot_r_evap < 0,
                                                      0.0,
                                                      conditional(self.rain < 0.0,
                                                                  0.0,
                                                                  min_value(dot_r_evap, self.rain / dt))),
                                          evap_rate)

        # tell the prognostic fields what to update to
        self.water_v_new = Interpolator(self.water_v + dt * evap_rate, Vt)
        self.rain_new = Interpolator(self.rain - dt * evap_rate, Vt)
        self.theta_new = Interpolator(self.theta
                                      * (1.0 - dt * evap_rate
                                         * (cv * L_v / (c_vml * cp * T)
                                            - R_v * cv * c_pml / (R_m * cp * c_vml))), Vt)

    def apply(self):
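        # order matters: rho must be recovered first (the evaporation rate
        # expression reads rho_averaged), and the rate must be limited before
        # the three field updates below, which all read evap_rate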
        self.rho_recoverer.project()
        self.lim_evap_rate.interpolate()
        self.theta.assign(self.theta_new.interpolate())
        self.water_v.assign(self.water_v_new.interpolate())
        self.rain.assign(self.rain_new.interpolate())
Example #9
    def __init__(self, state, field, equation=None, *, solver_parameters=None,
                 limiter=None):

        if equation is not None:

            self.state = state
            self.field = field
            self.equation = equation
            # get ubar from the equation class
            self.ubar = self.equation.ubar
            self.dt = self.state.timestepping.dt

            # get default solver options if none passed in
            if solver_parameters is None:
                self.solver_parameters = equation.solver_parameters
            else:
                self.solver_parameters = solver_parameters
                if state.output.log_level == DEBUG:
                    self.solver_parameters["ksp_monitor_true_residual"] = True

            self.limiter = limiter

        # check to see if we are using an embedded DG method - if we are then
        # the projector and output function will have been set up in the
        # equation class and we can get the correct function space from
        # the output function.
        if isinstance(equation, EmbeddedDGAdvection):
            # check that the field and the equation are compatible
            if equation.V0 != field.function_space():
                raise ValueError('The field to be advected is not compatible with the equation used.')
            self.embedded_dg = True
            fs = equation.space
            self.xdg_in = Function(fs)
            self.xdg_out = Function(fs)
            self.x_projected = Function(field.function_space())
            parameters = {'ksp_type': 'cg',
                          'pc_type': 'bjacobi',
                          'sub_pc_type': 'ilu'}
            self.Projector = Projector(self.xdg_out, self.x_projected,
                                       solver_parameters=parameters)
            self.recovered = equation.recovered
            if self.recovered:
                # set up the necessary functions
                self.x_in = Function(field.function_space())
                x_adv = Function(fs)
                x_rec = Function(equation.V_rec)
                x_brok = Function(equation.V_brok)

                # set up interpolators and projectors
                self.x_adv_interpolator = Interpolator(self.x_in, x_adv)  # interpolate before recovery
                self.x_rec_projector = Recoverer(x_adv, x_rec)  # recovered function
                # when the "average" method comes into firedrake master, this will be
                # self.x_rec_projector = Projector(self.x_in, equation.Vrec, method="average")
                self.x_brok_projector = Projector(x_rec, x_brok)  # function projected back
                self.xdg_interpolator = Interpolator(self.x_in + x_rec - x_brok, self.xdg_in)
                if self.limiter is not None:
                    self.x_brok_interpolator = Interpolator(self.xdg_out, x_brok)
                    self.x_out_projector = Recoverer(x_brok, self.x_projected)
                    # when the "average" method comes into firedrake master, this will be
                    # self.x_out_projector = Projector(x_brok, self.x_projected, method="average")
        else:
            self.embedded_dg = False
            fs = field.function_space()

        # setup required functions
        self.fs = fs
        self.dq = Function(fs)
        self.q1 = Function(fs)
Example #10
    def __init__(self,
                 v_CG1,
                 v_DG1,
                 method=Boundary_Method.physics,
                 coords_to_adjust=None):

        self.v_DG1 = v_DG1
        self.v_CG1 = v_CG1
        self.v_DG1_old = Function(v_DG1.function_space())
        self.coords_to_adjust = coords_to_adjust

        self.method = method
        mesh = v_CG1.function_space().mesh()
        VDG0 = FunctionSpace(mesh, "DG", 0)
        VCG1 = FunctionSpace(mesh, "CG", 1)
        VDG1 = FunctionSpace(mesh, "DG", 1)

        self.num_ext = Function(VDG0)

        # check function spaces of functions
        if self.method == Boundary_Method.dynamics:
            if v_CG1.function_space() != VCG1:
                raise NotImplementedError(
                    "This boundary recovery method requires v1 to be in CG1.")
            if v_DG1.function_space() != VDG1:
                raise NotImplementedError(
                    "This boundary recovery method requires v_out to be in DG1."
                )
            # check whether mesh is valid
            if mesh.topological_dimension() == 2:
                # if the mesh is extruded then we're fine, but if not it needs to be quads
                if not VDG0.extruded and mesh.ufl_cell().cellname(
                ) != 'quadrilateral':
                    raise NotImplementedError(
                        'For 2D meshes this recovery method requires that elements are quadrilaterals'
                    )
            elif mesh.topological_dimension() == 3:
                # assume that 3D mesh is extruded
                if mesh._base_mesh.ufl_cell().cellname() != 'quadrilateral':
                    raise NotImplementedError(
                        'For 3D extruded meshes this recovery method requires a base mesh with quadrilateral elements'
                    )
            elif mesh.topological_dimension() != 1:
                raise NotImplementedError(
                    'This boundary recovery is implemented only on certain classes of mesh.'
                )
            if coords_to_adjust is None:
                raise ValueError(
                    'Need coords_to_adjust field for dynamics boundary methods'
                )

        elif self.method == Boundary_Method.physics:
            # check that mesh is valid -- must be an extruded mesh
            if not VDG0.extruded:
                raise NotImplementedError(
                    'The physics boundary method only works on extruded meshes'
                )
            # base spaces
            cell = mesh._base_mesh.ufl_cell().cellname()
            w_hori = FiniteElement("DG", cell, 0)
            w_vert = FiniteElement("CG", interval, 1)
            # build element
            theta_element = TensorProductElement(w_hori, w_vert)
            # spaces
            Vtheta = FunctionSpace(mesh, theta_element)
            Vtheta_broken = FunctionSpace(mesh, BrokenElement(theta_element))
            if v_CG1.function_space() != Vtheta:
                raise ValueError(
                    "This boundary recovery method requires v_CG1 to be in DG0xCG1 TensorProductSpace."
                )
            if v_DG1.function_space() != Vtheta_broken:
                raise ValueError(
                    "This boundary recovery method requires v_DG1 to be in the broken DG0xCG1 TensorProductSpace."
                )
        else:
            raise ValueError(
                "Boundary method should be a Boundary Method Enum object.")

        x = SpatialCoordinate(VDG0.mesh())
        self.interpolator = Interpolator(self.v_CG1, self.v_DG1)

        if self.method == Boundary_Method.dynamics:

            # STRATEGY
            # obtain a coordinate field for all the nodes
            VuDG1 = VectorFunctionSpace(mesh, "DG", 1)
            self.act_coords = Function(VuDG1).project(x)  # actual coordinates
            self.eff_coords = Function(VuDG1).project(x)  # effective coordinates

            shapes = {
                "nDOFs":
                self.v_DG1.function_space().finat_element.space_dimension(),
                "dim":
                np.prod(VuDG1.shape, dtype=int)
            }

            num_ext_domain = ("{{[i]: 0 <= i < {nDOFs}}}").format(**shapes)
            num_ext_instructions = ("""
            <float64> SUM_EXT = 0
            for i
                SUM_EXT = SUM_EXT + EXT_V1[i]
            end

            NUM_EXT[0] = SUM_EXT
            """)

            coords_domain = ("{{[i, j, k, ii, jj, kk, ll, mm, iii, kkk]: "
                             "0 <= i < {nDOFs} and "
                             "0 <= j < {nDOFs} and 0 <= k < {dim} and "
                             "0 <= ii < {nDOFs} and 0 <= jj < {nDOFs} and "
                             "0 <= kk < {dim} and 0 <= ll < {dim} and "
                             "0 <= mm < {dim} and 0 <= iii < {nDOFs} and "
                             "0 <= kkk < {dim}}}").format(**shapes)
            coords_insts = (
                """
                            <float64> sum_V1_ext = 0
                            <int> index = 100
                            <float64> dist = 0.0
                            <float64> max_dist = 0.0
                            <float64> min_dist = 0.0
                            """

                # only do adjustment in cells with at least one DOF to adjust
                """
                            if NUM_EXT[0] > 0
                            """

                # find the maximum distance between DOFs in this cell, to serve as starting point for finding min distances
                """
                                for i
                                    for j
                                        dist = 0.0
                                        for k
                                            dist = dist + pow(ACT_COORDS[i,k] - ACT_COORDS[j,k], 2.0)
                                        end
                                        dist = pow(dist, 0.5) {{id=sqrt_max_dist, dep=*}}
                                        max_dist = fmax(dist, max_dist) {{id=max_dist, dep=sqrt_max_dist}}
                                    end
                                end
                            """

                # loop through cells and find which ones to adjust
                """
                                for ii
                                    if EXT_V1[ii] > 0.5
                            """

                # find closest interior node
                """
                                        min_dist = max_dist
                                        index = 100
                                        for jj
                                            if EXT_V1[jj] < 0.5
                                                dist = 0.0
                                                for kk
                                                    dist = dist + pow(ACT_COORDS[ii,kk] - ACT_COORDS[jj,kk], 2)
                                                end
                                                dist = pow(dist, 0.5)
                                                if dist <= min_dist
                                                    index = jj
                                                end
                                                min_dist = fmin(min_dist, dist)
                                                for ll
                                                    EFF_COORDS[ii,ll] = 0.5 * (ACT_COORDS[ii,ll] + ACT_COORDS[index,ll])
                                                end
                                            end
                                        end
                                    else
                            """

                # for DOFs that aren't exterior, use the original coordinates
                """
                                        for mm
                                            EFF_COORDS[ii, mm] = ACT_COORDS[ii, mm]
                                        end
                                    end
                                end
                            else
                            """

                # for interior elements, just use the original coordinates
                """
                                for iii
                                    for kkk
                                        EFF_COORDS[iii, kkk] = ACT_COORDS[iii, kkk]
                                    end
                                end
                            end
                            """).format(**shapes)

            elimin_domain = (
                "{{[i, ii_loop, jj_loop, kk, ll_loop, mm, iii_loop, kkk_loop, iiii, iiiii]: "
                "0 <= i < {nDOFs} and 0 <= ii_loop < {nDOFs} and "
                "ii_loop + 1 <= jj_loop < {nDOFs} and ii_loop <= kk < {nDOFs} and "
                "ii_loop + 1 <= ll_loop < {nDOFs} and ii_loop <= mm < {nDOFs} + 1 and "
                "0 <= iii_loop < {nDOFs} and {nDOFs} - iii_loop <= kkk_loop < {nDOFs} + 1 and "
                "0 <= iiii < {nDOFs} and 0 <= iiiii < {nDOFs}}}").format(
                    **shapes)
            elimin_insts = (
                """
                            <int> ii = 0
                            <int> jj = 0
                            <int> ll = 0
                            <int> iii = 0
                            <int> jjj = 0
                            <int> i_max = 0
                            <float64> A_max = 0.0
                            <float64> temp_f = 0.0
                            <float64> temp_A = 0.0
                            <float64> c = 0.0
                            <float64> f[{nDOFs}] = 0.0
                            <float64> a[{nDOFs}] = 0.0
                            <float64> A[{nDOFs},{nDOFs}] = 0.0
                            """

                # We are aiming to find the vector a that solves A*a = f, for matrix A and vector f.
                # This is done by performing row operations (swapping and scaling) to obtain A in upper diagonal form.
                # N.B. several for loops must be executed in numerical order (loopy does not necessarily do this).
                # For these loops we must manually iterate the index.
                """
                            if NUM_EXT[0] > 0.0
                            """

                # only do Gaussian elimination for elements with effective coordinates
                """
                                for i
                            """

                # fill f with the original field values and A with the effective coordinate values
                """
                                    f[i] = DG1_OLD[i]
                                    A[i,0] = 1.0
                                    A[i,1] = EFF_COORDS[i,0]
                                    if {nDOFs} > 3
                                        A[i,2] = EFF_COORDS[i,1]
                                        A[i,3] = EFF_COORDS[i,0]*EFF_COORDS[i,1]
                                        if {nDOFs} > 7
                                            A[i,4] = EFF_COORDS[i,{dim}-1]
                                            A[i,5] = EFF_COORDS[i,0]*EFF_COORDS[i,{dim}-1]
                                            A[i,6] = EFF_COORDS[i,1]*EFF_COORDS[i,{dim}-1]
                                            A[i,7] = EFF_COORDS[i,0]*EFF_COORDS[i,1]*EFF_COORDS[i,{dim}-1]
                                        end
                                    end
                                end
                            """

                # now loop through rows/columns of A
                """
                                for ii_loop
                                    A_max = fabs(A[ii,ii])
                                    i_max = ii
                            """

                # loop to find the largest value in the ith column
                # set i_max as the index of the row with this largest value.
                """
                                    jj = ii + 1
                                    for jj_loop
                                        if fabs(A[jj,ii]) > A_max
                                            i_max = jj
                                        end
                                        A_max = fmax(A_max, fabs(A[jj,ii]))
                                        jj = jj + 1
                                    end
                            """

                # if the max value in the ith column isn't in the ith row, we must swap the rows
                """
                                    if i_max != ii
                            """

                # swap the elements of f
                """
                                        temp_f = f[ii]  {{id=set_temp_f, dep=*}}
                                        f[ii] = f[i_max]  {{id=set_f_imax, dep=set_temp_f}}
                                        f[i_max] = temp_f  {{id=set_f_ii, dep=set_f_imax}}
                            """

                # swap the elements of A
                # N.B. kk runs from ii to (nDOFs-1) as elements below diagonal should be 0
                """
                                        for kk
                                            temp_A = A[ii,kk]  {{id=set_temp_A, dep=*}}
                                            A[ii, kk] = A[i_max, kk]  {{id=set_A_ii, dep=set_temp_A}}
                                            A[i_max, kk] = temp_A  {{id=set_A_imax, dep=set_A_ii}}
                                        end
                                    end
                            """

                # scale the rows below the ith row
                """
                                    ll = ii + 1
                                    for ll_loop
                                        if ll > ii
                            """

                # find scaling factor
                """
                                            c = - A[ll,ii] / A[ii,ii]
                            """

                # N.B. mm runs from ii to (nDOFs-1) as elements below diagonal should be 0
                """
                                            for mm
                                                A[ll, mm] = A[ll, mm] + c * A[ii,mm]
                                            end
                                            f[ll] = f[ll] + c * f[ii]
                                        end
                                        ll = ll + 1
                                    end
                                    ii = ii + 1
                                end
                            """

                # do back substitution of upper diagonal A to obtain a
                """
                                iii = 0
                                for iii_loop
                            """

                # jjj starts at the bottom row and works upwards
                """
                                    jjj = {nDOFs} - iii - 1  {{id=assign_jjj, dep=*}}
                                    a[jjj] = f[jjj]   {{id=set_a, dep=assign_jjj}}
                                    for kkk_loop
                                        a[jjj] = a[jjj] - A[jjj,kkk_loop] * a[kkk_loop]
                                    end
                                    a[jjj] = a[jjj] / A[jjj,jjj]
                                    iii = iii + 1
                                end
                            """

                # Having found a, this gives us the coefficients for the Taylor expansion with the actual coordinates.
                """
                                for iiii
                                    if {nDOFs} == 2
                                        DG1[iiii] = a[0] + a[1]*ACT_COORDS[iiii,0]
                                    elif {nDOFs} == 4
                                        DG1[iiii] = a[0] + a[1]*ACT_COORDS[iiii,0] + a[2]*ACT_COORDS[iiii,1] + a[3]*ACT_COORDS[iiii,0]*ACT_COORDS[iiii,1]
                                    elif {nDOFs} == 8
                                        DG1[iiii] = a[0] + a[1]*ACT_COORDS[iiii,0] + a[2]*ACT_COORDS[iiii,1] + a[3]*ACT_COORDS[iiii,0]*ACT_COORDS[iiii,1] + a[4]*ACT_COORDS[iiii,{dim}-1] + a[5]*ACT_COORDS[iiii,0]*ACT_COORDS[iiii,{dim}-1] + a[6]*ACT_COORDS[iiii,1]*ACT_COORDS[iiii,{dim}-1] + a[7]*ACT_COORDS[iiii,0]*ACT_COORDS[iiii,1]*ACT_COORDS[iiii,{dim}-1]
                                    end
                                end
                            """

                # if element is not external, just use old field values.
                """
                            else
                                for iiiii
                                    DG1[iiiii] = DG1_OLD[iiiii]
                                end
                            end
                            """).format(**shapes)

            _num_ext_kernel = (num_ext_domain, num_ext_instructions)
            _eff_coords_kernel = (coords_domain, coords_insts)
            self._gaussian_elimination_kernel = (elimin_domain, elimin_insts)

            # find number of external DOFs per cell
            par_loop(_num_ext_kernel,
                     dx, {
                         "NUM_EXT": (self.num_ext, WRITE),
                         "EXT_V1": (self.coords_to_adjust, READ)
                     },
                     is_loopy_kernel=True)

            # find effective coordinates
            logger.warning(
                'Finding effective coordinates for boundary recovery. This could give unexpected results for deformed meshes over very steep topography.'
            )
            par_loop(_eff_coords_kernel,
                     dx, {
                         "EFF_COORDS": (self.eff_coords, WRITE),
                         "ACT_COORDS": (self.act_coords, READ),
                         "NUM_EXT": (self.num_ext, READ),
                         "EXT_V1": (self.coords_to_adjust, READ)
                     },
                     is_loopy_kernel=True)

        elif self.method == Boundary_Method.physics:
            top_bottom_domain = ("{[i]: 0 <= i < 1}")
            bottom_instructions = ("""
                                   DG1[0] = 2 * CG1[0] - CG1[1]
                                   DG1[1] = CG1[1]
                                   """)
            top_instructions = ("""
                                DG1[0] = CG1[0]
                                DG1[1] = -CG1[0] + 2 * CG1[1]
                                """)

            self._bottom_kernel = (top_bottom_domain, bottom_instructions)
            self._top_kernel = (top_bottom_domain, top_instructions)
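The elimination kernel above is standard Gaussian elimination with partial pivoting followed by back substitution, written out in loopy. A plain numpy reference of the same algorithm, useful for checking the kernel's logic (illustrative only; it is not used by the class):

import numpy as np

def solve_taylor_coeffs(A, f):
    """Solve A @ a = f by Gaussian elimination with partial pivoting,
    mirroring the loopy kernel: row swaps, elimination, back substitution."""
    A = np.array(A, dtype=float)
    f = np.array(f, dtype=float)
    n = len(f)
    for i in range(n):
        # pivot: bring the row with the largest |A[:, i]| (at or below row i) up
        p = i + np.argmax(np.abs(A[i:, i]))
        if p != i:
            A[[i, p]] = A[[p, i]]
            f[[i, p]] = f[[p, i]]
        # eliminate the entries below the pivot
        for row in range(i + 1, n):
            c = -A[row, i] / A[i, i]
            A[row, i:] += c * A[i, i:]
            f[row] += c * f[i]
    # back substitution on the upper-triangular system
    a = np.zeros(n)
    for j in range(n - 1, -1, -1):
        a[j] = (f[j] - A[j, j + 1:] @ a[j + 1:]) / A[j, j]
    return a

A = np.array([[1., 2., 0.], [3., 1., 1.], [0., 4., 2.]])
f = np.array([1., 2., 3.])
print(np.allclose(solve_taylor_coeffs(A, f), np.linalg.solve(A, f)))   # True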
Example #11
class Advection(object, metaclass=ABCMeta):
    """
    Base class for advection schemes.

    :arg state: :class:`.State` object.
    :arg field: field to be advected
    :arg equation: :class:`.Equation` object, specifying the equation
         that the field satisfies
    :arg solver_parameters: solver_parameters
    :arg limiter: :class:`.Limiter` object.
    :arg options: :class:`.AdvectionOptions` object
    """

    def __init__(self, state, field, equation=None, *, solver_parameters=None,
                 limiter=None):

        if equation is not None:

            self.state = state
            self.field = field
            self.equation = equation
            # get ubar from the equation class
            self.ubar = self.equation.ubar
            self.dt = self.state.timestepping.dt

            # get default solver options if none passed in
            if solver_parameters is None:
                self.solver_parameters = equation.solver_parameters
            else:
                self.solver_parameters = solver_parameters
                if logger.isEnabledFor(DEBUG):
                    self.solver_parameters["ksp_monitor_true_residual"] = True

            self.limiter = limiter

            if hasattr(equation, "options"):
                self.discretisation_option = equation.options.name
                self._setup(state, field, equation.options)
            else:
                self.discretisation_option = None
                self.fs = field.function_space()

            # setup required functions
            self.dq = Function(self.fs)
            self.q1 = Function(self.fs)

    def _setup(self, state, field, options):

        if options.name in ["embedded_dg", "recovered"]:
            self.fs = options.embedding_space
            self.xdg_in = Function(self.fs)
            self.xdg_out = Function(self.fs)
            self.x_projected = Function(field.function_space())
            parameters = {'ksp_type': 'cg',
                          'pc_type': 'bjacobi',
                          'sub_pc_type': 'ilu'}
            self.Projector = Projector(self.xdg_out, self.x_projected,
                                       solver_parameters=parameters)

        if options.name == "recovered":
            # set up the necessary functions
            self.x_in = Function(field.function_space())
            x_rec = Function(options.recovered_space)
            x_brok = Function(options.broken_space)

            # set up interpolators and projectors
            self.x_rec_projector = Recoverer(self.x_in, x_rec, VDG=self.fs, boundary_method=options.boundary_method)  # recovered function
            self.x_brok_projector = Projector(x_rec, x_brok)  # function projected back
            self.xdg_interpolator = Interpolator(self.x_in + x_rec - x_brok, self.xdg_in)
            if self.limiter is not None:
                self.x_brok_interpolator = Interpolator(self.xdg_out, x_brok)
                self.x_out_projector = Recoverer(x_brok, self.x_projected)

    def pre_apply(self, x_in, discretisation_option):
        """
        Extra steps applied before the advection step when using an embedded
        method, which might be either the plain embedded method or the
        recovered space advection scheme.

        :arg x_in: the input set of prognostic fields.
        :arg discretisation_option: string specifying which scheme to use.
        """
        if discretisation_option == "embedded_dg":
            try:
                self.xdg_in.interpolate(x_in)
            except NotImplementedError:
                self.xdg_in.project(x_in)

        elif discretisation_option == "recovered":
            self.x_in.assign(x_in)
            self.x_rec_projector.project()
            self.x_brok_projector.project()
            self.xdg_interpolator.interpolate()

    def post_apply(self, x_out, discretisation_option):
        """
        The projection steps, returning a field to its original space
        for an embedded DG advection scheme. For the case of the
        recovered scheme, there are two options dependent on whether
        the scheme is limited or not.

        :arg x_out: the outgoing field.
        :arg discretisation_option: string specifying which option to use.
        """
        if discretisation_option == "embedded_dg":
            self.Projector.project()

        elif discretisation_option == "recovered":
            if self.limiter is not None:
                self.x_brok_interpolator.interpolate()
                self.x_out_projector.project()
            else:
                self.Projector.project()
        x_out.assign(self.x_projected)

    @abstractproperty
    def lhs(self):
        return self.equation.mass_term(self.equation.trial)

    @abstractproperty
    def rhs(self):
        return self.equation.mass_term(self.q1) - self.dt*self.equation.advection_term(self.q1)

    def update_ubar(self, xn, xnp1, alpha):
        un = xn.split()[0]
        unp1 = xnp1.split()[0]
        self.ubar.assign(un + alpha*(unp1-un))

    @cached_property
    def solver(self):
        # setup solver using lhs and rhs defined in derived class
        problem = LinearVariationalProblem(self.lhs, self.rhs, self.dq)
        solver_name = self.field.name()+self.equation.__class__.__name__+self.__class__.__name__
        return LinearVariationalSolver(problem, solver_parameters=self.solver_parameters, options_prefix=solver_name)

    @abstractmethod
    def apply(self, x_in, x_out):
        """
        Takes x_in as input, computes L(x_in) as defined by the equation,
        and writes the result to x_out.

        :arg x_in: :class:`.Function` object, the input Function.
        :arg x_out: :class:`.Function` object, the output Function.
        """
        pass
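
To show how the pieces above are meant to fit together, here is a hedged sketch of a single-stage explicit subclass. The name ForwardEuler and the body of apply are illustrative only (they assume the same imports as the class above and that the limiter exposes an apply method); this is not the library's own scheme.

class ForwardEuler(Advection):
    """Hypothetical explicit scheme solving M(dq) = M(q1) - dt*A(q1)."""

    @cached_property
    def lhs(self):
        return super(ForwardEuler, self).lhs

    @cached_property
    def rhs(self):
        return super(ForwardEuler, self).rhs

    def apply(self, x_in, x_out):
        if self.discretisation_option is not None:
            # move the field into the embedding space first
            self.pre_apply(x_in, self.discretisation_option)
            self.q1.assign(self.xdg_in)
        else:
            self.q1.assign(x_in)
        self.solver.solve()
        if self.limiter is not None:
            self.limiter.apply(self.dq)
        if self.discretisation_option is not None:
            # return the result to the original space
            self.xdg_out.assign(self.dq)
            self.post_apply(x_out, self.discretisation_option)
        else:
            x_out.assign(self.dq)
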
Example #12
0
    def __init__(self,
                 v_CG1,
                 v_DG1,
                 method=Boundary_Method.physics,
                 eff_coords=None):

        self.v_DG1 = v_DG1
        self.v_CG1 = v_CG1
        self.v_DG1_old = Function(v_DG1.function_space())
        self.eff_coords = eff_coords

        self.method = method
        mesh = v_CG1.function_space().mesh()
        DG0 = FunctionSpace(mesh, "DG", 0)
        CG1 = FunctionSpace(mesh, "CG", 1)

        if DG0.extruded:
            cell = mesh._base_mesh.ufl_cell().cellname()
            DG1_hori_elt = FiniteElement("DG", cell, 1, variant="equispaced")
            DG1_vert_elt = FiniteElement("DG",
                                         interval,
                                         1,
                                         variant="equispaced")
            DG1_element = TensorProductElement(DG1_hori_elt, DG1_vert_elt)
        else:
            cell = mesh.ufl_cell().cellname()
            DG1_element = FiniteElement("DG", cell, 1, variant="equispaced")
        DG1 = FunctionSpace(mesh, DG1_element)

        self.num_ext = find_domain_boundaries(mesh)

        # check function spaces of functions
        if self.method == Boundary_Method.dynamics:
            if v_CG1.function_space() != CG1:
                raise NotImplementedError(
                    "This boundary recovery method requires v1 to be in CG1.")
            if v_DG1.function_space() != DG1:
                raise NotImplementedError(
                    "This boundary recovery method requires v_out to be in DG1."
                )
            if eff_coords is None:
                raise ValueError(
                    'Need eff_coords field for dynamics boundary methods')

        elif self.method == Boundary_Method.physics:
            # check that mesh is valid -- must be an extruded mesh
            if not DG0.extruded:
                raise NotImplementedError(
                    'The physics boundary method only works on extruded meshes'
                )
            # base spaces
            cell = mesh._base_mesh.ufl_cell().cellname()
            w_hori = FiniteElement("DG", cell, 0, variant="equispaced")
            w_vert = FiniteElement("CG", interval, 1, variant="equispaced")
            # build element
            theta_element = TensorProductElement(w_hori, w_vert)
            # spaces
            Vtheta = FunctionSpace(mesh, theta_element)
            Vtheta_broken = FunctionSpace(mesh, BrokenElement(theta_element))
            if v_CG1.function_space() != Vtheta:
                raise ValueError(
                    "This boundary recovery method requires v_CG1 to be in DG0xCG1 TensorProductSpace."
                )
            if v_DG1.function_space() != Vtheta_broken:
                raise ValueError(
                    "This boundary recovery method requires v_DG1 to be in the broken DG0xCG1 TensorProductSpace."
                )
        else:
            raise ValueError(
                "Boundary method should be a Boundary Method Enum object.")

        vec_DG1 = VectorFunctionSpace(DG0.mesh(), DG1_element)
        x = SpatialCoordinate(DG0.mesh())
        self.interpolator = Interpolator(self.v_CG1, self.v_DG1)

        if self.method == Boundary_Method.dynamics:

            # STRATEGY
            # obtain a coordinate field for all the nodes
            self.act_coords = Function(vec_DG1).project(
                x)  # actual coordinates
            self.eff_coords = eff_coords  # effective coordinates
            self.output = Function(DG1)
            self.on_exterior = find_domain_boundaries(mesh)

            self.gaussian_elimination_kernel = kernels.GaussianElimination(DG1)

        elif self.method == Boundary_Method.physics:

            self.bottom_kernel = kernels.PhysicsRecoveryBottom()
            self.top_kernel = kernels.PhysicsRecoveryTop()
Example #13
0
class Boundary_Recoverer(object):
    """
    An object that performs a second-order-accurate `recovery` process
    at the domain boundaries. This is necessary
    because the :class:`Averager` object does not recover a field
    with sufficient accuracy at the boundaries.

    The strategy is to minimise the curvature of the function in
    the boundary cells, subject to the constraints of conserved
    mass and continuity on the interior facets. The quickest way
    to perform this is by using the analytic solution and a parloop.

    Currently this is only implemented for the (DG0, DG1, CG1)
    set of spaces, and only on a `PeriodicIntervalMesh` or
    `PeriodicUnitIntervalMesh` that has been extruded.

    :arg v_CG1: the continuous function after the first recovery
             is performed. Should be in CG1. This is correct
             on the interior of the domain.
    :arg v_DG1: the function to be output. Should be in DG1.
    :arg method: a Boundary_Method Enum object.
    :arg eff_coords: the effective coordinates of the initial recovery.
                     This must be provided for the dynamics Boundary_Method.
    """
    def __init__(self,
                 v_CG1,
                 v_DG1,
                 method=Boundary_Method.physics,
                 eff_coords=None):

        self.v_DG1 = v_DG1
        self.v_CG1 = v_CG1
        self.v_DG1_old = Function(v_DG1.function_space())
        self.eff_coords = eff_coords

        self.method = method
        mesh = v_CG1.function_space().mesh()
        DG0 = FunctionSpace(mesh, "DG", 0)
        CG1 = FunctionSpace(mesh, "CG", 1)

        if DG0.extruded:
            cell = mesh._base_mesh.ufl_cell().cellname()
            DG1_hori_elt = FiniteElement("DG", cell, 1, variant="equispaced")
            DG1_vert_elt = FiniteElement("DG",
                                         interval,
                                         1,
                                         variant="equispaced")
            DG1_element = TensorProductElement(DG1_hori_elt, DG1_vert_elt)
        else:
            cell = mesh.ufl_cell().cellname()
            DG1_element = FiniteElement("DG", cell, 1, variant="equispaced")
        DG1 = FunctionSpace(mesh, DG1_element)

        self.num_ext = find_domain_boundaries(mesh)

        # check function spaces of functions
        if self.method == Boundary_Method.dynamics:
            if v_CG1.function_space() != CG1:
                raise NotImplementedError(
                    "This boundary recovery method requires v1 to be in CG1.")
            if v_DG1.function_space() != DG1:
                raise NotImplementedError(
                    "This boundary recovery method requires v_out to be in DG1."
                )
            if eff_coords is None:
                raise ValueError(
                    'Need eff_coords field for dynamics boundary methods')

        elif self.method == Boundary_Method.physics:
            # check that mesh is valid -- must be an extruded mesh
            if not DG0.extruded:
                raise NotImplementedError(
                    'The physics boundary method only works on extruded meshes'
                )
            # base spaces
            cell = mesh._base_mesh.ufl_cell().cellname()
            w_hori = FiniteElement("DG", cell, 0, variant="equispaced")
            w_vert = FiniteElement("CG", interval, 1, variant="equispaced")
            # build element
            theta_element = TensorProductElement(w_hori, w_vert)
            # spaces
            Vtheta = FunctionSpace(mesh, theta_element)
            Vtheta_broken = FunctionSpace(mesh, BrokenElement(theta_element))
            if v_CG1.function_space() != Vtheta:
                raise ValueError(
                    "This boundary recovery method requires v_CG1 to be in DG0xCG1 TensorProductSpace."
                )
            if v_DG1.function_space() != Vtheta_broken:
                raise ValueError(
                    "This boundary recovery method requires v_DG1 to be in the broken DG0xCG1 TensorProductSpace."
                )
        else:
            raise ValueError(
                "Boundary method should be a Boundary Method Enum object.")

        vec_DG1 = VectorFunctionSpace(DG0.mesh(), DG1_element)
        x = SpatialCoordinate(DG0.mesh())
        self.interpolator = Interpolator(self.v_CG1, self.v_DG1)

        if self.method == Boundary_Method.dynamics:

            # STRATEGY
            # obtain a coordinate field for all the nodes
            self.act_coords = Function(vec_DG1).project(
                x)  # actual coordinates
            self.eff_coords = eff_coords  # effective coordinates
            self.output = Function(DG1)
            self.on_exterior = find_domain_boundaries(mesh)

            self.gaussian_elimination_kernel = kernels.GaussianElimination(DG1)

        elif self.method == Boundary_Method.physics:

            self.bottom_kernel = kernels.PhysicsRecoveryBottom()
            self.top_kernel = kernels.PhysicsRecoveryTop()

    def apply(self):
        self.interpolator.interpolate()
        if self.method == Boundary_Method.physics:
            self.bottom_kernel.apply(self.v_DG1, self.v_CG1)
            self.top_kernel.apply(self.v_DG1, self.v_CG1)

        else:
            self.v_DG1_old.assign(self.v_DG1)
            self.gaussian_elimination_kernel.apply(self.v_DG1_old, self.v_DG1,
                                                   self.act_coords,
                                                   self.eff_coords,
                                                   self.num_ext)
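
A minimal usage sketch of the physics branch above. It assumes the firedrake imports used throughout these examples and that Boundary_Recoverer and Boundary_Method are importable from the recovery module; the mesh resolution is an arbitrary placeholder.

from firedrake import (UnitIntervalMesh, ExtrudedMesh, FunctionSpace, Function,
                       FiniteElement, TensorProductElement, BrokenElement, interval)

mesh = ExtrudedMesh(UnitIntervalMesh(8), layers=8, layer_height=0.125)

# the DG0 (horizontal) x CG1 (vertical) theta-like space and its broken counterpart,
# matching the spaces the physics method checks for
cell = mesh._base_mesh.ufl_cell().cellname()
theta_elt = TensorProductElement(
    FiniteElement("DG", cell, 0, variant="equispaced"),
    FiniteElement("CG", interval, 1, variant="equispaced"))
Vtheta = FunctionSpace(mesh, theta_elt)
Vtheta_broken = FunctionSpace(mesh, BrokenElement(theta_elt))

v_CG1 = Function(Vtheta)         # first-recovery field, accurate in the interior
v_DG1 = Function(Vtheta_broken)  # output field, corrected at the top and bottom boundaries
recoverer = Boundary_Recoverer(v_CG1, v_DG1, method=Boundary_Method.physics)
recoverer.apply()
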
Example #14
0
    def __init__(self, state):
        super(Condensation, self).__init__(state)

        # obtain our fields
        self.theta = state.fields('theta')
        self.water_v = state.fields('water_v')
        self.water_c = state.fields('water_c')
        rho = state.fields('rho')

        # declare function space
        Vt = self.theta.function_space()

        param = self.state.parameters

        # define some parameters as attributes
        dt = self.state.timestepping.dt
        R_d = param.R_d
        p_0 = param.p_0
        kappa = param.kappa
        cp = param.cp
        cv = param.cv
        c_pv = param.c_pv
        c_pl = param.c_pl
        c_vv = param.c_vv
        R_v = param.R_v
        L_v0 = param.L_v0
        T_0 = param.T_0
        w_sat1 = param.w_sat1
        w_sat2 = param.w_sat2
        w_sat3 = param.w_sat3
        w_sat4 = param.w_sat4

        # make useful fields
        Pi = ((R_d * rho * self.theta / p_0)**(kappa / (1.0 - kappa)))
        T = Pi * self.theta * R_d / (R_d + self.water_v * R_v)
        p = p_0 * Pi**(1.0 / kappa)
        L_v = L_v0 - (c_pl - c_pv) * (T - T_0)
        R_m = R_d + R_v * self.water_v
        c_pml = cp + c_pv * self.water_v + c_pl * self.water_c
        c_vml = cv + c_vv * self.water_v + c_pl * self.water_c

        # use Tetens' formula to calculate w_sat
        w_sat = (w_sat1 / (p * exp(w_sat2 * (T - T_0) /
                                   (T - w_sat3)) - w_sat4))

        # make appropriate condensation rate
        dot_r_cond = ((self.water_v - w_sat) / (dt * (1.0 +
                                                      ((L_v**2.0 * w_sat) /
                                                       (cp * R_v * T**2.0)))))

        # make cond_rate function, which needs to be the same for all updates in one time step
        self.cond_rate = Function(Vt)

        # adjust cond rate so negative concentrations don't occur
        self.lim_cond_rate = Interpolator(
            conditional(dot_r_cond < 0,
                        max_value(dot_r_cond, -self.water_c / dt),
                        min_value(dot_r_cond, self.water_v / dt)),
            self.cond_rate)

        # tell the prognostic fields what to update to
        self.water_v_new = Interpolator(self.water_v - dt * self.cond_rate, Vt)
        self.water_c_new = Interpolator(self.water_c + dt * self.cond_rate, Vt)
        self.theta_new = Interpolator(
            self.theta * (1.0 + dt * self.cond_rate *
                          (cv * L_v / (c_vml * cp * T) - R_v * cv * c_pml /
                           (R_m * cp * c_vml))), Vt)
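
For reference, the saturation mixing ratio and condensation rate assembled above transcribe to (this is a restatement of the code's expressions, not an independent derivation):

\[
w_{\mathrm{sat}} = \frac{w_{\mathrm{sat1}}}{p\,\exp\!\big(w_{\mathrm{sat2}}\,(T - T_0)/(T - w_{\mathrm{sat3}})\big) - w_{\mathrm{sat4}}},
\qquad
\dot{r}_{\mathrm{cond}} = \frac{r_v - w_{\mathrm{sat}}}{\Delta t\left(1 + \dfrac{L_v^2\, w_{\mathrm{sat}}}{c_p R_v T^2}\right)},
\]

with the interpolated rate then clipped to the interval \([-r_c/\Delta t,\; r_v/\Delta t]\) so that neither the cloud water nor the vapour field can be driven negative within a time step.
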
Example #15
0
class Coalescence(Physics):
    """
    The process of the coalescence of cloud
    droplets to form rain droplets. These
    parametrizations come from Klemp and
    Wilhelmson (1978).

    :arg state: :class:`.State` object.
    :arg accretion: Boolean which determines
                    whether the accretion
                    process is used.
    :arg accumulation: Boolean which determines
                    whether the accumulation
                    process is used.
    """
    def __init__(self, state, accretion=True, accumulation=True):
        super().__init__(state)

        # obtain our fields
        self.water_c = state.fields('water_c')
        self.rain = state.fields('rain')

        # declare function space
        Vt = self.water_c.function_space()

        # define some parameters as attributes
        dt = state.timestepping.dt
        k_1 = Constant(0.001)  # accretion rate in 1/s
        k_2 = Constant(2.2)  # accumulation rate in 1/s
        a = Constant(0.001)  # min cloud conc in kg/kg
        b = Constant(0.875)  # power for rain in accumulation

        # make default rates to be zero
        accr_rate = Constant(0.0)
        accu_rate = Constant(0.0)

        if accretion:
            accr_rate = k_1 * (self.water_c - a)
        if accumulation:
            accu_rate = k_2 * self.water_c * self.rain**b

        # make coalescence rate function, which needs to be the same for all updates in one time step
        coalesce_rate = Function(Vt)

        # adjust coalesce rate using min_value so negative cloud concentration doesn't occur
        self.lim_coalesce_rate = Interpolator(
            conditional(
                self.rain < 0.0,  # if rain is negative do only accretion
                conditional(accr_rate < 0.0, 0.0,
                            min_value(accr_rate, self.water_c / dt)),
                # don't turn rain back into cloud
                conditional(
                    accr_rate + accu_rate < 0.0,
                    0.0,
                    # if accretion rate is negative do only accumulation
                    conditional(
                        accr_rate < 0.0, min_value(accu_rate,
                                                   self.water_c / dt),
                        min_value(accr_rate + accu_rate, self.water_c / dt)))),
            coalesce_rate)

        # tell the prognostic fields what to update to
        self.water_c_new = Interpolator(self.water_c - dt * coalesce_rate, Vt)
        self.rain_new = Interpolator(self.rain + dt * coalesce_rate, Vt)

    def apply(self):
        self.lim_coalesce_rate.interpolate()
        self.rain.assign(self.rain_new.interpolate())
        self.water_c.assign(self.water_c_new.interpolate())
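
In symbols, the rates built above (a transcription of the code) are the Klemp and Wilhelmson (1978) accretion and accumulation terms

\[
A_{\mathrm{ccr}} = k_1\,(r_c - a), \qquad A_{\mathrm{ccu}} = k_2\, r_c\, r_r^{\,b},
\]

and the coalescence rate interpolated into coalesce_rate is \(A_{\mathrm{ccr}} + A_{\mathrm{ccu}}\) limited to be non-negative and to at most \(r_c/\Delta t\), with accumulation switched off when rain is negative and accretion switched off when its own rate is negative; the updates are then \(r_c^{n+1} = r_c^n - \Delta t\, R\) and \(r_r^{n+1} = r_r^n + \Delta t\, R\).
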
Example #16
0
    def setup(self, equation, uadv=None, apply_bcs=True, *active_labels):

        self.residual = equation.residual

        if self.field_name is not None:
            self.idx = equation.field_names.index(self.field_name)
            self.fs = self.state.fields(self.field_name).function_space()
            self.residual = self.residual.label_map(
                lambda t: t.get(prognostic) == self.field_name,
                lambda t: Term(
                    split_form(t.form)[self.idx].form,
                    t.labels),
                drop)
            bcs = equation.bcs[self.field_name]

        else:
            self.field_name = equation.field_name
            self.fs = equation.function_space
            self.idx = None
            if type(self.fs.ufl_element()) is MixedElement:
                bcs = [bc for _, bcs in equation.bcs.items() for bc in bcs]
            else:
                bcs = equation.bcs[self.field_name]

        if len(active_labels) > 0:
            self.residual = self.residual.label_map(
                lambda t: any(t.has_label(time_derivative, *active_labels)),
                map_if_false=drop)

        options = self.options

        # -------------------------------------------------------------------- #
        # Routines relating to transport
        # -------------------------------------------------------------------- #

        if hasattr(self.options, 'ibp'):
            self.replace_transport_term()
        self.replace_transporting_velocity(uadv)

        # -------------------------------------------------------------------- #
        # Wrappers for embedded / recovery methods
        # -------------------------------------------------------------------- #

        if self.discretisation_option in ["embedded_dg", "recovered"]:
            # construct the embedding space if not specified
            if options.embedding_space is None:
                V_elt = BrokenElement(self.fs.ufl_element())
                self.fs = FunctionSpace(self.state.mesh, V_elt)
            else:
                self.fs = options.embedding_space
            self.xdg_in = Function(self.fs)
            self.xdg_out = Function(self.fs)
            if self.idx is None:
                self.x_projected = Function(equation.function_space)
            else:
                self.x_projected = Function(self.state.fields(self.field_name).function_space())
            new_test = TestFunction(self.fs)
            parameters = {'ksp_type': 'cg',
                          'pc_type': 'bjacobi',
                          'sub_pc_type': 'ilu'}

        # -------------------------------------------------------------------- #
        # Make boundary conditions
        # -------------------------------------------------------------------- #

        if not apply_bcs:
            self.bcs = None
        elif self.discretisation_option in ["embedded_dg", "recovered"]:
            # Transfer boundary conditions onto test function space
            self.bcs = [DirichletBC(self.fs, bc.function_arg, bc.sub_domain) for bc in bcs]
        else:
            self.bcs = bcs

        # -------------------------------------------------------------------- #
        # Modify test function for SUPG methods
        # -------------------------------------------------------------------- #

        if self.discretisation_option == "supg":
            # construct tau, if it is not specified
            dim = self.state.mesh.topological_dimension()
            if options.tau is not None:
                # if tau is provided, check that it has the right size
                tau = options.tau
                assert as_ufl(tau).ufl_shape == (dim, dim), "Provided tau has incorrect shape!"
            else:
                # create tuple of default values of size dim
                default_vals = [options.default*self.dt]*dim
                # check for directions in which the space is discontinuous
                # so that we don't apply SUPG in those directions
                if is_cg(self.fs):
                    vals = default_vals
                else:
                    space = self.fs.ufl_element().sobolev_space()
                    if space.name in ["HDiv", "DirectionalH"]:
                        vals = [default_vals[i] if space[i].name == "H1"
                                else 0. for i in range(dim)]
                    else:
                        raise ValueError("I don't know what to do with space %s" % space)
                tau = Constant(tuple([
                    tuple(
                        [vals[j] if i == j else 0. for i, v in enumerate(vals)]
                    ) for j in range(dim)])
                )
                self.solver_parameters = {'ksp_type': 'gmres',
                                          'pc_type': 'bjacobi',
                                          'sub_pc_type': 'ilu'}

            test = TestFunction(self.fs)
            new_test = test + dot(dot(uadv, tau), grad(test))

        if self.discretisation_option is not None:
            # replace the original test function with one defined on
            # the embedding space, as this is the space where the
            # problem will be solved
            self.residual = self.residual.label_map(
                all_terms,
                map_if_true=replace_test_function(new_test))

        if self.discretisation_option == "embedded_dg":
            if self.limiter is None:
                self.x_out_projector = Projector(self.xdg_out, self.x_projected,
                                                 solver_parameters=parameters)
            else:
                self.x_out_projector = Recoverer(self.xdg_out, self.x_projected)

        if self.discretisation_option == "recovered":
            # set up the necessary functions
            self.x_in = Function(self.state.fields(self.field_name).function_space())
            x_rec = Function(options.recovered_space)
            x_brok = Function(options.broken_space)

            # set up interpolators and projectors
            self.x_rec_projector = Recoverer(self.x_in, x_rec, VDG=self.fs, boundary_method=options.boundary_method)  # recovered function
            self.x_brok_projector = Projector(x_rec, x_brok)  # function projected back
            self.xdg_interpolator = Interpolator(self.x_in + x_rec - x_brok, self.xdg_in)
            if self.limiter is not None:
                self.x_brok_interpolator = Interpolator(self.xdg_out, x_brok)
                self.x_out_projector = Recoverer(x_brok, self.x_projected)
            else:
                self.x_out_projector = Projector(self.xdg_out, self.x_projected)

        # setup required functions
        self.dq = Function(self.fs)
        self.q1 = Function(self.fs)
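
The SUPG branch above modifies the test function; in symbols (a transcription of the code, writing options.default as c), the replacement is

\[
\tilde{w} = w + (\mathbf{u}_{\mathrm{adv}} \cdot \boldsymbol{\tau}) \cdot \nabla w,
\qquad
\boldsymbol{\tau} = \mathrm{diag}(\tau_1, \ldots, \tau_d),
\]

with \(\tau_i = c\,\Delta t\) in directions where the space is continuous and \(\tau_i = 0\) in directions where it is discontinuous, unless options.tau supplies the full \(d \times d\) matrix directly.
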
Example #17
0
class TimeDiscretisation(object, metaclass=ABCMeta):
    """
    Base class for time discretisation schemes.

    :arg state: :class:`.State` object.
    :arg field_name: name of the field to be evolved
    :arg equation: :class:`.Equation` object, specifying the equation
    that field satisfies
    :arg solver_parameters: solver_parameters
    :arg limiter: :class:`.Limiter` object.
    :arg options: :class:`.DiscretisationOptions` object
    """

    def __init__(self, state, field_name=None, solver_parameters=None,
                 limiter=None, options=None):

        self.state = state
        self.field_name = field_name

        self.dt = self.state.dt

        self.limiter = limiter

        self.options = options
        if options is not None:
            self.discretisation_option = options.name
        else:
            self.discretisation_option = None

        # get default solver options if none passed in
        if solver_parameters is None:
            self.solver_parameters = {'ksp_type': 'cg',
                                      'pc_type': 'bjacobi',
                                      'sub_pc_type': 'ilu'}
        else:
            self.solver_parameters = solver_parameters
            if logger.isEnabledFor(DEBUG):
                self.solver_parameters["ksp_monitor_true_residual"] = None

    def setup(self, equation, uadv=None, apply_bcs=True, *active_labels):

        self.residual = equation.residual

        if self.field_name is not None:
            self.idx = equation.field_names.index(self.field_name)
            self.fs = self.state.fields(self.field_name).function_space()
            self.residual = self.residual.label_map(
                lambda t: t.get(prognostic) == self.field_name,
                lambda t: Term(
                    split_form(t.form)[self.idx].form,
                    t.labels),
                drop)
            bcs = equation.bcs[self.field_name]

        else:
            self.field_name = equation.field_name
            self.fs = equation.function_space
            self.idx = None
            if type(self.fs.ufl_element()) is MixedElement:
                bcs = [bc for _, bcs in equation.bcs.items() for bc in bcs]
            else:
                bcs = equation.bcs[self.field_name]

        if len(active_labels) > 0:
            self.residual = self.residual.label_map(
                lambda t: any(t.has_label(time_derivative, *active_labels)),
                map_if_false=drop)

        options = self.options

        # -------------------------------------------------------------------- #
        # Routines relating to transport
        # -------------------------------------------------------------------- #

        if hasattr(self.options, 'ibp'):
            self.replace_transport_term()
        self.replace_transporting_velocity(uadv)

        # -------------------------------------------------------------------- #
        # Wrappers for embedded / recovery methods
        # -------------------------------------------------------------------- #

        if self.discretisation_option in ["embedded_dg", "recovered"]:
            # construct the embedding space if not specified
            if options.embedding_space is None:
                V_elt = BrokenElement(self.fs.ufl_element())
                self.fs = FunctionSpace(self.state.mesh, V_elt)
            else:
                self.fs = options.embedding_space
            self.xdg_in = Function(self.fs)
            self.xdg_out = Function(self.fs)
            if self.idx is None:
                self.x_projected = Function(equation.function_space)
            else:
                self.x_projected = Function(self.state.fields(self.field_name).function_space())
            new_test = TestFunction(self.fs)
            parameters = {'ksp_type': 'cg',
                          'pc_type': 'bjacobi',
                          'sub_pc_type': 'ilu'}

        # -------------------------------------------------------------------- #
        # Make boundary conditions
        # -------------------------------------------------------------------- #

        if not apply_bcs:
            self.bcs = None
        elif self.discretisation_option in ["embedded_dg", "recovered"]:
            # Transfer boundary conditions onto test function space
            self.bcs = [DirichletBC(self.fs, bc.function_arg, bc.sub_domain) for bc in bcs]
        else:
            self.bcs = bcs

        # -------------------------------------------------------------------- #
        # Modify test function for SUPG methods
        # -------------------------------------------------------------------- #

        if self.discretisation_option == "supg":
            # construct tau, if it is not specified
            dim = self.state.mesh.topological_dimension()
            if options.tau is not None:
                # if tau is provided, check that it has the right size
                tau = options.tau
                assert as_ufl(tau).ufl_shape == (dim, dim), "Provided tau has incorrect shape!"
            else:
                # create tuple of default values of size dim
                default_vals = [options.default*self.dt]*dim
                # check for directions in which the space is discontinuous
                # so that we don't apply SUPG in those directions
                if is_cg(self.fs):
                    vals = default_vals
                else:
                    space = self.fs.ufl_element().sobolev_space()
                    if space.name in ["HDiv", "DirectionalH"]:
                        vals = [default_vals[i] if space[i].name == "H1"
                                else 0. for i in range(dim)]
                    else:
                        raise ValueError("I don't know what to do with space %s" % space)
                tau = Constant(tuple([
                    tuple(
                        [vals[j] if i == j else 0. for i, v in enumerate(vals)]
                    ) for j in range(dim)])
                )
                self.solver_parameters = {'ksp_type': 'gmres',
                                          'pc_type': 'bjacobi',
                                          'sub_pc_type': 'ilu'}

            test = TestFunction(self.fs)
            new_test = test + dot(dot(uadv, tau), grad(test))

        if self.discretisation_option is not None:
            # replace the original test function with one defined on
            # the embedding space, as this is the space where the
            # problem will be solved
            self.residual = self.residual.label_map(
                all_terms,
                map_if_true=replace_test_function(new_test))

        if self.discretisation_option == "embedded_dg":
            if self.limiter is None:
                self.x_out_projector = Projector(self.xdg_out, self.x_projected,
                                                 solver_parameters=parameters)
            else:
                self.x_out_projector = Recoverer(self.xdg_out, self.x_projected)

        if self.discretisation_option == "recovered":
            # set up the necessary functions
            self.x_in = Function(self.state.fields(self.field_name).function_space())
            x_rec = Function(options.recovered_space)
            x_brok = Function(options.broken_space)

            # set up interpolators and projectors
            self.x_rec_projector = Recoverer(self.x_in, x_rec, VDG=self.fs, boundary_method=options.boundary_method)  # recovered function
            self.x_brok_projector = Projector(x_rec, x_brok)  # function projected back
            self.xdg_interpolator = Interpolator(self.x_in + x_rec - x_brok, self.xdg_in)
            if self.limiter is not None:
                self.x_brok_interpolator = Interpolator(self.xdg_out, x_brok)
                self.x_out_projector = Recoverer(x_brok, self.x_projected)
            else:
                self.x_out_projector = Projector(self.xdg_out, self.x_projected)

        # setup required functions
        self.dq = Function(self.fs)
        self.q1 = Function(self.fs)

    def pre_apply(self, x_in, discretisation_option):
        """
        Extra steps applied before the discretisation step when using an
        embedded method, which might be either the plain embedded method
        or the recovered space scheme.

        :arg x_in: the input set of prognostic fields.
        :arg discretisation_option: string specifying which scheme to use.
        """
        if discretisation_option == "embedded_dg":
            try:
                self.xdg_in.interpolate(x_in)
            except NotImplementedError:
                self.xdg_in.project(x_in)

        elif discretisation_option == "recovered":
            self.x_in.assign(x_in)
            self.x_rec_projector.project()
            self.x_brok_projector.project()
            self.xdg_interpolator.interpolate()

    def post_apply(self, x_out, discretisation_option):
        """
        The projection steps, returning a field to its original space
        for an embedded DG scheme. For the case of the
        recovered scheme, there are two options dependent on whether
        the scheme is limited or not.

        :arg x_out: the outgoing field.
        :arg discretisation_option: string specifying which option to use.
        """
        if discretisation_option == "recovered" and self.limiter is not None:
            self.x_brok_interpolator.interpolate()
        self.x_out_projector.project()
        x_out.assign(self.x_projected)

    @abstractproperty
    def lhs(self):
        l = self.residual.label_map(
            lambda t: t.has_label(time_derivative),
            map_if_true=replace_subject(self.dq, self.idx),
            map_if_false=drop)

        return l.form

    @abstractproperty
    def rhs(self):
        r = self.residual.label_map(
            all_terms,
            map_if_true=replace_subject(self.q1, self.idx))

        r = r.label_map(
            lambda t: t.has_label(time_derivative),
            map_if_false=lambda t: -self.dt*t)

        return r.form

    def replace_transport_term(self):
        """
        This routine allows the default transport term to be replaced with a
        different one, specified through the transport options.

        This is necessary because when the prognostic equations are declared,
        the whole transport term is constructed with default options (such as
        the default 'ibp' value), which a particular time discretisation may
        need to override.
        """

        # Extract transport term of equation
        old_transport_term_list = self.residual.label_map(
            lambda t: t.has_label(transport), map_if_false=drop)

        # Replacing the transport term is only supported when there is exactly one
        if len(old_transport_term_list.terms) > 1:
            raise NotImplementedError('Cannot replace transport terms when there is more than one')

        # Then we should only have one transport term
        old_transport_term = old_transport_term_list.terms[0]

        # If the transport term has an ibp label, then it could be replaced
        if old_transport_term.has_label(ibp_label) and hasattr(self.options, 'ibp'):
            # Do the options specify a different ibp to the old transport term?
            if old_transport_term.labels['ibp'] != self.options.ibp:
                # Set up a new transport term
                field = self.state.fields(self.field_name)
                test = TestFunction(self.fs)

                # Set up new transport term (depending on the type of transport equation)
                if old_transport_term.labels['transport'] == TransportEquationType.advective:
                    new_transport_term = advection_form(self.state, test, field, ibp=self.options.ibp)
                elif old_transport_term.labels['transport'] == TransportEquationType.conservative:
                    new_transport_term = continuity_form(self.state, test, field, ibp=self.options.ibp)
                else:
                    raise NotImplementedError(f'Replacement of transport term not implemented yet for {old_transport_term.labels["transport"]}')

                # Finally, drop the old transport term and add the new one
                self.residual = self.residual.label_map(
                    lambda t: t.has_label(transport), map_if_true=drop)
                self.residual += subject(new_transport_term, field)

    def replace_transporting_velocity(self, uadv):
        # replace the transporting velocity in any terms that contain it
        if any([t.has_label(transporting_velocity) for t in self.residual]):
            assert uadv is not None
            if uadv == "prognostic":
                self.residual = self.residual.label_map(
                    lambda t: t.has_label(transporting_velocity),
                    map_if_true=lambda t: Term(ufl.replace(
                        t.form, {t.get(transporting_velocity): split(t.get(subject))[0]}), t.labels)
                )
            else:
                self.residual = self.residual.label_map(
                    lambda t: t.has_label(transporting_velocity),
                    map_if_true=lambda t: Term(ufl.replace(
                        t.form, {t.get(transporting_velocity): uadv}), t.labels)
                )
            self.residual = transporting_velocity.update_value(self.residual, uadv)

    @cached_property
    def solver(self):
        # setup solver using lhs and rhs defined in derived class
        problem = NonlinearVariationalProblem(self.lhs-self.rhs, self.dq, bcs=self.bcs)
        solver_name = self.field_name+self.__class__.__name__
        return NonlinearVariationalSolver(problem, solver_parameters=self.solver_parameters, options_prefix=solver_name)

    @abstractmethod
    def apply(self, x_in, x_out):
        """
        Takes x_in as input, computes L(x_in) as defined by the equation,
        and writes the result to x_out.

        :arg x_in: :class:`.Function` object, the input Function.
        :arg x_out: :class:`.Function` object, the output Function.
        """
        pass
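
Reading the default lhs and rhs bodies above (which concrete subclasses may override): with M denoting the time-derivative part of the labelled residual and R the remaining retained terms, the solver assembles lhs - rhs = 0, i.e. an explicit update of the form

\[
M(\mathrm{dq}) = M(\mathrm{q1}) - \Delta t\, R(\mathrm{q1}),
\]

so q1 holds the current value and dq receives the updated one, before post_apply returns it to the original space where necessary.
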
Example #18
0
class Condensation(Physics):
    """
    The process of condensation of water vapour
    into liquid water and evaporation of liquid
    water into water vapour, with the associated
    latent heat changes. The parametrization follows
    that used in Bryan and Fritsch (2002).

    :arg state: :class:`.State` object.
    :arg iterations: number of iterations of the condensation
         scheme to perform per time step.
    """

    def __init__(self, state, iterations=1):
        super().__init__(state)

        self.iterations = iterations
        # obtain our fields
        self.theta = state.fields('theta')
        self.water_v = state.fields('vapour_mixing_ratio')
        self.water_c = state.fields('cloud_liquid_mixing_ratio')
        rho = state.fields('rho')
        try:
            # TODO: use the phase flag for the tracers here
            rain = state.fields('rain_mixing_ratio')
            water_l = self.water_c + rain
        except NotImplementedError:
            water_l = self.water_c

        # declare function space
        Vt = self.theta.function_space()

        # make rho variables
        # we recover rho into theta space
        h_deg = rho.function_space().ufl_element().degree()[0]
        v_deg = rho.function_space().ufl_element().degree()[1]
        if v_deg == 0 and h_deg == 0:
            boundary_method = Boundary_Method.physics
        else:
            boundary_method = None
        Vt_broken = FunctionSpace(state.mesh, BrokenElement(Vt.ufl_element()))
        rho_averaged = Function(Vt)
        self.rho_recoverer = Recoverer(rho, rho_averaged, VDG=Vt_broken, boundary_method=boundary_method)

        # define some parameters as attributes
        dt = state.dt
        R_d = state.parameters.R_d
        cp = state.parameters.cp
        cv = state.parameters.cv
        c_pv = state.parameters.c_pv
        c_pl = state.parameters.c_pl
        c_vv = state.parameters.c_vv
        R_v = state.parameters.R_v

        # make useful fields
        exner = thermodynamics.exner_pressure(state.parameters, rho_averaged, self.theta)
        T = thermodynamics.T(state.parameters, self.theta, exner, r_v=self.water_v)
        p = thermodynamics.p(state.parameters, exner)
        L_v = thermodynamics.Lv(state.parameters, T)
        R_m = R_d + R_v * self.water_v
        c_pml = cp + c_pv * self.water_v + c_pl * water_l
        c_vml = cv + c_vv * self.water_v + c_pl * water_l

        # use Tetens' formula to calculate w_sat
        w_sat = thermodynamics.r_sat(state.parameters, T, p)

        # make appropriate condensation rate
        dot_r_cond = ((self.water_v - w_sat)
                      / (dt * (1.0 + ((L_v ** 2.0 * w_sat)
                                      / (cp * R_v * T ** 2.0)))))

        # make cond_rate function, which needs to be the same for all updates in one time step
        cond_rate = Function(Vt)

        # adjust cond rate so negative concentrations don't occur
        self.lim_cond_rate = Interpolator(conditional(dot_r_cond < 0,
                                                      max_value(dot_r_cond, - self.water_c / dt),
                                                      min_value(dot_r_cond, self.water_v / dt)), cond_rate)

        # tell the prognostic fields what to update to
        self.water_v_new = Interpolator(self.water_v - dt * cond_rate, Vt)
        self.water_c_new = Interpolator(self.water_c + dt * cond_rate, Vt)
        self.theta_new = Interpolator(self.theta
                                      * (1.0 + dt * cond_rate
                                         * (cv * L_v / (c_vml * cp * T)
                                            - R_v * cv * c_pml / (R_m * cp * c_vml))), Vt)

    def apply(self):
        self.rho_recoverer.project()
        for i in range(self.iterations):
            self.lim_cond_rate.interpolate()
            self.theta.assign(self.theta_new.interpolate())
            self.water_v.assign(self.water_v_new.interpolate())
            self.water_c.assign(self.water_c_new.interpolate())
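
For reference, the update expressions interpolated above transcribe to

\[
r_v^{n+1} = r_v^n - \Delta t\,\dot{r}_{\mathrm{cond}}, \qquad
r_c^{n+1} = r_c^n + \Delta t\,\dot{r}_{\mathrm{cond}}, \qquad
\theta^{n+1} = \theta^n\left(1 + \Delta t\,\dot{r}_{\mathrm{cond}}\left(\frac{c_v L_v}{c_{vml}\, c_p\, T} - \frac{R_v\, c_v\, c_{pml}}{R_m\, c_p\, c_{vml}}\right)\right),
\]

applied self.iterations times per step, with the condensation rate recomputed (and re-limited) on each pass.
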
Example #19
0
    def __init__(self, v_in, v_out, VDG=None, boundary_method=None):

        # check if v_in is valid
        if isinstance(v_in, expression.Expression) or not isinstance(
                v_in, (ufl.core.expr.Expr, function.Function)):
            raise ValueError(
                "Can only recover UFL expression or Functions not '%s'" %
                type(v_in))

        self.v_in = v_in
        self.v_out = v_out
        self.V = v_out.function_space()
        if VDG is not None:
            self.v = Function(VDG)
            self.interpolator = Interpolator(v_in, self.v)
        else:
            self.v = v_in
            self.interpolator = None

        self.VDG = VDG
        self.boundary_method = boundary_method
        self.averager = Averager(self.v, self.v_out)

        # check boundary method options are valid
        if boundary_method is not None:
            if boundary_method != Boundary_Method.dynamics and boundary_method != Boundary_Method.physics:
                raise ValueError(
                    "Boundary method must be a Boundary_Method Enum object.")
            if VDG is None:
                raise ValueError(
                    "If boundary_method is specified, VDG also needs specifying."
                )

            # now specify things that we'll need if we are doing boundary recovery
            if boundary_method == Boundary_Method.physics:
                # check dimensions
                if self.V.value_size != 1:
                    raise ValueError(
                        'This method only works for scalar functions.')
                self.boundary_recoverer = Boundary_Recoverer(
                    self.v_out, self.v, method=Boundary_Method.physics)
            else:

                mesh = self.V.mesh()
                # this ensures we get the pure function space, not an indexed function space
                V0 = FunctionSpace(mesh,
                                   self.v_in.function_space().ufl_element())
                VCG1 = FunctionSpace(mesh, "CG", 1)
                if V0.extruded:
                    cell = mesh._base_mesh.ufl_cell().cellname()
                    DG1_hori_elt = FiniteElement("DG",
                                                 cell,
                                                 1,
                                                 variant="equispaced")
                    DG1_vert_elt = FiniteElement("DG",
                                                 interval,
                                                 1,
                                                 variant="equispaced")
                    DG1_element = TensorProductElement(DG1_hori_elt,
                                                       DG1_vert_elt)
                else:
                    cell = mesh.ufl_cell().cellname()
                    DG1_element = FiniteElement("DG",
                                                cell,
                                                1,
                                                variant="equispaced")
                VDG1 = FunctionSpace(mesh, DG1_element)

                if self.V.value_size == 1:
                    coords_to_adjust = find_coords_to_adjust(V0, VDG1)

                    self.boundary_recoverer = Boundary_Recoverer(
                        self.v_out,
                        self.v,
                        coords_to_adjust=coords_to_adjust,
                        method=Boundary_Method.dynamics)
                else:
                    VuDG1 = VectorFunctionSpace(mesh, DG1_element)
                    coords_to_adjust = find_coords_to_adjust(V0, VuDG1)

                    # now, break the problem down into components
                    v_scalars = []
                    v_out_scalars = []
                    self.boundary_recoverers = []
                    self.project_to_scalars_CG = []
                    self.extra_averagers = []
                    coords_to_adjust_list = []
                    for i in range(self.V.value_size):
                        v_scalars.append(Function(VDG1))
                        v_out_scalars.append(Function(VCG1))
                        coords_to_adjust_list.append(
                            Function(VDG1).project(coords_to_adjust[i]))
                        self.project_to_scalars_CG.append(
                            Projector(self.v_out[i], v_out_scalars[i]))
                        self.boundary_recoverers.append(
                            Boundary_Recoverer(
                                v_out_scalars[i],
                                v_scalars[i],
                                method=Boundary_Method.dynamics,
                                coords_to_adjust=coords_to_adjust_list[i]))
                        # need an extra averager that works on the scalar fields rather than the vector one
                        self.extra_averagers.append(
                            Averager(v_scalars[i], v_out_scalars[i]))

                    # the boundary recoverer needs to be done on scalar fields
                    # so need to extract component and restore it after the boundary recovery is done
                    self.interpolate_to_vector = Interpolator(
                        as_vector(v_out_scalars), self.v_out)
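
A minimal usage sketch of this Recoverer, assuming the firedrake imports used in these examples, that Recoverer and Boundary_Method are importable, and that Recoverer exposes the project() method used elsewhere in this document (e.g. rho_recoverer.project()); the mesh and field are arbitrary placeholders. Note that the dynamics method on a flat 2D mesh requires quadrilateral cells, as checked in Boundary_Recoverer.

from firedrake import (UnitSquareMesh, FunctionSpace, FiniteElement,
                       Function, SpatialCoordinate)

mesh = UnitSquareMesh(10, 10, quadrilateral=True)
x, y = SpatialCoordinate(mesh)

V0 = FunctionSpace(mesh, "DG", 0)     # original low-order space
VCG1 = FunctionSpace(mesh, "CG", 1)   # target space for the recovered field
DG1_elt = FiniteElement("DG", mesh.ufl_cell().cellname(), 1, variant="equispaced")
VDG1 = FunctionSpace(mesh, DG1_elt)   # intermediate space used for the boundary correction

rho_low = Function(V0).interpolate(1.0 + x * y)  # some DG0 field to recover
rho_rec = Function(VCG1)

recoverer = Recoverer(rho_low, rho_rec, VDG=VDG1,
                      boundary_method=Boundary_Method.dynamics)
recoverer.project()   # averaging pass followed by the boundary correction
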
Example #20
0
    def __init__(self, state):
        super().__init__(state)

        # obtain our fields
        self.theta = state.fields('theta')
        self.water_v = state.fields('water_v')
        self.rain = state.fields('rain')
        rho = state.fields('rho')
        try:
            water_c = state.fields('water_c')
            water_l = self.rain + water_c
        except NotImplementedError:
            water_l = self.rain

        # declare function space
        Vt = self.theta.function_space()

        # make rho variables
        # we recover rho into theta space
        if state.vertical_degree == 0 and state.horizontal_degree == 0:
            boundary_method = Boundary_Method.physics
        else:
            boundary_method = None
        Vt_broken = FunctionSpace(state.mesh, BrokenElement(Vt.ufl_element()))
        rho_averaged = Function(Vt)
        self.rho_recoverer = Recoverer(rho,
                                       rho_averaged,
                                       VDG=Vt_broken,
                                       boundary_method=boundary_method)

        # define some parameters as attributes
        dt = state.timestepping.dt
        R_d = state.parameters.R_d
        cp = state.parameters.cp
        cv = state.parameters.cv
        c_pv = state.parameters.c_pv
        c_pl = state.parameters.c_pl
        c_vv = state.parameters.c_vv
        R_v = state.parameters.R_v

        # make useful fields
        Pi = thermodynamics.pi(state.parameters, rho_averaged, self.theta)
        T = thermodynamics.T(state.parameters,
                             self.theta,
                             Pi,
                             r_v=self.water_v)
        p = thermodynamics.p(state.parameters, Pi)
        L_v = thermodynamics.Lv(state.parameters, T)
        R_m = R_d + R_v * self.water_v
        c_pml = cp + c_pv * self.water_v + c_pl * water_l
        c_vml = cv + c_vv * self.water_v + c_pl * water_l

        # use Tetens' formula to calculate w_sat
        w_sat = thermodynamics.r_sat(state.parameters, T, p)

        # expression for ventilation factor
        a = Constant(1.6)
        b = Constant(124.9)
        c = Constant(0.2046)
        C = a + b * (rho_averaged * self.rain)**c

        # make appropriate evaporation rate
        f = Constant(5.4e5)
        g = Constant(2.55e6)
        h = Constant(0.525)
        dot_r_evap = (((1 - self.water_v / w_sat) * C *
                       (rho_averaged * self.rain)**h) /
                      (rho_averaged * (f + g / (p * w_sat))))

        # make evap_rate function, which needs to be the same for all updates in one time step
        evap_rate = Function(Vt)

        # adjust evap rate so negative rain doesn't occur
        self.lim_evap_rate = Interpolator(
            conditional(
                dot_r_evap < 0, 0.0,
                conditional(self.rain < 0.0, 0.0,
                            min_value(dot_r_evap, self.rain / dt))), evap_rate)

        # tell the prognostic fields what to update to
        self.water_v_new = Interpolator(self.water_v + dt * evap_rate, Vt)
        self.rain_new = Interpolator(self.rain - dt * evap_rate, Vt)
        self.theta_new = Interpolator(
            self.theta * (1.0 - dt * evap_rate *
                          (cv * L_v / (c_vml * cp * T) - R_v * cv * c_pml /
                           (R_m * cp * c_vml))), Vt)
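
For reference, the ventilation factor and evaporation rate assembled above transcribe to

\[
C = a + b\,(\rho\, r_r)^{c}, \qquad
\dot{r}_{\mathrm{evap}} = \frac{\big(1 - r_v/w_{\mathrm{sat}}\big)\, C\, (\rho\, r_r)^{h}}{\rho\,\big(f + g/(p\, w_{\mathrm{sat}})\big)},
\]

where \(\rho\) is the recovered density rho_averaged; the interpolated rate is set to zero when it (or the rain field) is negative and is otherwise capped at \(r_r/\Delta t\), so rain cannot be driven negative within a step.
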
Example #21
0
class Boundary_Recoverer(object):
    """
    An object that performs a second-order-accurate `recovery` process
    at the domain boundaries. This is necessary
    because the :class:`Averager` object does not recover a field
    with sufficient accuracy at the boundaries.

    The strategy is to minimise the curvature of the function in
    the boundary cells, subject to the constraints of conserved
    mass and continuity on the interior facets. The quickest way
    to perform this is by using the analytic solution and a parloop.

    Currently this is only implemented for the (DG0, DG1, CG1)
    set of spaces, and only on a `PeriodicIntervalMesh` or
    `PeriodicUnitIntervalMesh` that has been extruded.

    :arg v_CG1: the continuous function after the first recovery
             is performed. Should be in CG1. This is correct
             on the interior of the domain.
    :arg v_DG1: the function to be output. Should be in DG1.
    :arg method: a Boundary_Method Enum object.
    :arg coords_to_adjust: a DG1 field containing 1 at locations of
                           coords that must be adjusted to give their
                           effective coords.
    """
    def __init__(self,
                 v_CG1,
                 v_DG1,
                 method=Boundary_Method.physics,
                 coords_to_adjust=None):

        self.v_DG1 = v_DG1
        self.v_CG1 = v_CG1
        self.v_DG1_old = Function(v_DG1.function_space())
        self.coords_to_adjust = coords_to_adjust

        self.method = method
        mesh = v_CG1.function_space().mesh()
        VDG0 = FunctionSpace(mesh, "DG", 0)
        VCG1 = FunctionSpace(mesh, "CG", 1)

        if VDG0.extruded:
            cell = mesh._base_mesh.ufl_cell().cellname()
            DG1_hori_elt = FiniteElement("DG", cell, 1, variant="equispaced")
            DG1_vert_elt = FiniteElement("DG",
                                         interval,
                                         1,
                                         variant="equispaced")
            DG1_element = TensorProductElement(DG1_hori_elt, DG1_vert_elt)
        else:
            cell = mesh.ufl_cell().cellname()
            DG1_element = FiniteElement("DG", cell, 1, variant="equispaced")
        VDG1 = FunctionSpace(mesh, DG1_element)

        self.num_ext = Function(VDG0)

        # check function spaces of functions
        if self.method == Boundary_Method.dynamics:
            if v_CG1.function_space() != VCG1:
                raise NotImplementedError(
                    "This boundary recovery method requires v1 to be in CG1.")
            if v_DG1.function_space() != VDG1:
                raise NotImplementedError(
                    "This boundary recovery method requires v_out to be in DG1."
                )
            # check whether mesh is valid
            if mesh.topological_dimension() == 2:
                # if mesh is extruded then we're fine, but if not the cells need to be quadrilaterals
                if not VDG0.extruded and mesh.ufl_cell().cellname(
                ) != 'quadrilateral':
                    raise NotImplementedError(
                        'For 2D meshes this recovery method requires that elements are quadrilaterals'
                    )
            elif mesh.topological_dimension() == 3:
                # assume that 3D mesh is extruded
                if mesh._base_mesh.ufl_cell().cellname() != 'quadrilateral':
                    raise NotImplementedError(
                        'For 3D extruded meshes this recovery method requires a base mesh with quadrilateral elements'
                    )
            elif mesh.topological_dimension() != 1:
                raise NotImplementedError(
                    'This boundary recovery is implemented only on certain classes of mesh.'
                )
            if coords_to_adjust is None:
                raise ValueError(
                    'Need coords_to_adjust field for dynamics boundary methods'
                )

        elif self.method == Boundary_Method.physics:
            # check that mesh is valid -- must be an extruded mesh
            if not VDG0.extruded:
                raise NotImplementedError(
                    'The physics boundary method only works on extruded meshes'
                )
            # base spaces
            cell = mesh._base_mesh.ufl_cell().cellname()
            w_hori = FiniteElement("DG", cell, 0, variant="equispaced")
            w_vert = FiniteElement("CG", interval, 1, variant="equispaced")
            # build element
            theta_element = TensorProductElement(w_hori, w_vert)
            # spaces
            Vtheta = FunctionSpace(mesh, theta_element)
            Vtheta_broken = FunctionSpace(mesh, BrokenElement(theta_element))
            if v_CG1.function_space() != Vtheta:
                raise ValueError(
                    "This boundary recovery method requires v_CG1 to be in DG0xCG1 TensorProductSpace."
                )
            if v_DG1.function_space() != Vtheta_broken:
                raise ValueError(
                    "This boundary recovery method requires v_DG1 to be in the broken DG0xCG1 TensorProductSpace."
                )
        else:
            raise ValueError(
                "Boundary method should be a Boundary Method Enum object.")

        VuDG1 = VectorFunctionSpace(VDG0.mesh(), DG1_element)
        x = SpatialCoordinate(VDG0.mesh())
        self.interpolator = Interpolator(self.v_CG1, self.v_DG1)

        if self.method == Boundary_Method.dynamics:

            # STRATEGY
            # obtain a coordinate field for all the nodes
            self.act_coords = Function(VuDG1).project(x)  # actual coordinates
            self.eff_coords = Function(VuDG1).project(x)  # effective coordinates
            self.output = Function(VDG1)

            shapes = {"nDOFs": self.v_DG1.function_space().finat_element.space_dimension(),
                      "dim": np.prod(VuDG1.shape, dtype=int)}

            num_ext_domain = ("{{[i]: 0 <= i < {nDOFs}}}").format(**shapes)
            num_ext_instructions = ("""
            <float64> SUM_EXT = 0
            for i
                SUM_EXT = SUM_EXT + EXT_V1[i]
            end

            NUM_EXT[0] = SUM_EXT
            """)

            coords_domain = ("{{[i, j, k, ii, jj, kk, ll, mm, iii, kkk]: "
                             "0 <= i < {nDOFs} and "
                             "0 <= j < {nDOFs} and 0 <= k < {dim} and "
                             "0 <= ii < {nDOFs} and 0 <= jj < {nDOFs} and "
                             "0 <= kk < {dim} and 0 <= ll < {dim} and "
                             "0 <= mm < {dim} and 0 <= iii < {nDOFs} and "
                             "0 <= kkk < {dim}}}").format(**shapes)
            coords_insts = (
                """
                            <float64> sum_V1_ext = 0
                            <int> index = 100
                            <float64> dist = 0.0
                            <float64> max_dist = 0.0
                            <float64> min_dist = 0.0
                            """

                # only do adjustment in cells with at least one DOF to adjust
                """
                            if NUM_EXT[0] > 0
                            """

                # find the maximum distance between DOFs in this cell, to serve as starting point for finding min distances
                """
                                for i
                                    for j
                                        dist = 0.0
                                        for k
                                            dist = dist + pow(ACT_COORDS[i,k] - ACT_COORDS[j,k], 2.0)
                                        end
                                        dist = pow(dist, 0.5) {{id=sqrt_max_dist, dep=*}}
                                        max_dist = fmax(dist, max_dist) {{id=max_dist, dep=sqrt_max_dist}}
                                    end
                                end
                            """

                # loop through cells and find which ones to adjust
                """
                                for ii
                                    if EXT_V1[ii] > 0.5
                            """

                # find closest interior node
                """
                                        min_dist = max_dist
                                        index = 100
                                        for jj
                                            if EXT_V1[jj] < 0.5
                                                dist = 0.0
                                                for kk
                                                    dist = dist + pow(ACT_COORDS[ii,kk] - ACT_COORDS[jj,kk], 2)
                                                end
                                                dist = pow(dist, 0.5)
                                                if dist <= min_dist
                                                    index = jj
                                                end
                                                min_dist = fmin(min_dist, dist)
                                                for ll
                                                    EFF_COORDS[ii,ll] = 0.5 * (ACT_COORDS[ii,ll] + ACT_COORDS[index,ll])
                                                end
                                            end
                                        end
                                    else
                            """

                # for DOFs that aren't exterior, use the original coordinates
                """
                                        for mm
                                            EFF_COORDS[ii, mm] = ACT_COORDS[ii, mm]
                                        end
                                    end
                                end
                            else
                            """

                # for interior elements, just use the original coordinates
                """
                                for iii
                                    for kkk
                                        EFF_COORDS[iii, kkk] = ACT_COORDS[iii, kkk]
                                    end
                                end
                            end
                            """).format(**shapes)

            _num_ext_kernel = (num_ext_domain, num_ext_instructions)
            _eff_coords_kernel = (coords_domain, coords_insts)
            self.gaussian_elimination_kernel = kernels.GaussianElimination(VDG1)

            # find number of external DOFs per cell
            par_loop(_num_ext_kernel,
                     dx, {
                         "NUM_EXT": (self.num_ext, WRITE),
                         "EXT_V1": (self.coords_to_adjust, READ)
                     },
                     is_loopy_kernel=True)

            # find effective coordinates
            logger.warning(
                'Finding effective coordinates for boundary recovery. This could give unexpected results for deformed meshes over very steep topography.'
            )
            par_loop(_eff_coords_kernel,
                     dx, {
                         "EFF_COORDS": (self.eff_coords, WRITE),
                         "ACT_COORDS": (self.act_coords, READ),
                         "NUM_EXT": (self.num_ext, READ),
                         "EXT_V1": (self.coords_to_adjust, READ)
                     },
                     is_loopy_kernel=True)

        elif self.method == Boundary_Method.physics:

            self.bottom_kernel = kernels.PhysicsRecoveryBottom()
            self.top_kernel = kernels.PhysicsRecoveryTop()

    def apply(self):
        self.interpolator.interpolate()
        if self.method == Boundary_Method.physics:
            self.bottom_kernel.apply(self.v_DG1, self.v_CG1)
            self.top_kernel.apply(self.v_DG1, self.v_CG1)

        else:
            self.v_DG1_old.assign(self.v_DG1)
            self.gaussian_elimination_kernel.apply(self.v_DG1_old, self.v_DG1,
                                                   self.act_coords,
                                                   self.eff_coords,
                                                   self.num_ext)
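
    # Usage sketch for the recoverer above (hedged: the enclosing class name is
    # not shown in this snippet, so `BoundaryRecoverer` and the keyword-argument
    # order below are assumptions):
    #
    #     v_CG1 = Function(VCG1)
    #     v_DG1 = Function(VDG1)
    #     recoverer = BoundaryRecoverer(v_CG1, v_DG1,
    #                                   coords_to_adjust=coords_to_adjust,
    #                                   method=Boundary_Method.dynamics)
    #     recoverer.apply()  # v_DG1 now holds the boundary-corrected field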
Example #22
    def __init__(self, state, iterations=1):
        super().__init__(state)

        self.iterations = iterations
        # obtain our fields
        self.theta = state.fields('theta')
        self.water_v = state.fields('water_v')
        self.water_c = state.fields('water_c')
        rho = state.fields('rho')
        try:
            rain = state.fields('rain')
            water_l = self.water_c + rain
        except NotImplementedError:
            water_l = self.water_c

        # declare function space
        Vt = self.theta.function_space()

        # make rho variables
        # we recover rho into theta space
        if state.vertical_degree == 0 and state.horizontal_degree == 0:
            boundary_method = Boundary_Method.physics
        else:
            boundary_method = None
        Vt_broken = FunctionSpace(state.mesh, BrokenElement(Vt.ufl_element()))
        rho_averaged = Function(Vt)
        self.rho_recoverer = Recoverer(rho,
                                       rho_averaged,
                                       VDG=Vt_broken,
                                       boundary_method=boundary_method)

        # define some parameters as attributes
        dt = state.timestepping.dt
        R_d = state.parameters.R_d
        cp = state.parameters.cp
        cv = state.parameters.cv
        c_pv = state.parameters.c_pv
        c_pl = state.parameters.c_pl
        c_vv = state.parameters.c_vv
        R_v = state.parameters.R_v

        # make useful fields
        Pi = thermodynamics.pi(state.parameters, rho_averaged, self.theta)
        T = thermodynamics.T(state.parameters,
                             self.theta,
                             Pi,
                             r_v=self.water_v)
        p = thermodynamics.p(state.parameters, Pi)
        L_v = thermodynamics.Lv(state.parameters, T)
        R_m = R_d + R_v * self.water_v
        c_pml = cp + c_pv * self.water_v + c_pl * water_l
        c_vml = cv + c_vv * self.water_v + c_pl * water_l

        # use Tetens' formula to calculate w_sat
        w_sat = thermodynamics.r_sat(state.parameters, T, p)

        # make appropriate condensation rate
        dot_r_cond = ((self.water_v - w_sat) / (dt * (1.0 +
                                                      ((L_v**2.0 * w_sat) /
                                                       (cp * R_v * T**2.0)))))
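        # (this is the standard first-order saturation adjustment: the factor in
        # the denominator accounts for the change in w_sat caused by the latent
        # heating that the condensation itself produces)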

        # make the cond_rate function, which needs to be the same for all updates in one time step
        cond_rate = Function(Vt)

        # adjust cond rate so negative concentrations don't occur
        self.lim_cond_rate = Interpolator(
            conditional(dot_r_cond < 0,
                        max_value(dot_r_cond, -self.water_c / dt),
                        min_value(dot_r_cond, self.water_v / dt)), cond_rate)
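        # (evaporation is capped at -water_c / dt so cloud water cannot become
        # negative, and condensation is capped at water_v / dt so vapour cannot
        # become negative)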

        # tell the prognostic fields what to update to
        self.water_v_new = Interpolator(self.water_v - dt * cond_rate, Vt)
        self.water_c_new = Interpolator(self.water_c + dt * cond_rate, Vt)
        self.theta_new = Interpolator(
            self.theta * (1.0 + dt * cond_rate *
                          (cv * L_v / (c_vml * cp * T) - R_v * cv * c_pml /
                           (R_m * cp * c_vml))), Vt)
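
    def apply(self):
        # A plausible driver for the operators set up above (a sketch only; the
        # corresponding method in the original class may differ in detail).
        # Recover rho into the theta space, then iterate the limited saturation
        # adjustment.
        self.rho_recoverer.project()
        for _ in range(self.iterations):
            self.lim_cond_rate.interpolate()
            # evaluate all updates with the current field values ...
            theta_next = self.theta_new.interpolate()
            water_v_next = self.water_v_new.interpolate()
            water_c_next = self.water_c_new.interpolate()
            # ... before assigning them back to the prognostic fields
            self.theta.assign(theta_next)
            self.water_v.assign(water_v_next)
            self.water_c.assign(water_c_next)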
Example #23
    def __init__(self, v_CG1, v_DG1, method=Boundary_Method.physics, eff_coords=None):

        self.v_DG1 = v_DG1
        self.v_CG1 = v_CG1
        self.v_DG1_old = Function(v_DG1.function_space())
        self.eff_coords = eff_coords

        self.method = method
        mesh = v_CG1.function_space().mesh()
        DG0 = FunctionSpace(mesh, "DG", 0)
        CG1 = FunctionSpace(mesh, "CG", 1)

        if DG0.extruded:
            cell = mesh._base_mesh.ufl_cell().cellname()
            DG1_hori_elt = FiniteElement("DG", cell, 1, variant="equispaced")
            DG1_vert_elt = FiniteElement("DG", interval, 1, variant="equispaced")
            DG1_element = TensorProductElement(DG1_hori_elt, DG1_vert_elt)
        else:
            cell = mesh.ufl_cell().cellname()
            DG1_element = FiniteElement("DG", cell, 1, variant="equispaced")
        DG1 = FunctionSpace(mesh, DG1_element)

        self.num_ext = find_domain_boundaries(mesh)

        # check function spaces of functions
        if self.method == Boundary_Method.dynamics:
            if v_CG1.function_space() != CG1:
                raise NotImplementedError("This boundary recovery method requires v1 to be in CG1.")
            if v_DG1.function_space() != DG1:
                raise NotImplementedError("This boundary recovery method requires v_out to be in DG1.")
            if eff_coords is None:
                raise ValueError('Need eff_coords field for dynamics boundary methods')

        elif self.method == Boundary_Method.physics:
            # check that mesh is valid -- must be an extruded mesh
            if not DG0.extruded:
                raise NotImplementedError('The physics boundary method only works on extruded meshes')
            # check that function spaces are valid
            sub_elements = v_CG1.function_space().ufl_element().sub_elements()
            if (sub_elements[0].family() not in ['Discontinuous Lagrange', 'DQ']
                    or sub_elements[1].family() != 'Lagrange'
                    or v_CG1.function_space().ufl_element().degree() != (0, 1)):
                raise ValueError("This boundary recovery method requires v_CG1 to be in DG0xCG1 TensorProductSpace.")

            brok_elt = v_DG1.function_space().ufl_element()
            if (brok_elt.degree() != (0, 1)
                or (type(brok_elt) is not BrokenElement
                    and (brok_elt.sub_elements()[0].family() not in ['Discontinuous Lagrange', 'DQ']
                         or brok_elt.sub_elements()[1].family() != 'Discontinuous Lagrange'))):
                raise ValueError("This boundary recovery method requires v_DG1 to be in the broken DG0xCG1 TensorProductSpace.")
        else:
            raise ValueError("Boundary method should be a Boundary Method Enum object.")

        vec_DG1 = VectorFunctionSpace(DG0.mesh(), DG1_element)
        x = SpatialCoordinate(DG0.mesh())
        self.interpolator = Interpolator(self.v_CG1, self.v_DG1)

        if self.method == Boundary_Method.dynamics:

            # STRATEGY
            # obtain a coordinate field for all the nodes
            self.act_coords = Function(vec_DG1).project(x)  # actual coordinates
            self.eff_coords = eff_coords  # effective coordinates
            self.output = Function(DG1)
            self.on_exterior = find_domain_boundaries(mesh)

            self.gaussian_elimination_kernel = kernels.GaussianElimination(DG1)

        elif self.method == Boundary_Method.physics:

            self.bottom_kernel = kernels.PhysicsRecoveryBottom()
            self.top_kernel = kernels.PhysicsRecoveryTop()

    def __init__(self, diagnostic_variables, prognostic_variables, outputting,
                 simulation_parameters):

        self.diagnostic_variables = diagnostic_variables
        self.prognostic_variables = prognostic_variables
        self.outputting = outputting
        self.simulation_parameters = simulation_parameters
        Dt = Constant(simulation_parameters['dt'][-1])
        Ld = simulation_parameters['Ld'][-1]
        u = self.prognostic_variables.u
        Xi = self.prognostic_variables.dXi
        Vu = u.function_space()
        vector_u = isinstance(Vu.ufl_element(), VectorElement)
        ones = Function(
            VectorFunctionSpace(self.prognostic_variables.mesh, "CG",
                                1)).project(as_vector([Constant(1.0)]))
        self.to_update_constants = False
        self.interpolators = []
        self.projectors = []
        self.solvers = []

        mesh = u.function_space().mesh()
        x, = SpatialCoordinate(mesh)
        alphasq = simulation_parameters['alphasq'][-1]
        periodic = simulation_parameters['periodic'][-1]

        # do peakon data checks here
        true_peakon_data = simulation_parameters['true_peakon_data'][-1]
        if true_peakon_data is not None:
            self.true_peakon_file = Dataset(
                'results/' + true_peakon_data + '/data.nc', 'r')
            # check length of file is correct
            ndump = simulation_parameters['ndump'][-1]
            tmax = simulation_parameters['tmax'][-1]
            dt = simulation_parameters['dt'][-1]
            if len(self.true_peakon_file['time'][:]) != int(tmax / (ndump * dt)) + 1:
                raise ValueError(
                    'If reading in true peakon data, the dump frequency must be '
                    'the same as that used for the true peakon data. '
                    'Length of true peakon data is %i, but proposed length is %i'
                    % (len(self.true_peakon_file['time'][:]),
                       int(tmax / (ndump * dt)) + 1))
            if self.true_peakon_file['p'][:].shape != (int(tmax / (ndump * dt)) + 1,):
                raise ValueError(
                    'True peakon data shape %s must be the same shape as proposed data %s'
                    % ((int(tmax / (ndump * dt)) + 1,),
                       self.true_peakon_file['p'][:].shape))

        # do peakon data checks here
        true_mean_peakon_data = simulation_parameters['true_mean_peakon_data'][
            -1]
        if true_mean_peakon_data is not None:
            self.true_mean_peakon_file = Dataset(
                'results/' + true_mean_peakon_data + '/data.nc', 'r')
            # check length of file is correct
            ndump = simulation_parameters['ndump'][-1]
            tmax = simulation_parameters['tmax'][-1]
            dt = simulation_parameters['dt'][-1]
            if len(self.true_mean_peakon_file['time'][:]) != int(tmax /
                                                                 (ndump * dt)):
                raise ValueError(
                    'If reading in true peakon data, the dump frequency must be the same as that used for the true peakon data.'
                )
            if self.true_mean_peakon_file['p'][:].shape != (int(
                    tmax / (ndump * dt)), ):
                raise ValueError(
                    'True peakon data must have same shape as proposed data!')

        for key, value in self.diagnostic_variables.fields.items():

            if key == 'uscalar':
                uscalar = self.diagnostic_variables.fields['uscalar']
                u_interpolator = Interpolator(dot(ones, u), uscalar)
                self.interpolators.append(u_interpolator)

            elif key == 'Euscalar':
                Eu = self.prognostic_variables.Eu
                Euscalar = self.diagnostic_variables.fields['Euscalar']
                Eu_interpolator = Interpolator(dot(ones, Eu), Euscalar)
                self.interpolators.append(Eu_interpolator)

            elif key == 'Xiscalar':
                Xi = self.prognostic_variables.dXi
                Xiscalar = self.diagnostic_variables.fields['Xiscalar']
                Xi_interpolator = Interpolator(dot(ones, Xi), Xiscalar)
                self.interpolators.append(Xi_interpolator)

            elif key == 'du':
                if type(u.function_space().ufl_element()) == VectorElement:
                    u_to_project = self.diagnostic_variables.fields['uscalar']
                else:
                    u_to_project = u
                du = self.diagnostic_variables.fields['du']
                du_projector = Projector(u_to_project.dx(0), du)
                self.projectors.append(du_projector)

            elif key == 'jump_du':
                du = self.diagnostic_variables.fields['du']
                jump_du = self.diagnostic_variables.fields['jump_du']
                V = jump_du.function_space()
                jtrial = TrialFunction(V)
                psi = TestFunction(V)
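                # facet-wise problem: <psi('+'), jump_du('+')>_dS = <psi('+'), |jump(du)|>_dS,
                # so jump_du holds the magnitude of the jump in du across each
                # interior facet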
                Lj = psi('+') * abs(jump(du)) * dS
                aj = psi('+') * jtrial('+') * dS
                jprob = LinearVariationalProblem(aj, Lj, jump_du)
                jsolver = LinearVariationalSolver(jprob)
                self.solvers.append(jsolver)

            elif key == 'du_smooth':
                du = self.diagnostic_variables.fields['du']
                du_smooth = self.diagnostic_variables.fields['du_smooth']
                projector = Projector(du, du_smooth)
                self.projectors.append(projector)

            elif key == 'u2_flux':
                gamma = simulation_parameters['gamma'][-1]
                u2_flux = self.diagnostic_variables.fields['u2_flux']
                xis = self.prognostic_variables.pure_xi_list
                xis_x = []
                xis_xxx = []
                CG1 = FunctionSpace(mesh, "CG", 1)
                psi = TestFunction(CG1)
                for xi in xis:
                    xis_x.append(Function(CG1).project(xi.dx(0)))
                for xi_x in xis_x:
                    xi_xxx = Function(CG1)
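                    # weak third derivative of xi: solve
                    # int psi * xi_xxx dx = -int psi_x * (xi_x)_x dx,
                    # i.e. the weak second derivative of xi_x (boundary terms dropped)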
                    form = (psi * xi_xxx + psi.dx(0) * xi_x.dx(0)) * dx
                    prob = NonlinearVariationalProblem(form, xi_xxx)
                    solver = NonlinearVariationalSolver(prob)
                    solver.solve()
                    xis_xxx.append(xi_xxx)

                flux_expr = 0.0 * x
                for xi, xi_x, xi_xxx in zip(xis, xis_x, xis_xxx):
                    flux_expr += (6 * u.dx(0) * xi + 12 * u * xi_x + gamma *
                                  xi_xxx) * (6 * u.dx(0) * xi + 24 * u * xi_x +
                                             gamma * xi_xxx)
                projector = Projector(flux_expr, u2_flux)
                self.projectors.append(projector)

            elif key == 'a':
                # find  6 * u_x * Xi + gamma * Xi_xxx
                mesh = u.function_space().mesh()
                gamma = simulation_parameters['gamma'][-1]
                a_flux = self.diagnostic_variables.fields['a']
                xis = self.prognostic_variables.pure_xis
                xis_x = []
                xis_xxx = []
                CG1 = FunctionSpace(mesh, "CG", 1)
                psi = TestFunction(CG1)
                for xi in xis:
                    xis_x.append(Function(CG1).project(xi.dx(0)))
                for xi_x in xis_x:
                    xi_xxx = Function(CG1)
                    form = (psi * xi_xxx + psi.dx(0) * xi_x.dx(0)) * dx
                    prob = NonlinearVariationalProblem(form, xi_xxx)
                    solver = NonlinearVariationalSolver(prob)
                    solver.solve()
                    xis_xxx.append(xi_xxx)

                x, = SpatialCoordinate(mesh)
                a_expr = 0.0 * x
                for xi, xi_x, xi_xxx in zip(xis, xis_x, xis_xxx):
                    a_expr += 6 * u.dx(0) * xi + gamma * xi_xxx
                projector = Projector(a_expr, a_flux)
                self.projectors.append(projector)

            elif key == 'b':
                # find 12 * u * Xi_x
                mesh = u.function_space().mesh()
                gamma = simulation_parameters['gamma'][-1]
                b_flux = self.diagnostic_variables.fields['b']
                xis = self.prognostic_variables.pure_xis

                x, = SpatialCoordinate(mesh)
                b_expr = 0.0 * x
                for xi in xis:
                    b_expr += 12 * u * xi.dx(0)
                projector = Projector(b_expr, b_flux)
                self.projectors.append(projector)

            elif key == 'kdv_1':
                # find the first part of the kdv form
                u0 = prognostic_variables.u0
                uh = (u + u0) / 2
                us = Dt * uh + sqrt(Dt) * Xi
                psi = TestFunction(Vu)
                du_1 = self.diagnostic_variables.fields['kdv_1']

                eqn = psi * du_1 * dx - 6 * psi.dx(0) * uh * us * dx
                prob = NonlinearVariationalProblem(eqn, du_1)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)

            elif key == 'kdv_2':
                # find the second part of the kdv form
                u0 = prognostic_variables.u0
                uh = (u + u0) / 2
                us = Dt * uh + sqrt(Dt) * Xi
                psi = TestFunction(Vu)
                du_2 = self.diagnostic_variables.fields['kdv_2']

                eqn = psi * du_2 * dx + 6 * psi * uh * us.dx(0) * dx
                prob = NonlinearVariationalProblem(eqn, du_2)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)

            elif key == 'kdv_3':
                # find the third part of the kdv form
                u0 = prognostic_variables.u0
                uh = (u + u0) / 2
                us = Dt * uh + sqrt(Dt) * Xi
                du_3 = self.diagnostic_variables.fields['kdv_3']
                gamma = simulation_parameters['gamma'][-1]

                phi = TestFunction(Vu)
                F = Function(Vu)
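                # F is the weak second derivative of us (boundary term dropped);
                # du_3 is then obtained below as the projection of -gamma * F_x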

                eqn = (phi * F * dx + phi.dx(0) * us.dx(0) * dx)
                prob = NonlinearVariationalProblem(eqn, F)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)

                self.projectors.append(Projector(-gamma * F.dx(0), du_3))

                # nu = TestFunction(Vu)
                # back_eqn = nu * du_3 * dx - gamma * nu.dx(0) * F * dx
                # back_prob = NonlinearVariationalProblem(back_eqn, du_3)
                # back_solver = NonlinearVariationalSolver(back_prob)
                # self.solvers.append(solver)

            elif key == 'm':

                m = self.diagnostic_variables.fields['m']
                phi = TestFunction(Vu)
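                # weak form of the Camassa-Holm momentum m = u - alphasq * u_xx,
                # with the second-derivative term integrated by parts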
                eqn = (phi * m * dx - phi * u * dx
                       - alphasq * phi.dx(0) * u.dx(0) * dx)
                prob = NonlinearVariationalProblem(eqn, m)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)

            elif key == 'u_xx':

                u_xx = self.diagnostic_variables.fields['u_xx']
                phi = TestFunction(Vu)
                eqn = phi * u_xx * dx + phi.dx(0) * u.dx(0) * dx
                prob = NonlinearVariationalProblem(eqn, u_xx)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)

            elif key == 'u_sde':
                self.to_update_constants = True
                self.Ld = Ld
                self.alphasq = alphasq
                self.p = Constant(1.0 * 0.5 * (1 + exp(-Ld / sqrt(alphasq))) /
                                  (1 - exp(-Ld / sqrt(alphasq))))
                self.q = Constant(Ld / 2)
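                # p and q are the peakon amplitude and position; the expressions
                # below give the peakon profile p * exp(-|x - q| / sqrt(alphasq)),
                # with the outer conditional branches handling points more than
                # Ld / 2 away from q by shifting by the domain length Ld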

                u_sde = self.diagnostic_variables.fields['u_sde']
                if periodic:
                    expr = conditional(
                        x < self.q - Ld / 2,
                        self.p * ((exp(-(x - self.q + Ld) / sqrt(alphasq)) +
                                   exp(-Ld / sqrt(alphasq)) * exp(
                                       (x - self.q + Ld) / sqrt(alphasq))) /
                                  (1 - exp(-Ld / sqrt(alphasq)))),
                        conditional(
                            x < self.q + Ld / 2,
                            self.p * ((exp(-sqrt((self.q - x)**2 / alphasq)) +
                                       exp(-Ld / sqrt(alphasq)) *
                                       exp(sqrt((self.q - x)**2 / alphasq))) /
                                      (1 - exp(-Ld / sqrt(alphasq)))),
                            self.p * ((exp(-(self.q + Ld - x) / sqrt(alphasq)) +
                                       exp(-Ld / sqrt(alphasq)) *
                                       exp((self.q + Ld - x) / sqrt(alphasq))) /
                                      (1 - exp(-Ld / sqrt(alphasq))))))
                else:
                    expr = conditional(
                        x < self.q - Ld / 2,
                        self.p * exp(-(x - self.q + Ld) / sqrt(alphasq)),
                        conditional(
                            x < self.q + Ld / 2,
                            self.p * exp(-sqrt((self.q - x)**2 / alphasq)),
                            self.p * exp(-(self.q + Ld - x) / sqrt(alphasq))))

                self.interpolators.append(Interpolator(expr, u_sde))

            elif key == 'u_sde_weak':
                u_sde = self.diagnostic_variables.fields['u_sde']
                u_sde_weak = self.diagnostic_variables.fields['u_sde_weak']
                psi = TestFunction(Vu)

                eqn = psi * u_sde_weak * dx - psi * (u - u_sde) * dx
                prob = NonlinearVariationalProblem(eqn, u_sde_weak)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)

            elif key == 'u_sde_mean':
                self.to_update_constants = True
                self.p = Constant(1.0)
                self.q = Constant(Ld / 2)

                if periodic:
                    raise NotImplementedError(
                        'u_sde_mean not yet implemented for periodic peakon')

                u_sde = self.diagnostic_variables.fields['u_sde_mean']
                expr = conditional(
                    x < self.q - Ld / 2,
                    self.p * exp(-(x - self.q + Ld) / sqrt(alphasq)),
                    conditional(
                        x < self.q + Ld / 2,
                        self.p * exp(-sqrt((self.q - x)**2 / alphasq)),
                        self.p * exp(-(self.q + Ld - x) / sqrt(alphasq))))
                self.interpolators.append(Interpolator(expr, u_sde))

            elif key == 'u_sde_weak_mean':
                u_sde = self.diagnostic_variables.fields['u_sde_mean']
                u_sde_weak = self.diagnostic_variables.fields[
                    'u_sde_weak_mean']
                psi = TestFunction(Vu)

                eqn = psi * u_sde_weak * dx - psi * (u - u_sde) * dx
                prob = NonlinearVariationalProblem(eqn, u_sde_weak)
                solver = NonlinearVariationalSolver(prob)
                self.solvers.append(solver)

            elif key == 'pure_xi':
                pure_xi = 0.0 * x
                for xi in self.prognostic_variables.pure_xi_list:
                    if vector_u:
                        pure_xi += dot(ones, xi)
                    else:
                        pure_xi += xi
                Xiscalar = self.diagnostic_variables.fields['pure_xi']
                Xi_interpolator = Interpolator(pure_xi, Xiscalar)
                self.interpolators.append(Xi_interpolator)

            elif key == 'pure_xi_x':
                pure_xi_x = 0.0 * x
                for xix in self.prognostic_variables.pure_xi_x_list:
                    if vector_u:
                        pure_xi_x += dot(ones, xix)
                    else:
                        pure_xi_x += xix
                Xiscalar = self.diagnostic_variables.fields['pure_xi_x']
                Xi_interpolator = Interpolator(pure_xi_x, Xiscalar)
                self.interpolators.append(Xi_interpolator)

            elif key == 'pure_xi_xx':
                pure_xi_xx = 0.0 * x
                for xixx in self.prognostic_variables.pure_xi_xx_list:
                    if vector_u:
                        pure_xi_xx += dot(ones, xixx)
                    else:
                        pure_xi_xx += xixx
                Xiscalar = self.diagnostic_variables.fields['pure_xi_xx']
                Xi_interpolator = Interpolator(pure_xi_xx, Xiscalar)
                self.interpolators.append(Xi_interpolator)

            elif key == 'pure_xi_xxx':
                pure_xi_xxx = 0.0 * x
                for xixxx in self.prognostic_variables.pure_xi_xxx_list:
                    if vector_u:
                        pure_xi_xxx += dot(ones, xixxx)
                    else:
                        pure_xi_xxx += xixxx
                Xiscalar = self.diagnostic_variables.fields['pure_xi_xxx']
                Xi_interpolator = Interpolator(pure_xi_xxx, Xiscalar)
                self.interpolators.append(Xi_interpolator)

            elif key == 'pure_xi_xxxx':
                pure_xi_xxxx = 0.0 * x
                # assumes a `pure_xi_xxxx_list` attribute exists, following the
                # naming pattern of the other pure_xi_* lists above
                for xixxxx in self.prognostic_variables.pure_xi_xxxx_list:
                    if vector_u:
                        pure_xi_xxxx += dot(ones, xixxxx)
                    else:
                        pure_xi_xxxx += xixxxx
                Xiscalar = self.diagnostic_variables.fields['pure_xi_xxxx']
                Xi_interpolator = Interpolator(pure_xi_xxxx, Xiscalar)
                self.interpolators.append(Xi_interpolator)

            else:
                raise NotImplementedError('Diagnostic %s not yet implemented' %
                                          key)
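
    def update_diagnostics(self):
        # A minimal driver sketch for the objects collected above (the method
        # name `update_diagnostics` is an assumption; the original class may
        # expose this differently).  Evaluate every stored interpolator,
        # projector and solver in turn.
        for interpolator in self.interpolators:
            interpolator.interpolate()
        for projector in self.projectors:
            projector.project()
        for solver in self.solvers:
            solver.solve()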