Example #1
 def apply_matvec(self, A, b, symmetric):
     b.allocate(A, dim=0)
     for i in range(len(self)):
         for bc in self[i]:
             for j in range(len(self)):
                 if i == j:
                     if numpy.isscalar(A[i, i]):
                         # Convert to a diagonal matrix, so that the individual rows can be modified
                         import block.algebraic
                         A[i, i] = block.algebraic.active_backend(
                         ).create_identity(b[i], val=A[i, i])
                     if symmetric:
                         bc.zero_columns(A[i, i], b[i], self.signs[i])
                     else:
                         bc.apply(A[i, i], b[i])
                 else:
                     if numpy.isscalar(A[i, j]):
                         if A[i, j] != 0:
                             dolfin.error(
                                 "can't modify block (%d,%d) for BC, expected a GenericMatrix"
                                 % (i, j))
                         continue
                     bc.zero(A[i, j])
                     if symmetric:
                         bc.zero_columns(A[j, i], b[j])
Example #2
    def expr2function(self, expr, function):
        """ Convert an expression into a function. How this is done is
        determined by the parameters (assemble, project or interpolate).
        """
        space = function.function_space()

        if self.params.expr2function == "assemble":
            # Compute average values of expr for each cell and place in a DG0 space
            test = TestFunction(space)
            scale = 1.0 / CellVolume(space.mesh())
            assemble(scale * inner(expr, test) * dx, tensor=function.vector())
            return function

        elif self.params.expr2function == "project":
            # TODO: Avoid superfluous function creation with fenics-dev/1.5 by using:
            #project(expr, space, function=function)
            function.assign(project(expr, space))
            return function

        elif self.params.expr2function == "interpolate":
            # TODO: Need interpolation with code generated from expr, waiting for uflacs work.
            function.interpolate(
                expr)  # Currently only works if expr is a single Function
            return function

        else:
            error(
                "No action selected, need to choose either assemble, project or interpolate."
            )
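# A minimal standalone sketch of the "assemble" branch above (the mesh and
# expression are assumptions, not from the original): each DG0 test function
# is 1 on a single cell, so the scaled linear form yields per-cell averages.
from dolfin import (UnitSquareMesh, FunctionSpace, Function, TestFunction,
                    Expression, CellVolume, assemble, inner, dx)

mesh = UnitSquareMesh(16, 16)
DG0 = FunctionSpace(mesh, "DG", 0)
f = Function(DG0)
expr = Expression("x[0]*x[1]", degree=2)

test = TestFunction(DG0)
scale = 1.0 / CellVolume(mesh)
assemble(scale * inner(expr, test) * dx, tensor=f.vector())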
Example #3
    def _action_save(self, field, data, timestep, t):
        "Apply the 'save' action to computed field data."
        field_name = field.name

        # Create save folder first time
        self._create_savedir(field_name)

        # Collect metadata shared between data types
        metadata = {
            'timestep': timestep,
            'time': t,
        }

        # Rename Functions to get the right name in file
        # (NB! This has the obvious side effect!)
        # TODO: We don't need to cache a distinct Function
        # object like we do for plotting, or?
        if isinstance(data, Function):
            data.rename(field_name, "Function produced by cbcpost.")

        # Get list of file formats
        save_as = _get_save_formats(field, data)

        # Write data to file for each filetype
        for saveformat in save_as:
            # Write data to file depending on type
            if saveformat == 'pvd':
                metadata[saveformat] = self._update_pvd_file(
                    field_name, saveformat, data, timestep, t)
            elif saveformat == 'xdmf':
                metadata[saveformat] = self._update_xdmf_file(
                    field_name, saveformat, data, timestep, t)
            elif saveformat == 'xml':
                metadata[saveformat] = self._update_xml_file(
                    field_name, saveformat, data, timestep, t)
            elif saveformat == 'xml.gz':
                metadata[saveformat] = self._update_xml_gz_file(
                    field_name, saveformat, data, timestep, t)
            elif saveformat == 'txt':
                metadata[saveformat] = self._update_txt_file(
                    field_name, saveformat, data, timestep, t)
            elif saveformat == 'hdf5':
                metadata[saveformat] = self._update_hdf5_file(
                    field_name, saveformat, data, timestep, t)
            elif saveformat == 'shelve':
                metadata[saveformat] = self._update_shelve_file(
                    field_name, saveformat, data, timestep, t)
            else:
                error("Unknown save format %s." % (saveformat, ))
            self._timer.completed("PP: save %s %s" % (field_name, saveformat))

        # Write new data to metadata file
        self._update_metadata_file(field_name, data, t, timestep, save_as,
                                   metadata)
        self._timer.completed("PP: update metadata")
        self._fill_playlog(field, timestep, save_as)

        self._timer.completed("PP: update playlog")
Example #4
    def __init__(self, u, boundary_is_streamline=False, degree=1):
        """
        Heavily based on
        https://github.com/mikaem/fenicstools/blob/master/fenicstools/Streamfunctions.py
        
        Stream function for a given general 2D velocity field.
        The boundary conditions are weakly imposed through the term
        
            inner(q, grad(psi)*n)*ds, 
        
        where grad(psi) = [-v, u] is set on all boundaries. 
        This should work for any collection of boundaries: 
        walls, inlets, outlets etc.    
        """
        Vu = u[0].function_space()
        mesh = Vu.mesh()

        # Check dimension
        if not mesh.geometry().dim() == 2:
            df.error("Stream-function can only be computed in 2D.")

        # Define the weak form
        V = df.FunctionSpace(mesh, 'CG', degree)
        q = df.TestFunction(V)
        psi = df.TrialFunction(V)
        n = df.FacetNormal(mesh)
        a = df.dot(df.grad(q), df.grad(psi)) * df.dx
        L = df.dot(q, df.curl(u)) * df.dx

        if boundary_is_streamline:
            # Strongly set psi = 0 on entire domain boundary
            self.bcs = [df.DirichletBC(V, df.Constant(0), df.DomainBoundary())]
            self.normalize = False
        else:
            self.bcs = []
            self.normalize = True
            L = L + q * (n[1] * u[0] - n[0] * u[1]) * df.ds

        # Create preconditioned iterative solver
        solver = df.PETScKrylovSolver('gmres', 'hypre_amg')
        solver.parameters['nonzero_initial_guess'] = True
        solver.parameters['relative_tolerance'] = 1e-10
        solver.parameters['absolute_tolerance'] = 1e-10

        # Store for later computation
        self.psi = df.Function(V)
        self.A = df.assemble(a)
        self.L = L
        self.mesh = mesh
        self.solver = solver
        self._triangulation = None
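# A minimal sketch of the solve step such a class could expose (the function
# name `compute` and the mean-value normalization are assumptions, not taken
# from the original snippet); it reuses the attributes stored in __init__.
import dolfin as df

def compute(stream):
    """Assemble the right-hand side, apply BCs and solve A*psi = b."""
    b = df.assemble(stream.L)
    for bc in stream.bcs:
        bc.apply(stream.A, b)

    stream.solver.solve(stream.A, stream.psi.vector(), b)

    if stream.normalize:
        # Without Dirichlet BCs the stream function is only defined up to a
        # constant; remove the mean value.
        vec = stream.psi.vector()
        vec[:] = vec.get_local() - vec.sum() / vec.size()
    return stream.psi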
Example #5
def create_dirichlet_conditions(values, boundaries, function_space):
    """Create Dirichlet boundary conditions for given boundary values,
    boundaries and function space."""

    # Check that the size matches
    if len(values) != len(boundaries):
        error("The number of Dirichlet values does not match the number of Dirichlet boundaries.")

    info("Creating %d Dirichlet boundary condition(s)." % len(values))

    # Create Dirichlet conditions
    bcs = []
    for (i, value) in enumerate(values):

        # Get current boundary
        boundary = boundaries[i]

        if isinstance(value, tuple):
            subdim = value[1]
            value = value[0]
            temp_function_space = function_space.sub(subdim)
        else:
            temp_function_space = function_space

        # Case 0: boundary is a string
        if isinstance(boundary, str):
            boundary = CompiledSubDomain(boundary)
            bc = DirichletBC(temp_function_space, value, boundary)

        # Case 1: boundary is a SubDomain
        elif isinstance(boundary, SubDomain):
            bc = DirichletBC(temp_function_space, value, boundary)

        # Case 2: boundary is defined by a MeshFunction
        elif isinstance(boundary, tuple):
            mesh_function, index = boundary
            bc = DirichletBC(temp_function_space, value, mesh_function, index)

        # Unhandled case
        else:
            error("Unhandled boundary specification for boundary condition. "
                  "Expecting a string, a SubDomain or a (MeshFunction, int) tuple.")

        bcs.append(bc)

    return bcs
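# A minimal usage sketch (mesh, facet function and values are assumptions,
# not from the original) exercising all three supported boundary
# specifications of the helper above.
from dolfin import (UnitSquareMesh, FunctionSpace, MeshFunction, Constant,
                    DomainBoundary)

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)
facets = MeshFunction("size_t", mesh, mesh.topology().dim() - 1, 0)

bcs = create_dirichlet_conditions(
    values=[Constant(0.0), Constant(1.0), Constant(2.0)],
    boundaries=["near(x[0], 0.0)",   # Case 0: compiled string
                DomainBoundary(),    # Case 1: a SubDomain instance
                (facets, 1)],        # Case 2: (MeshFunction, index) tuple
    function_space=V)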
Example #6
    def __init__(self, parameters=None, solver="ipcs"):
        "Create Navier-Stokes problem"

        self.parameters = Parameters("problem_parameters")

        # Create solver
        if solver == "taylor-hood":
            info("Using Taylor-Hood based Navier-Stokes solver")
            self.solver = TaylorHoodSolver(self)
        elif solver == "ipcs":
            info("Using IPCS based Navier-Stokes solver")
            self.solver = NavierStokesSolver(self)
        else:
            error("Unknown Navier--Stokes solver: %s" % solver)

        # Set up parameters
        self.parameters.add(self.solver.parameters)
Example #7
    def __init__(self, parameters=None, solver="ipcs"):
        "Create Navier-Stokes problem"

        self.parameters = Parameters("problem_parameters")

        # Create solver
        if solver == "taylor-hood":
            info("Using Taylor-Hood based Navier-Stokes solver")
            self.solver = TaylorHoodSolver(self)
        elif solver == "ipcs":
            info("Using IPCS based Navier-Stokes solver")
            self.solver = NavierStokesSolver(self)
        else:
            error("Unknown Navier--Stokes solver: %s" % solver)

        # Set up parameters
        self.parameters.add(self.solver.parameters)
Example #8
def create_dirichlet_conditions(values, boundaries, function_spaces):
    """Create Dirichlet boundary conditions for given boundary values,
    boundaries and function spaces."""

    # Check that the size matches
    if len(values) != len(boundaries):
        error(
            "The number of Dirichlet values does not match the number of Dirichlet boundaries."
        )
    if len(values) != len(function_spaces):
        error(
            "The number of Dirichlet values does not match the number of function spaces."
        )
    if len(boundaries) != len(function_spaces):
        error(
            "The number of Dirichlet boundaries  does not match the number of function spaces."
        )

    info("Creating %d Dirichlet boundary condition(s)." % len(values))

    # Create Dirichlet conditions
    bcs = []
    for (i, value) in enumerate(values):

        # Get current boundary
        boundary = boundaries[i]
        function_space = function_spaces[i]

        # Case 0: boundary is a string
        if isinstance(boundary, str):
            boundary = CompiledSubDomain(boundary)
            bc = DirichletBC(function_space, value, boundary)

        # Case 1: boundary is a SubDomain
        elif isinstance(boundary, SubDomain):
            bc = DirichletBC(function_space, value, boundary)

        # Case 2: boundary is defined by a MeshFunction
        elif isinstance(boundary, tuple):
            mesh_function, index = boundary
            bc = DirichletBC(function_space, value, mesh_function, index)

        # Unhandled case
        else:
            error(
                "Unhandled boundary specification for boundary condition. "
                "Expecting a string, a SubDomain or a (MeshFunction, int) tuple."
            )

        bcs.append(bc)

    return bcs
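# Usage sketch for this variant (inputs are assumptions, not from the
# original): unlike the single-space helper earlier in this collection, it
# takes one function space per boundary value.
from dolfin import UnitSquareMesh, VectorFunctionSpace, FunctionSpace, Constant

mesh = UnitSquareMesh(8, 8)
W = VectorFunctionSpace(mesh, "CG", 2)
Q = FunctionSpace(mesh, "CG", 1)

bcs = create_dirichlet_conditions(
    values=[Constant((0.0, 0.0)), Constant(0.0)],
    boundaries=["on_boundary", "near(x[0], 1.0)"],
    function_spaces=[W, Q])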
Example #9
def compute_regular_timesteps(problem):
    """Compute fixed timesteps for problem.

    The first timestep will be T0 while the last timestep will be in the interval [T, T+dt).

    Returns (dt, timesteps, start_timestep).
    """
    # Get the time range and timestep from the problem
    T0 = problem.params.T0
    T = problem.params.T
    dt = problem.params.dt
    start_timestep = problem.params.start_timestep

    # Compute regular timesteps, including T0 and T
    timesteps = arange(T0, T+dt, dt)

    if abs(dt - (timesteps[1]-timesteps[0])) > 1e-8:
        error("Computed timestep size does not match specified dt.")

    if timesteps[-1] < T - dt*1e-6:
        error("Computed timesteps range does not include end time.")

    if timesteps[-1] > T + dt*1e-6:
        cbc_warning("End time for simulation does not match end time set for problem (T-T0 not a multiple of dt).")

    if start_timestep < 0 or start_timestep >= len(timesteps):
        error("start_timestep is beyond the computed timesteps.")

    return dt, timesteps, start_timestep
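# A quick usage sketch (the params container below is an assumption; any
# object exposing the attributes read above will do).
from types import SimpleNamespace

problem = SimpleNamespace(
    params=SimpleNamespace(T0=0.0, T=1.0, dt=0.25, start_timestep=0))
dt, timesteps, start_timestep = compute_regular_timesteps(problem)
# dt == 0.25, timesteps == [0., 0.25, 0.5, 0.75, 1.], start_timestep == 0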
Example #10
 def apply_matvec(self, A, b, symmetric):
     b.allocate(A, dim=0)
     for i in range(len(self)):
         for bc in self[i]:
             for j in range(len(self)):
                 if i==j:
                     if numpy.isscalar(A[i,i]):
                         # Convert to a diagonal matrix, so that the individual rows can be modified
                         import block.algebraic
                         A[i,i] = block.algebraic.active_backend().create_identity(b[i], val=A[i,i])
                     if symmetric:
                         bc.zero_columns(A[i,i], b[i], self.signs[i])
                     else:
                         bc.apply(A[i,i], b[i])
                 else:
                     if numpy.isscalar(A[i,j]):
                         if A[i,j] != 0:
                             dolfin.error("can't modify block (%d,%d) for BC, expected a GenericMatrix" % (i,j))
                         continue
                     bc.zero(A[i,j])
                     if symmetric:
                         bc.zero_columns(A[j,i], b[j])
Example #11
 def set_initial_conditions(self, **init):
     "Update initial_conditions in model"
     for init_name, init_value in init.items():
         if init_name not in self._initial_conditions:
             error("'%s' is not a parameter in %s" % (init_name, self))
         if not isinstance(init_value, (float, int)) and \
            not isinstance(getattr(init_value, "_cpp_object", init_value), GenericFunction):
             error("'%s' is not a scalar or a GenericFunction" % init_name)
         if hasattr(init_value, "_cpp_object") and \
            isinstance(init_value._cpp_object, GenericFunction) and \
            init_value._cpp_object.value_size() != 1:
             error("expected the value_size of '%s' to be 1" % init_name)
         self._initial_conditions[init_name] = init_value
Example #12
    def set_parameters(self, **params):
        "Update parameters in model"
        for param_name, param_value in params.items():
            if param_name not in self._parameters:
                error("'%s' is not a parameter in %s" % (param_name, self))
            if not isinstance(param_value, (float, int)) and \
               not isinstance(getattr(param_value, "_cpp_object", param_value), GenericFunction):
                error("'%s' is not a scalar or a GenericFunction" % param_name)
            if hasattr(param_value, "_cpp_object") and \
               isinstance(param_value._cpp_object, GenericFunction) and \
               param_value._cpp_object.value_size() != 1:
                error("expected the value_size of '%s' to be 1" % param_name)

            self._parameters[param_name] = param_value
Example #13
def space_from_string(space_string: str, mesh: df.Mesh,
                      dim: int) -> df.FunctionSpace:
    """
    Construct a finite element space from a string
    representation of the space.

    Arguments
    ---------
    space_string : str
        A string of the form {family}_{degree} which
        determines the space, for example 'Lagrange_1'.
    mesh : dolfin.Mesh
        The mesh
    dim : int
        1 for scalar space, 3 for vector space.
    """
    family, degree = space_string.split("_")

    if dim == 3:
        V = df.FunctionSpace(
            mesh,
            df.VectorElement(
                family=family,
                cell=mesh.ufl_cell(),
                degree=int(degree),
                quad_scheme="default",
            ),
        )
    elif dim == 1:
        V = df.FunctionSpace(
            mesh,
            df.FiniteElement(
                family=family,
                cell=mesh.ufl_cell(),
                degree=int(degree),
                quad_scheme="default",
            ),
        )
    else:
        df.error("Cannot create function space of dimension {}".format(dim))

    return V
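# Usage sketch (the mesh is an assumption): build a scalar and a vector
# Lagrange space from the "{family}_{degree}" string format described above.
import dolfin as df

mesh = df.UnitCubeMesh(2, 2, 2)
V_scalar = space_from_string("Lagrange_1", mesh, dim=1)  # scalar CG1 space
V_vector = space_from_string("Lagrange_2", mesh, dim=3)  # vector CG2 space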
Example #14
 def conductivities(self):
     error("Need to prescribe conducitivites")
Example #15
 def mesh(self):
     error("Need to prescribe domain")
Example #16
def scalar_laplacians(mesh):
    """
    Calculate the laplacians needed by fiberrule algorithms
    
    Arguments
    ---------
    mesh : dolfin.Mesh
       A dolfin mesh with marked boundaries:
       base = 10, rv = 20, lv = 30, epi = 40.
       The base is assumed to be located at x=0.
    
    """

    if not isinstance(mesh, d.Mesh):
        raise TypeError("Expected a dolfin.Mesh as the mesh argument.")

    # Init connectivities
    mesh.init(2)
    facet_markers = d.MeshFunction("size_t", mesh, 2, mesh.domains())

    # Boundary markers, solutions and cases
    markers = dict(base=10, rv=20, lv=30, epi=40, apex=50)

    # Solver parameters
    solver_param=dict(solver_parameters=dict(
        preconditioner="ml_amg" if d.has_krylov_solver_preconditioner("ml_amg") \
        else "default", linear_solver="gmres"))

    cases = ["rv", "lv", "epi"]
    boundaries = cases + ["base"]

    # Check that all boundary faces are marked
    num_boundary_facets = d.BoundaryMesh(mesh, "exterior").num_cells()

    if num_boundary_facets != sum(np.sum(\
        facet_markers.array()==markers[boundary])\
                                  for boundary in boundaries):
        d.error("Not all boundary faces are marked correctly. Make sure all "\
                "boundary facets are marked as: base = 10, rv = 20, lv = 30, "\
                "epi = 40.")

    # Coords and cells
    coords = mesh.coordinates()
    cells_info = mesh.cells()

    # Find apex by solving a laplacian with base solution = 0
    # Create Base variational problem
    V = d.FunctionSpace(mesh, "CG", 1)

    u = d.TrialFunction(V)
    v = d.TestFunction(V)

    a = d.dot(d.grad(u), d.grad(v)) * d.dx
    L = v * d.Constant(1) * d.dx

    DBC_10 = d.DirichletBC(V, 1, facet_markers, markers["base"], "topological")

    # Create solutions
    solutions = dict(
        (what, d.Function(V)) for what in markers if what != "base")

    d.solve(a == L,
            solutions["apex"],
            DBC_10,
            solver_parameters={"linear_solver": "gmres"})

    apex_values = solutions["apex"].vector().array()
    apex_values[d.dof_to_vertex_map(V)] = solutions["apex"].vector().array()
    ind_apex_max = apex_values.argmax()
    apex_coord = coords[ind_apex_max, :]

    # Update rhs
    L = v * d.Constant(0) * d.dx

    d.info("  Apex coord: ({0}, {1}, {2})".format(*apex_coord))
    d.info("  Num coords: {0}".format(len(coords)))
    d.info("  Num cells: {0}".format(len(cells_info)))

    # Calculate volume
    volume = 0.0
    for cell in d.cells(mesh):
        volume += cell.volume()

    d.info("  Volume: {0}".format(volume))
    d.info("")

    # Cases
    # =====
    #
    # 1) base: 1, apex: 0
    # 2) lv: 1, rv, epi: 0
    # 3) rv: 1, lv, epi: 0
    # 4) epi: 1, rv, lv: 0

    class ApexDomain(d.SubDomain):
        def inside(self, x, on_boundary):
            return d.near(x[0], apex_coord[0]) and d.near(x[1], apex_coord[1]) and \
                   d.near(x[2], apex_coord[2])

    apex_domain = ApexDomain()

    # Case 1:
    Poisson = 1
    DBC_11 = d.DirichletBC(V, 0, apex_domain, "pointwise")

    # Using Poisson
    if Poisson:
        d.solve(a == L,
                solutions["apex"], [DBC_10, DBC_11],
                solver_parameters={"linear_solver": "gmres"})

    # Using Eikonal equation
    else:
        Le = v * d.Constant(1) * d.dx
        d.solve(a == Le,
                solutions["apex"],
                DBC_11,
                solver_parameters={"linear_solver": "gmres"})

        # Create Eikonal problem
        eps = d.Constant(mesh.hmax() / 25)
        y = solutions["apex"]
        F = d.sqrt(d.inner(d.grad(y), d.grad(y)))*v*d.dx - \
            d.Constant(1)*v*d.dx + eps*d.inner(d.grad(y), d.grad(v))*d.dx
        d.solve(F == 0,
                y,
                DBC_11,
                solver_parameters={
                    "linear_solver": "lu",
                    "newton_solver": {
                        "relative_tolerance": 1e-5
                    }
                })

    # Check that solution of the three last cases all sum to 1.
    sol = solutions["apex"].vector().copy()
    sol[:] = 0.0

    # Iterate over the three different cases
    for case in cases:

        # Solve linear system
        bcs = [d.DirichletBC(V, 1 if what == case else 0, \
                             facet_markers, markers[what], "topological") \
               for what in cases]
Example #17
 def I(self, v, s, time=None):
     "Return the ionic current."
     error("Must define I = I(v, s)")
Example #18
 def num_states(self):
     """Return number of state variables (in addition to the
     membrane potential)."""
     error("Must overload num_states")
Example #19
 def F(self, v, s, time=None):
     "Return right-hand side for state variable evolution."
     error("Must define F = F(v, s)")
Example #20
    def __init__(self, tfml_file, system_name='magma', p_name='Pressure',
                 f_name='Porosity', c_name='c', n_name='n', m_name='m',
                 d_name='d', N_name='N', h_squared_name='h_squared',
                 x0_name='x0'):
        """read the tfml_file and use libspud to populate the internal parameters

        c: wavespeed
        n: permeability exponent
        m: bulk viscosity exponent
        d: wave dimension
        N: number of collocation points
        x0: coordinate of the wave peak
        h_squared:  the size of the system in compaction lengths
                    squared (h/delta)**2
        """
        # initialize libspud and extract parameters
        libspud.clear_options()
        libspud.load_options(tfml_file)
        # get model dimension
        self.dim = libspud.get_option("/geometry/dimension")
        self.system_name = system_name
        # get solitary wave parameters
        path="/system::"+system_name+"/coefficient::"
        scalar_value="/type::Constant/rank::Scalar/value::WholeMesh/constant"
        vector_value="/type::Constant/rank::Vector/value::WholeMesh/constant::dim"
        c = libspud.get_option(path+c_name+scalar_value)
        n = int(libspud.get_option(path+n_name+scalar_value))
        m = int(libspud.get_option(path+m_name+scalar_value))
        d = float(libspud.get_option(path+d_name+scalar_value))
        N = int(libspud.get_option(path+N_name+scalar_value))
        self.h = np.sqrt(libspud.get_option(path+h_squared_name+scalar_value))
        self.x0 = np.array(libspud.get_option(path+x0_name+vector_value))
        self.swave = SolitaryWave(c,n,m,d,N)
        self.rmax = self.swave.r[-1]
        self.tfml_file = tfml_file
  
        # check that d <= dim
        assert (d <= self.dim)   
        
        # sort out appropriate index for calculating distance r
        if d == 1:
            self.index = [self.dim - 1]
        else: 
            self.index = list(range(0, int(d)))
            
        # check that the origin point is the correct dimension
        assert (len(self.x0) == self.dim)
        
        
        #read in information for constructing Function space and dolfin objects
        # get the mesh parameters and reconstruct the mesh
        meshtype = libspud.get_option("/geometry/mesh/source[0]/name")
        if meshtype == 'UnitSquare':
            number_cells = libspud.get_option("/geometry/mesh[0]/source[0]/number_cells")
            diagonal = libspud.get_option("/geometry/mesh[0]/source[0]/diagonal")
            mesh = df.UnitSquareMesh(number_cells[0],number_cells[1],diagonal)
        elif meshtype == 'Rectangle':
            x0 = libspud.get_option("/geometry/mesh::Mesh/source::Rectangle/lower_left")
            x1 = libspud.get_option("/geometry/mesh::Mesh/source::Rectangle/upper_right")
            number_cells = libspud.get_option("/geometry/mesh::Mesh/source::Rectangle/number_cells")
            diagonal = libspud.get_option("/geometry/mesh[0]/source[0]/diagonal")
            mesh = df.RectangleMesh(x0[0],x0[1],x1[0],x1[1],number_cells[0],number_cells[1],diagonal)
        elif meshtype == 'UnitCube':
            number_cells = libspud.get_option("/geometry/mesh[0]/source[0]/number_cells")
            mesh = df.UnitCubeMesh(number_cells[0],number_cells[1],number_cells[2])
        elif meshtype == 'Box':
            x0 = libspud.get_option("/geometry/mesh::Mesh/source::Box/lower_back_left")
            x1 = libspud.get_option("/geometry/mesh::Mesh/source::Box/upper_front_right")
            number_cells = libspud.get_option("/geometry/mesh::Mesh/source::Box/number_cells")
            mesh = df.BoxMesh(x0[0],x0[1],x0[2],x1[0],x1[1],x1[2],number_cells[0],number_cells[1],number_cells[2])
        elif meshtype == 'UnitInterval':
            number_cells = libspud.get_option("/geometry/mesh::Mesh/source::UnitInterval/number_cells")
            mesh = df.UnitIntervalMesh(number_cells)
        elif meshtype == 'Interval':
            number_cells = libspud.get_option("/geometry/mesh::Mesh/source::Interval/number_cells")
            left = libspud.get_option("/geometry/mesh::Mesh/source::Interval/left")
            right = libspud.get_option("/geometry/mesh::Mesh/source::Interval/right")
            mesh = df.IntervalMesh(number_cells,left,right)
        elif meshtype == 'File':
            mesh_filename = libspud.get_option("/geometry/mesh::Mesh/source::File/file")
            print "tfml_file  = ",self.tfml_file, "mesh_filename=",mesh_filename
            mesh = df.Mesh(mesh_filename)
        else:
            df.error("Error: unknown mesh type "+meshtype)
           
        #set the functionspace for n-d solitary waves
        path="/system::"+system_name+"/field::"
        p_family = libspud.get_option(path+p_name+"/type/rank/element/family")
        p_degree = libspud.get_option(path+p_name+"/type/rank/element/degree")
        f_family = libspud.get_option(path+f_name+"/type/rank/element/family")
        f_degree = libspud.get_option(path+f_name+"/type/rank/element/degree")        
        pe = df.FiniteElement(p_family, mesh.ufl_cell(), p_degree)
        ve = df.FiniteElement(f_family, mesh.ufl_cell(), f_degree)
        e = pe*ve
        self.functionspace = df.FunctionSpace(mesh, e)

        #work out the order of the fields
        for i in range(libspud.option_count("/system::"+system_name+"/field")):
            name = libspud.get_option("/system::"+system_name+"/field["+str(i)+"]/name")
            if name == f_name:
                self.f_index = i
            if name == p_name:
                self.p_index = i
Example #21
 def I(self, v, s, time=None, index=None):
     if index is None:
         error("(Domain) index must be specified for multi cell models")
     # Extract which cell model index (given by index in incoming tuple)
     k = self._key_to_cell_model[index]
     return self._cell_models[k].I(v, s, time)
Example #22
 def solve():
     error("solve() function not implemented by solver.")
Example #23
 def F(v, s):
     error("Must define F = F(v, s)")
Example #24
 def solve():
     error("solve() function not implemented by solver.")
Example #25
 def mesh(self):
     error("Need to prescribe domain")
Example #26
 def __str__():
     error("__str__ not implemented by solver.")
Example #27
 def conductivities(self):
     error("Need to prescribe conducitivites")
Example #28
 def I(v, s):
     error("Must define I = I(v, s)")
Example #29
def scalar_laplacians(mesh, markers=None, ffun=None):
    """
    Calculate the laplacians

    Arguments
    ---------
    mesh : dolfin.Mesh
       A dolfin mesh
    markers : dict (optional)
        A dictionary with the markers for the
        different boundaries defined in the facet function
        or within the mesh itself.
        The following markers must be provided:
        'base', 'lv', 'epi', 'rv' (optional).
        If the markers are not provided, the following default
        values will be used: base = 10, rv = 20, lv = 30, epi = 40.
    ffun : dolfin.MeshFunction (optional)
        A facet function holding the boundary markers.
        If not provided, it is built from the markers stored in the mesh domains.
    """

    if not isinstance(mesh, df.Mesh):
        raise TypeError("Expected a dolfin.Mesh as the mesh argument.")

    # Init connectivities
    mesh.init(2)
    if ffun is None:
        ffun = df.MeshFunction("size_t", mesh, 2, mesh.domains())

    # Boundary markers, solutions and cases
    if markers is None:
        markers = utils.default_markers()
    else:

        keys = ['base', 'lv', 'epi']
        msg = ('Key {key} not found in markers. Make sure to provide a '
               'key-value pair for {keys}')
        for key in keys:
            assert key in markers, msg.format(key=key, keys=keys)
        if 'rv' not in markers:
            df.info(
                'No marker for the RV found. Assume this is an LV geometry')
            rv_value = 20
            # Just make sure that this value is not used for any of the other boundaries.
            while rv_value in markers.values():
                rv_value += 1
            markers['rv'] = rv_value

    markers_str = '\n'.join(
        ['{}: {}'.format(k, v) for k, v in markers.items()])
    df.info(('Compute scalar laplacian solutions with the markers: \n'
             '{}').format(markers_str))

    cases = ["rv", "lv", "epi"]
    boundaries = cases + ["base"]

    # Check that all boundary faces are marked
    num_boundary_facets = df.BoundaryMesh(mesh, "exterior").num_cells()
    if num_boundary_facets != sum(
        [np.sum(ffun.array() == markers[boundary])
         for boundary in boundaries]):

        df.error(("Not all boundary faces are marked correctly. Make sure all "
                  "boundary facets are marked as: {}"
                  "").format(", ".join(
                      ["{} = {}".format(k, v) for k, v in markers.items()])))

    # Compute the apex-to-base solution
    apex = apex_to_base(mesh, markers["base"], ffun)

    # Find the rest of the Laplace solutions
    V = apex.function_space()
    u = df.TrialFunction(V)
    v = df.TestFunction(V)

    a = df.dot(df.grad(u), df.grad(v)) * df.dx
    L = v * df.Constant(0) * df.dx
    solutions = dict((what, df.Function(V)) for what in cases)
    solutions["apex"] = apex

    df.info("  Num coords: {0}".format(mesh.num_vertices()))
    df.info("  Num cells: {0}".format(mesh.num_cells()))

    solver_param = dict(solver_parameters=dict(
        preconditioner="ml_amg" if df.
        has_krylov_solver_preconditioner("ml_amg") else "default",
        linear_solver="gmres",
    ))

    # Check that solution of the three last cases all sum to 1.
    sol = solutions["apex"].vector().copy()
    sol[:] = 0.0

    if len(ffun.array()[ffun.array() == markers['rv']]) == 0:
        # Remove the RV
        cases.pop(next(i for i, c in enumerate(cases) if c == 'rv'))

    # Iterate over the three different cases
    df.info("Solving Laplace equation")
    for case in cases:
        df.info(" {0} = 1, {1} = 0".format(
            case, ", ".join([c for c in cases if c != case])))
        # Solve linear system
        bcs = [
            df.DirichletBC(V, 1 if what == case else 0, ffun, markers[what],
                           "topological") for what in cases
        ]
Example #30
 def __str__():
     error("__str__ not implemented by solver.")
Example #31
def runge_kutta_step(A, b, c,
                     V,
                     F,
                     u0,
                     t, dt,
                     sympy_dirichlet_bcs=[],
                     tol=1.0e-10,
                     verbose=True
                     ):
    '''Perform one explicit Runge-Kutta step for u' = F(u), given the
    Butcher tableau (A, b, c).
    '''
    # Make sure that the tableau is strictly lower triangular (explicit scheme).
    import numpy
    import sympy as smp
    s = len(b)
    A = numpy.array(A)
    if numpy.any(abs(A[numpy.triu_indices(s)]) > 1.0e-15):
        error('Butcher tableau not strictly lower triangular.')

    u = TrialFunction(V)
    v = TestFunction(V)

    solver_params = {'linear_solver': 'iterative',
                     'symmetric': True,
                     'preconditioner': 'hypre_amg',
                     'krylov_solver': {'relative_tolerance': tol,
                                       'absolute_tolerance': 0.0,
                                       'maximum_iterations': 100,
                                       'monitor_convergence': verbose
                                       }
                     }

    # For the boundary values, see
    #
    #   Intermediate Boundary Conditions for Runge-Kutta Time Integration of
    #   Initial-Boundary Value Problems,
    #   D. Pathria,
    #   <http://www.math.uh.edu/~hjm/june1995/p00379-p00388.pdf>.
    #
    tt = smp.symbols('t')
    BCS = []
    # Get boundary conditions and their derivatives.
    for k in range(2):
        BCS.append([])
        for boundary, expr in sympy_dirichlet_bcs:
            # Form k-th derivative.
            DexprDt = smp.diff(expr, tt, k)
            # TODO set degree of expression
            BCS[-1].append(DirichletBC(V,
                           Expression(smp.printing.ccode(DexprDt), t=t + dt),
                           boundary))

    # Use the Constant() syntax to avoid compiling separate expressions for
    # different values of dt.
    ddt = Constant(dt)

    # Compute the stage values.
    k = []
    for i in range(s):
        U = u0
        for j in range(i):
            U += ddt * A[i][j] * k[j]
        L = F(t + c[i] * dt, U, v)
        k.append(Function(V))
        # Using this bc is somewhat random.
        # TODO come up with something better here.
        for g in BCS[1]:
            g.t = t + c[i] * dt
        solve(u * v * dx == L, k[i],
              bcs=BCS[1],
              solver_parameters=solver_params
              )
        #plot(k[-1])
        #interactive()

    # Put it all together: u_new = u0 + dt * sum_i b_i k_i.
    U = u0
    for i in range(s):
        U += ddt * b[i] * k[i]
    theta = Function(V)
    for g in BCS[0]:
        g.t = t + dt
    solve(u * v * dx == U * v * dx,
          theta,
          bcs=BCS[0],
          solver_parameters=solver_params
          )

    return theta
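# A usage sketch (assumptions: the heat equation u' = Laplace(u) with natural
# boundary conditions and Kutta's classical third-order tableau; none of this
# is taken from the original snippet).
from dolfin import (UnitIntervalMesh, FunctionSpace, Expression, interpolate,
                    grad, inner, dx)

# Strictly lower triangular A, i.e. an explicit scheme, as checked above.
A = [[0.0, 0.0, 0.0],
     [0.5, 0.0, 0.0],
     [-1.0, 2.0, 0.0]]
b = [1.0 / 6.0, 2.0 / 3.0, 1.0 / 6.0]
c = [0.0, 0.5, 1.0]

mesh = UnitIntervalMesh(32)
V = FunctionSpace(mesh, 'CG', 1)
u0 = interpolate(Expression('sin(pi*x[0])', degree=2), V)

def F(t, w, v):
    # Weak right-hand side: (F(w), v) = -(grad w, grad v)
    return -inner(grad(w), grad(v)) * dx

u_new = runge_kutta_step(A, b, c, V, F, u0, t=0.0, dt=1.0e-3)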