Example #1
def poincare_const(o, p, d=1):
    # Vectorial Poincare, see [Blechta, Malek, Vohralik 2016]
    if d != 1 and p != 2.0:
        return d**abs(0.5-1.0/p) * poincare_const(o, p)

    if isinstance(o, Mesh):
        raise NotImplementedError("Poincare constant not implemented on mesh!")

    if isinstance(o, Cell):
        assert _is_simplex(o), "Poincare constant not " \
                "implemented on non-simplicial cells!"
        h = max(e.length() for e in edges(o))
        return h*_poincare_convex(p)

    if isinstance(o, CellType):
        assert _is_simplex(o), "Poincare constant not " \
                "implemented on non-simplicial cells!"
        return _poincare_convex(p)

    if isinstance(o, Vertex):
        # TODO: fix using ghosted mesh
        not_working_in_parallel("Poincare computation on patch")
        h = max(v0.point().distance(v1.point())
                for c0 in cells(o) for v0 in vertices(c0)
                for c1 in cells(o) for v1 in vertices(c1))
        # FIXME: Implement a check for convexity of the patch
        _warn_poincare_convex()
        return h*_poincare_convex(p)

    raise NotImplementedError
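A minimal usage sketch (an illustration, not from the source; it assumes poincare_const and its helpers _is_simplex and _poincare_convex are importable together with a star import from dolfin):

from dolfin import UnitSquareMesh, Cell

mesh = UnitSquareMesh(4, 4)
cell = Cell(mesh, 0)
C_scalar = poincare_const(cell, p=2.0)        # longest edge times the convex-domain constant
C_vector = poincare_const(cell, p=3.0, d=2)   # rescaled by d**|1/2 - 1/p| as in the first branch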
Example #2
def compare_split_matrices(eq, mat, vec, Wv, Wu, eps=1e-14):
    # Assemble coupled system
    M, v = dolfin.system(eq)
    M = assemble(M).array()
    v = assemble(v).get_local()
    Ni, Nj = mat.shape

    def compute_diff(coupled, split):
        diff = abs(coupled - split).flatten().max()
        return diff

    # Rebuild coupled system from split parts
    M2 = numpy.zeros_like(M)
    v2 = numpy.zeros_like(v)
    for i in range(Ni):
        dm_Wi = Wv.sub(i).dofmap()

        if vec[i] is not None:
            data = assemble(vec[i]).get_local()
            dm_Vi = vec[i].arguments()[0].function_space().dofmap()
            for cell in dolfin.cells(Wv.mesh()):
                dofs_Wi = dm_Wi.cell_dofs(cell.index())
                dofs_Vi = dm_Vi.cell_dofs(cell.index())
                v2[dofs_Wi] = data[dofs_Vi]

        dofs_i = dm_Wi.dofs()
        diff = compute_diff(v[dofs_i], v2[dofs_i])
        print(
            'Vector part %d has error %10.3e' % (i, diff), '<---' if diff > eps else ''
        )

        for j in range(Nj):
            dm_Wj = Wv.sub(j).dofmap()
            if mat[i, j] is not None:
                data = assemble(mat[i, j]).array()
                dm_Vi = mat[i, j].arguments()[0].function_space().dofmap()
                dm_Vj = mat[i, j].arguments()[1].function_space().dofmap()
                for cell in dolfin.cells(Wv.mesh()):
                    dofs_Wi = dm_Wi.cell_dofs(cell.index())
                    dofs_Wj = dm_Wj.cell_dofs(cell.index())
                    dofs_Vi = dm_Vi.cell_dofs(cell.index())
                    dofs_Vj = dm_Vj.cell_dofs(cell.index())
                    W_idx = numpy.ix_(dofs_Wi, dofs_Wj)
                    V_idx = numpy.ix_(dofs_Vi, dofs_Vj)
                    M2[W_idx] = data[V_idx]

            dofs_j = dm_Wj.dofs()
            idx = numpy.ix_(dofs_i, dofs_j)
            diff = compute_diff(M[idx], M2[idx])
            print(
                'Matrix part (%d, %d) has error %10.3e' % (i, j, diff),
                '<---' if diff > eps else '',
            )

    # Check that the original and rebuilt systems are identical
    assert compute_diff(M, M2) < eps
    assert compute_diff(v, v2) < eps
Example #3
    def create_measures(self):

        for rom_cell_counter, rom_cell in enumerate(df.cells(
                self._mesh_coarse)):

            form = FluxForm(df.Function(self._V), df.Function(self._Vc))
            facetfct = df.MeshFunction('size_t', self._mesh_fine,
                                       self._mesh_fine.topology().dim() - 1)
            facetfct.set_all(0)

            for local_facet_id, rom_facet in enumerate(df.facets(rom_cell)):
                for fom_facet in df.facets(self._mesh_fine):

                    mp = fom_facet.midpoint()

                    p0 = df.Vertex(self._mesh_coarse, rom_facet.entities(0)[0])
                    p1 = df.Vertex(self._mesh_coarse, rom_facet.entities(0)[1])
                    p0 = df.Point(np.array([p0.x(0), p0.x(1)]))
                    p1 = df.Point(np.array([p1.x(0), p1.x(1)]))

                    eps = mp.distance(p0) + mp.distance(p1) - p0.distance(p1)

                    if eps < 1e-12:

                        facetfct.set_value(fom_facet.index(),
                                           local_facet_id + 1)

                if self._rom_exterior_facets[rom_facet.index()]:
                    form.append_ds(
                        df.Measure('ds',
                                   domain=self._mesh_fine,
                                   subdomain_data=facetfct,
                                   subdomain_id=local_facet_id + 1))
                else:
                    form.append_dS(
                        df.Measure('dS',
                                   domain=self._mesh_fine,
                                   subdomain_data=facetfct,
                                   subdomain_id=local_facet_id + 1))

            cellfct = df.MeshFunction('size_t', self._mesh_fine,
                                      self._mesh_fine.topology().dim())
            cellfct.set_all(0)

            for fom_cell in df.cells(self._mesh_fine):
                if rom_cell.contains(fom_cell.midpoint()):
                    cellfct.set_value(fom_cell.index(), 1)

            form.append_dx(
                df.Measure('dx',
                           domain=self._mesh_fine,
                           subdomain_data=cellfct,
                           subdomain_id=1))
            self._flux_forms.append(form)

        self._initialized = True
Example #4
def friedrichs_const(o, p):
    if isinstance(o, Vertex):
        # TODO: fix using ghosted mesh
        not_working_in_parallel("Friedrichs computation on patch")
        d = max(v0.point().distance(v1.point())
                for c0 in cells(o) for v0 in vertices(c0)
                for c1 in cells(o) for v1 in vertices(c1))
        # FIXME: Implement the check
        _warn_friedrichs_lines()
        return d

    raise NotImplementedError
Example #5
def make_submesh(mesh, use_indices):
    '''Submesh + mapping of child to parent cell and vertex indices'''
    length0 = sum(c.volume() for c in df.cells(mesh))
    tdim = mesh.topology().dim()

    f = df.MeshFunction('size_t', mesh, tdim, 0)
    f.array()[use_indices] = 1

    submesh = df.SubMesh(mesh, f, 1)
    length1 = sum(c.volume() for c in df.cells(submesh))
    print_red('Reduced mesh has %g of the original volume' %
              (length1 / length0, ))
    return (submesh, submesh.data().array('parent_cell_indices', tdim),
            submesh.data().array('parent_vertex_indices', 0))
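A short usage sketch (hypothetical; assumes make_submesh and its print_red helper are defined as above):

import numpy as np
import dolfin as df

mesh = df.UnitSquareMesh(8, 8)
# Keep only the cells whose midpoint lies in the left half of the square
use_indices = np.array([c.index() for c in df.cells(mesh)
                        if c.midpoint().x() < 0.5])
submesh, parent_cells, parent_vertices = make_submesh(mesh, use_indices)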
Example #6
 def refine_maxh(self, maxh, uniform=False):
     """Refine mesh of FEM basis such that maxh of mesh is smaller than given value."""
     if maxh <= 0 or self.mesh.hmax() < maxh:            
         return self, self.project_onto, self.project_onto, 0
     ufl = self._fefs.ufl_element()
     mesh = self.mesh
     num_cells_refined = 0
     if uniform:
         while mesh.hmax() > maxh:
             num_cells_refined += mesh.num_cells()
             mesh = refine(mesh)         # NOTE: this global refine results in a red-refinement as opposed to bisection in the adaptive case
     else:
         while mesh.hmax() > maxh:
             cell_markers = CellFunction("bool", mesh)
             cell_markers.set_all(False)
             for c in cells(mesh):
                 if c.diameter() > maxh:
                     cell_markers[c.index()] = True
                     num_cells_refined += 1
             mesh = refine(mesh, cell_markers)
     if self._fefs.num_sub_spaces() > 1:
         new_fefs = VectorFunctionSpace(mesh, ufl.family(), ufl.degree())
     else:
         new_fefs = FunctionSpace(mesh, ufl.family(), ufl.degree())
     new_basis = FEniCSBasis(new_fefs)
     prolongate = new_basis.project_onto
     restrict = self.project_onto
     return new_basis, prolongate, restrict, num_cells_refined
Example #7
def is_continuous(mesh):
    '''
    We say that the embedded mesh is continuous if for every two cells
    there exists a connected path of (other) mesh cells between them.
    '''
    import networkx as nx
    assert mesh.topology().dim() < mesh.geometry().dim()
    # This amounts to the graph of the mesh having only one connected
    # component
    G = nx.Graph()
    if mesh.topology().dim() == 1:
        mesh.init(1, 0)
        G.add_edges_from((tuple(cell.entities(0)) for cell in df.cells(mesh)))
    else:
        tdim = mesh.topology().dim()

        graph_edges = set()
        # Use edge-cell connectivity to define the graph edges
        mesh.init(tdim-1, tdim)
        e2c = mesh.topology()(tdim-1, tdim)
        for e in range(mesh.num_entities(tdim-1)):
            cells = sorted(e2c(e))

            graph_edges.update(zip(cells[:-1], cells[1:]))
        G.add_edges_from(graph_edges)
        
    return nx.number_connected_components(G) == 1
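For instance, the boundary of a square is a single closed loop embedded in 2D, so it should be reported as continuous (a sketch assuming the module-level df = dolfin import the snippet relies on):

import dolfin as df

square = df.UnitSquareMesh(4, 4)
boundary = df.BoundaryMesh(square, 'exterior')  # tdim = 1 < gdim = 2
print(is_continuous(boundary))                  # expected: True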
Example #8
def from_mesh(cls, mesh, initial_point, k):
  print('Creating Mesh from dolfin.Mesh data.')

  # Make sure it is the right kind of mesh.
  print('Initializing mesh attributes (edges, faces, etc.)')
  mesh.init()  # This will do nothing if already called.
  check_mesh_type(mesh)

  # Compute extra data not stored on the object.
  print('Reading vertex list from the mesh object')
  vertex_list = list(dolfin.vertices(mesh))
  print('Reading edge list from the mesh object')
  edge_list = list(dolfin.edges(mesh))
  print('Reading facets list from the mesh object')
  # Use facets since they have facet.exterior() set.
  facets_list = list(dolfin.facets(mesh))
  # Get values specific to motion on the mesh.
  print('Reading cell list from the mesh object')
  cell_list = list(dolfin.cells(mesh))
  initial_face_index = get_face(dolfin.Point(*initial_point),
                                mesh, cell_list)

  print('Parsing exterior faces and creating Face objects')
  (all_vertices, triangles, face_local_bases, neighbor_faces,
   initial_face_index) = get_face_data(vertex_list, edge_list,
                                       facets_list, initial_face_index)

  return cls(k, initial_point, initial_face_index,
             all_vertices, triangles, face_local_bases, neighbor_faces)
Example #9
    def create_3d_mesh(self, mesh):

        nv = mesh.num_vertices()
        nc = mesh.num_cells()
        h = self.thickness

        mesh3 = df.Mesh()
        editor = df.MeshEditor()
        editor.open(mesh3, 3, 3)
        editor.init_vertices(2 * nv)
        editor.init_cells(3 * nc)

        for v in df.vertices(mesh):
            i = v.index()
            p = v.point()
            editor.add_vertex(i, p.x(), p.y(), 0)
            editor.add_vertex(i + nv, p.x(), p.y(), h)

        gid = 0
        for c in df.cells(mesh):
            i, j, k = c.entities(0)
            editor.add_cell(gid, i, j, k, i + nv)
            gid = gid + 1
            editor.add_cell(gid, j, j + nv, k, i + nv)
            gid = gid + 1
            editor.add_cell(gid, k, k + nv, j + nv, i + nv)
            gid = gid + 1

        editor.close()
        return mesh3
Example #10
def assert_dof_map_two_blocks_no_restriction(V1, V2, block_V):
    local_dimension1 = V1.dofmap().ownership_range()[1] - V1.dofmap().ownership_range()[0]
    local_dimension2 = V2.dofmap().ownership_range()[1] - V2.dofmap().ownership_range()[0]
    block_local_dimension = block_V.block_dofmap().ownership_range()[1] - block_V.block_dofmap().ownership_range()[0]
    assert local_dimension1 + local_dimension2 == block_local_dimension
    global_dimension1 = V1.dofmap().global_dimension()
    global_dimension2 = V2.dofmap().global_dimension()
    block_global_dimension = block_V.block_dofmap().global_dimension()
    assert global_dimension1 + global_dimension2 == block_global_dimension
    for c in cells(block_V.mesh()):
        V1_cell_dofs = V1.dofmap().cell_dofs(c.index())
        V1_cell_owned_local_dofs = [a for a in V1_cell_dofs if a < local_dimension1]
        V1_cell_unowned_local_dofs = [a for a in V1_cell_dofs if a >= local_dimension1]
        V2_cell_dofs = V2.dofmap().cell_dofs(c.index())
        V2_cell_owned_local_dofs = [a + local_dimension1 for a in V2_cell_dofs if a < local_dimension2]
        V2_cell_unowned_local_dofs = [a + local_dimension1 for a in V2_cell_dofs if a >= local_dimension2]
        V_cell_owned_local_dofs = concatenate((V1_cell_owned_local_dofs, V2_cell_owned_local_dofs))
        V_cell_unowned_local_dofs = concatenate((V1_cell_unowned_local_dofs, V2_cell_unowned_local_dofs))
        block_V_cell_dofs = block_V.block_dofmap().cell_dofs(c.index())
        block_V_cell_owned_local_dofs = [b for b in block_V_cell_dofs if b < block_local_dimension]
        block_V_cell_unowned_local_dofs = [b for b in block_V_cell_dofs if b >= block_local_dimension]
        assert_owned_local_dofs(V_cell_owned_local_dofs, block_V_cell_owned_local_dofs)
        assert_unowned_local_dofs(V_cell_unowned_local_dofs, block_V_cell_unowned_local_dofs)
    V_dof_coordinates = concatenate((V1.tabulate_dof_coordinates(), V2.tabulate_dof_coordinates()))
    block_V_dof_coordinates = block_V.tabulate_dof_coordinates()
    assert_tabulated_dof_coordinates(V_dof_coordinates, block_V_dof_coordinates)
Example #11
def get_regional_midpoints(strain_markers, mesh):

    coords = {
        region: {coord: []
                 for coord in range(3)}
        for region in range(18)
    }

    import dolfin

    for cell in dolfin.cells(mesh):

        # Get coordinates to cell midpoint
        x = cell.midpoint().x()
        y = cell.midpoint().y()
        z = cell.midpoint().z()

        # Get index of cell
        index = cell.index()

        region = strain_markers.array()[index]

        coords[region][0].append(x)
        coords[region][1].append(y)
        coords[region][2].append(z)

    mean_coords = {region: np.zeros(3) for region in range(18)}
    for i in range(18):
        for j in range(3):
            mean_coords[i][j] = np.mean(coords[i][j])

    return mean_coords, coords
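A hypothetical call, with cells marked by regions 0..17 purely for illustration (assumes the module-level np = numpy import the snippet relies on):

import dolfin
import numpy as np

mesh = dolfin.UnitCubeMesh(4, 4, 4)
strain_markers = dolfin.MeshFunction('size_t', mesh, 3, 0)
for cell in dolfin.cells(mesh):
    strain_markers[cell] = cell.index() % 18

mean_coords, coords = get_regional_midpoints(strain_markers, mesh)
print(mean_coords[0])  # mean midpoint of region 0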
Example #13
    def __init__(self, mesh, orientation):
        # Manifold assumption
        assert 1 <= mesh.topology().dim() < mesh.geometry().dim()
        gdim = mesh.geometry().dim()

        # Orientation from inside point
        if isinstance(orientation, (list, np.ndarray, tuple)):
            assert len(orientation) == gdim

            kwargs = {'x0%d' % i: val for i, val in enumerate(orientation)}
            orientation = ['x[%d] - x0%d' % (i, i) for i in range(gdim)]
            orientation = df.Expression(orientation, degree=1, **kwargs)
        
        assert orientation.ufl_shape == (gdim, )    

        V = df.VectorFunctionSpace(mesh, 'DG', 0, gdim)
        df.Function.__init__(self, V)
        n_values = self.vector().get_local()

        values = []
        for cell in df.cells(mesh):
            n = cell.cell_normal().array()[:gdim]
            x = cell.midpoint().array()[:gdim]
            # Disagree?
            if np.inner(orientation(x), n) < 0:
                n *= -1
            values.append(n/np.linalg.norm(n))
        values = np.array(values)

        for sub in range(gdim):
            dofs = V.sub(sub).dofmap().dofs()
            n_values[dofs] = values[:, sub]
        self.vector().set_local(n_values)
        self.vector().apply('insert')
Example #14
    def relocate(self):
        # Relocate particles on cells and processors
        p_map = self.particle_map
        # Map such that map[old_cell] = [(new_cell, particle_id), ...]
        # Ie new destination of particles formerly in old_cell
        new_cell_map = defaultdict(list)
        for cwp in p_map.values():
            for i, particle in enumerate(cwp.particles):
                point = df.Point(*particle.position)
                # Search only if particle moved outside original cell
                if not cwp.contains(point):
                    found = False
                    # Check neighbor cells
                    for neighbor in df.cells(cwp):
                        if neighbor.contains(point):
                            new_cell_id = neighbor.index()
                            found = True
                            break
                    # Do a completely new search if not found by now
                    if not found:
                        new_cell_id = self.locate(particle)
                    # Record to map
                    new_cell_map[cwp.index()].append((new_cell_id, i))

        # Rebuild locally the particles that end up on the process. Some
        # have cell_id == -1, i.e. they are on other process
        list_of_escaped_particles = []
        for old_cell_id, new_data in new_cell_map.items():
            # We iterate in reverse because removing a particle in normal
            # order would shift the rest of the list!
            for (new_cell_id, i) in sorted(new_data,
                                           key=lambda t: t[1],
                                           reverse=True):
                particle = p_map.pop(old_cell_id, i)

                if new_cell_id == -1 or new_cell_id == __UINT32_MAX__:
                    list_of_escaped_particles.append(particle)
                else:
                    p_map += self.mesh, new_cell_id, particle

        # Create a list of how many particles escapes from each processor
        self.my_escaped_particles[0] = len(list_of_escaped_particles)
        # Make all processes aware of the number of escapees
        comm.Allgather(self.my_escaped_particles, self.tot_escaped_particles)

        # Send particles to root
        if self.myrank != 0:
            for particle in list_of_escaped_particles:
                particle.send(0)

        # Receive the particles escaping from other processors
        if self.myrank == 0:
            for proc in self.other_processes:
                for i in range(self.tot_escaped_particles[proc]):
                    self.particle0.recv(proc)
                    list_of_escaped_particles.append(copy.deepcopy(self.particle0))

        # Put all travelling particles on all processes, then perform new search
        travelling_particles = comm.bcast(list_of_escaped_particles, root=0)
        self.add_particles(travelling_particles)
Example #15
def _adaptive_mesh_refinement(dx, phi, mu, sigma, omega, conv, voltages):
    from dolfin import cells, refine
    eta = _error_estimator(dx, phi, mu, sigma, omega, conv, voltages)
    mesh = phi.function_space().mesh()
    level = 0
    TOL = 1.0e-4
    E = sum([e * e for e in eta])
    E = sqrt(MPI.sum(E))
    info('Level %d: E = %g (TOL = %g)' % (level, E, TOL))
    # Mark cells for refinement
    REFINE_RATIO = 0.5
    cell_markers = MeshFunction('bool', mesh, mesh.topology().dim())
    eta_0 = sorted(eta, reverse=True)[int(len(eta) * REFINE_RATIO)]
    eta_0 = MPI.max(eta_0)
    for c in cells(mesh):
        cell_markers[c] = eta[c.index()] > eta_0
    # Refine mesh
    mesh = refine(mesh, cell_markers)
    # Plot mesh
    plot(mesh)
    interactive()
    exit()
    ## Compute error indicators
    #K = array([c.volume() for c in cells(mesh)])
    #R = array([abs(source([c.midpoint().x(), c.midpoint().y()])) for c in cells(mesh)])
    #gam = h*R*sqrt(K)
    return
Example #16
    def assemble_A_tilde_single_element(self):
        """
        Assemble block diagonal à and Ã_inv matrices where the blocks
        are the dofs in a single element
        """
        Aglobal = self.M if self.a_tilde_is_mass else self.A
        if self.A_tilde is None:
            At = Aglobal.copy()
            Ati = Aglobal.copy()
        else:
            At = self.A_tilde
            Ati = self.A_tilde_inv
        At.zero()
        Ati.zero()

        dm = self.Vuvw.dofmap()
        N = dm.cell_dofs(0).shape[0]
        Alocal = numpy.zeros((N, N), float)

        # Loop over cells and get the block diagonal parts (should be moved to C++)
        for cell in dolfin.cells(self.simulation.data['mesh'], 'regular'):
            # Get global dofs
            istart = Aglobal.local_range(0)[0]
            dofs = dm.cell_dofs(cell.index()) + istart

            # Get block diagonal part of A, invert and insert into approximations
            Aglobal.get(Alocal, dofs, dofs)
            Alocal_inv = numpy.linalg.inv(Alocal)
            At.set(Alocal, dofs, dofs)
            Ati.set(Alocal_inv, dofs, dofs)
        return At, Ati
Example #17
    def plot(self, mpl_ax, levels=50, lw=0.3, mesh_alpha=0, mesh_lw=0.2):
        """
        Plot the function on a matplotlib axes. Call .compute() first
        to calculate the stream function
        """
        if self._triangulation is None:
            from matplotlib.tri import Triangulation

            coords = self.mesh.coordinates()
            triangles = []
            for cell in df.cells(self.mesh):
                cell_vertices = cell.entities(0)
                triangles.append(cell_vertices)
            self._triangulation = Triangulation(coords[:, 0], coords[:, 1],
                                                triangles)

        if mesh_alpha > 0:
            mpl_ax.triplot(self._triangulation,
                           color='#000000',
                           alpha=mesh_alpha,
                           lw=mesh_lw)

        Z = self.psi.compute_vertex_values()
        if all(Z == 0):
            return

        mpl_ax.tricontour(
            self._triangulation,
            Z,
            levels,
            colors='#0000AA',
            linewidths=lw,
            linestyles='solid',
        )
Example #18
def convert_meshfunction_to_function(meshfunction, functionspace=None):
    """
    Convert a meshfunction to a function with 'p0' elements.

    Parameters
    ----------

    meshfunction : dolfin.cpp.mesh.MeshFunctionDouble
        The mesh function specifying a field that is constant over each cell.
    functionspace : dolfin.FunctionSpace (default None)
        The function space to be used for creating the dolfin.Function object.
        A 'p0' function space is used if None is specified.


    Returns
    -------

    func : dolfin.Function
        The scalar-valued function storing the values of the mesh function.

    """
    mesh = meshfunction.mesh()
    if functionspace is None:
        functionspace = dlf.FunctionSpace(mesh, "DG", 0)
    func = dlf.Function(functionspace)
    dofmap = functionspace.dofmap()
    new_values = np.zeros(func.vector().local_size())
    for cell in dlf.cells(mesh):
        new_values[dofmap.cell_dofs(cell.index())] = meshfunction[cell]
    func.vector().set_local(new_values)
    return func
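A usage sketch (dlf being the dolfin alias assumed by the snippet above):

import dolfin as dlf

mesh = dlf.UnitSquareMesh(4, 4)
mf = dlf.MeshFunction('double', mesh, mesh.topology().dim(), 0.0)
for cell in dlf.cells(mesh):
    mf[cell] = cell.volume()

func = convert_meshfunction_to_function(mf)  # P0 function, one value per cell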
Example #19
def distribute_DG0(Q, pop):
    """
    Returns the charge volume density in DG0 (discontinuous Lagrange space of 
    order 0). The charge of all the particles inside a given cell is simply 
    added together and then divided by the volume of the cell.

    Args:
        Q (DOLFIN: FunctionSpace): DG0 function space
        pop (PUNC: Population) 
    """
    assert Q.ufl_element().family() == 'Discontinuous Lagrange'
    assert Q.ufl_element().degree() == 0

    rho = df.Function(Q)

    for cell in df.cells(Q.mesh()):
        cellindex = cell.index()
        dofindex = Q.dofmap().cell_dofs(cellindex)
        accum = 0
        for particle in pop[cellindex]:
            accum += particle.q

        accum /= cell.volume()
        rho.vector()[dofindex] = accum

    return rho
Example #20
def assert_dof_map_single_block_no_restriction(V, block_V):
    local_dimension = V.dofmap().ownership_range()[1] - V.dofmap().ownership_range()[0]
    block_local_dimension = block_V.block_dofmap().ownership_range()[1] - block_V.block_dofmap().ownership_range()[0]
    assert local_dimension == block_local_dimension
    global_dimension = V.dofmap().global_dimension()
    block_global_dimension = block_V.block_dofmap().global_dimension()
    assert global_dimension == block_global_dimension
    V_local_to_global_unowned = V.dofmap().local_to_global_unowned()
    block_V_local_to_global_unowned = block_V.block_dofmap().local_to_global_unowned()
    block_V_local_to_global_unowned = unique([b//V.dofmap().index_map().block_size() for b in block_V_local_to_global_unowned])
    assert array_sorted_equal(V_local_to_global_unowned, block_V_local_to_global_unowned)
    for c in cells(block_V.mesh()):
        V_cell_dofs = V.dofmap().cell_dofs(c.index())
        V_cell_owned_local_dofs = [a for a in V_cell_dofs if a < local_dimension]
        V_cell_unowned_local_dofs = [a for a in V_cell_dofs if a >= local_dimension]
        V_cell_global_dofs = [V.dofmap().local_to_global_index(a) for a in V_cell_dofs]
        block_V_cell_dofs = block_V.block_dofmap().cell_dofs(c.index())
        block_V_cell_owned_local_dofs = [b for b in block_V_cell_dofs if b < block_local_dimension]
        block_V_cell_unowned_local_dofs = [b for b in block_V_cell_dofs if b >= block_local_dimension]
        block_V_cell_global_dofs = [block_V.block_dofmap().local_to_global_index(b) for b in block_V_cell_dofs]
        assert_owned_local_dofs(V_cell_owned_local_dofs, block_V_cell_owned_local_dofs)
        assert_unowned_local_dofs(V_cell_unowned_local_dofs, block_V_cell_unowned_local_dofs)
        assert_global_dofs(V_cell_global_dofs, block_V_cell_global_dofs)
    V_dof_coordinates = V.tabulate_dof_coordinates()
    block_V_dof_coordinates = block_V.tabulate_dof_coordinates()
    assert_tabulated_dof_coordinates(V_dof_coordinates, block_V_dof_coordinates)
Example #21
    def morph_mesh(self):
        """
        Move the mesh and update cached geometry information. It is assumed
        that the mesh velocities u_mesh0, u_mesh1 etc are already populated
        """
        sim = self.simulation

        # Get the mesh displacement
        for d in range(sim.ndim):
            umi = sim.data['u_mesh%d' % d]
            self.assigners[d].assign(self.displacement.sub(d), umi)
        self.displacement.vector()[:] *= sim.dt

        # Save the cell volumes before morphing
        mesh = sim.data['mesh']
        cvolp = sim.data['cvolp']
        dofmap_cvol = cvolp.function_space().dofmap()
        for cell in dolfin.cells(mesh):
            dofs = dofmap_cvol.cell_dofs(cell.index())
            assert len(dofs) == 1
            cvolp.vector()[dofs[0]] = cell.volume()

        # Move the mesh according to the given displacements
        dolfin.ALE.move(mesh, self.displacement)
        mesh.bounding_box_tree().build(mesh)
        sim.update_mesh_data(connectivity_changed=False)
Example #22
    def write_function(self, func, t):
        """
        Write a function to XDMF file. The function is from the original
        function space, but will be written in the subdivided function space
        with truly discontinuous elements
        """
        f = self.subdivided_function
        V = self.subdivided_function_space
        dm = V.dofmap()
        gdim = self.subdivided_mesh.geometry().dim()
        dof_coords = V.tabulate_dof_coordinates().reshape((-1, gdim))
        all_values = f.vector().get_local()
        f.rename(func.name(), func.name())

        vals = numpy.zeros(1, float)
        crds = numpy.zeros(2, float)

        # Evaluate the function at all subcell dof locations
        for subcell in dolfin.cells(self.subdivided_mesh):
            subcell_id = subcell.index()
            for dof in dm.cell_dofs(subcell_id):
                crds[:] = dof_coords[dof]
                parent_cell = dolfin.Cell(self.mesh,
                                          self.parent_cell[subcell_id])
                func.eval_cell(vals, crds, parent_cell)
                all_values[dof] = vals[0]

        f.vector().set_local(all_values)
        f.vector().apply('insert')
        self.xdmf.write(f, float(t))
Example #23
    def __init__(self, V, geo, name, f):
        self.V = V
        # get dofs lying in subdomain
        dofmap = V.dofmap()
        tup = geo.physicaldomain(name)
        sub = geo.subdomains
        mesh = geo.mesh

        subdofs = set()
        for i, cell in enumerate(dolfin.cells(mesh)):
            if sub[cell] in tup:
                celldofs = dofmap.cell_dofs(i)
                subdofs.update(celldofs)

        subdofs = np.array(list(subdofs), dtype="intc")
        d2v = dolfin.dof_to_vertex_map(V)
        co = mesh.coordinates()

        # create function with desired values
        # could also be implemented with Expression.eval_cell like pwconst
        bc_f = dolfin.Function(V)
        for dof in subdofs:
            x = co[d2v[dof]]
            bc_f.vector()[dof] = f(x)
        self.bc_f = bc_f
        self.dof_set = subdofs
Example #24
def invert_block_diagonal_matrix(V, M, Minv=None):
    """
    Given a block diagonal matrix (DG mass matrix or similar), use local
    dense inverses to compute the inverse matrix and return it, optionally
    reusing the given Minv tensor
    """
    mesh = V.mesh()
    dm = V.dofmap()
    N = dm.cell_dofs(0).shape[0]
    Mlocal = numpy.zeros((N, N), float)

    if Minv is None:
        Minv = dolfin.as_backend_type(M.copy())

    # Loop over cells and get the block diagonal parts (should be moved to C++)
    istart = M.local_range(0)[0]
    for cell in dolfin.cells(mesh, 'regular'):
        # Get global dofs
        dofs = dm.cell_dofs(cell.index()) + istart

        # Get block diagonal part of approx_A, invert it and insert into M⁻¹
        M.get(Mlocal, dofs, dofs)
        Mlocal_inv = numpy.linalg.inv(Mlocal)
        Minv.set(Mlocal_inv, dofs, dofs)

    Minv.apply('insert')
    return Minv
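For example, inverting a DG1 mass matrix (a sketch; it assumes a legacy dolfin build where GenericMatrix.get/set are exposed, which the function itself already requires):

import dolfin

mesh = dolfin.UnitSquareMesh(4, 4)
V = dolfin.FunctionSpace(mesh, 'DG', 1)
u, v = dolfin.TrialFunction(V), dolfin.TestFunction(V)
M = dolfin.assemble(u * v * dolfin.dx)
Minv = invert_block_diagonal_matrix(V, M)  # cell-block-wise exact inverse of M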
Example #25
def mark_biv_mesh(
    mesh, ffun=None, markers=None, tol=0.01, values={"lv": 0, "septum": 1, "rv": 2}
):

    from .ldrb import scalar_laplacians

    scalars = scalar_laplacians(mesh=mesh, ffun=ffun, markers=markers)

    for cell in df.cells(mesh):

        lv = scalars["lv"](cell.midpoint())
        rv = scalars["rv"](cell.midpoint())
        epi = scalars["epi"](cell.midpoint())

        print(cell.index(), "lv = {}, rv = {}".format(lv, rv))

        if (lv > tol or epi > 1 - tol) and rv < tol:
            print("LV")
            value = values["lv"]
            if lv < tol and rv > lv:
                value = values["rv"]
        elif (rv > tol or epi > 1 - tol) and lv < tol:
            print("RV")
            value = values["rv"]
        else:
            print("SEPTUM")
            value = values["septum"]

        mesh.domains().set_marker((cell.index(), value), 3)

    sfun = df.MeshFunction("size_t", mesh, 3, mesh.domains())
    return sfun
Example #26
def taylor_to_DG1_matrix_3D(V):
    """
    Create the per cell matrices that when matrix multiplied with the
    Taylor cell dofs return a vector of Lagrange cell dofs.
    This implementation handles DG1 function space V in 3D
    """
    mesh = V.mesh()
    vertices = mesh.coordinates()

    tdim = mesh.topology().dim()
    num_cells_owned = mesh.topology().ghost_offset(tdim)

    x = numpy.zeros((4, 3), float)
    A = numpy.zeros((num_cells_owned, 4, 4), float)
    for cell in cells(mesh):
        icell = cell.index()

        verts = cell.entities(0)
        x[0] = vertices[verts[0]]
        x[1] = vertices[verts[1]]
        x[2] = vertices[verts[2]]
        x[3] = vertices[verts[3]]
        xc = (x[0] + x[1] + x[2] + x[3]) / 4

        for i in range(4):
            dx, dy, dz = x[i, 0] - xc[0], x[i, 1] - xc[1], x[i, 2] - xc[2]
            A[icell, i, 0] = 1
            A[icell, i, 1] = dx
            A[icell, i, 2] = dy
            A[icell, i, 3] = dz

    return A
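The returned blocks are then applied cell by cell; a sketch of the intended use (the taylor_dofs values are made up for illustration, and a star import from dolfin is assumed as in the snippet above):

import dolfin
import numpy

mesh = dolfin.UnitCubeMesh(2, 2, 2)
V = dolfin.FunctionSpace(mesh, 'DG', 1)
A = taylor_to_DG1_matrix_3D(V)

icell = 0
taylor_dofs = numpy.array([1.0, 0.2, -0.1, 0.3])  # cell mean and x-, y-, z-slopes
lagrange_dofs = A[icell].dot(taylor_dofs)         # the four DG1 vertex dofs of the cell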
Example #27
def preprocess(simulation, level_set_view):
    """
    Compute distance between dofs
    """
    V = level_set_view.function_space()
    mesh = V.mesh()
    dm = V.dofmap()

    # Get coordinates of both regular and ghost dofs
    dofs_x, Nlocal = all_dof_coordinates(V)

    cell_dofs = [None] * mesh.num_cells()
    dof_cells = [[] for _ in range(V.dim())]
    dof_dist = {}
    for cell in dolfin.cells(mesh, 'all'):
        cid = cell.index()
        dofs = dm.cell_dofs(cid)
        cell_dofs[cid] = list(dofs)
        for dof in dofs:
            dof_cells[dof].append(cid)

            # Store distance between the cell dofs
            for dof2 in dofs:
                if dof == dof2:
                    continue
                p1 = dofs_x[dof]
                p2 = dofs_x[dof2]
                vec = p1 - p2
                d = (vec[0]**2 + vec[1]**2 + vec[2]**2)**0.5
                dof_dist[(dof, dof2)] = d
                dof_dist[(dof2, dof)] = d

    return dofs_x, dof_dist, cell_dofs, dof_cells
Example #28
def test_cell_midpoints(D):
    from ocellaris.solver_parts.slope_limiter.limiter_cpp_utils import SlopeLimiterInput

    if D == 2:
        mesh = dolfin.UnitSquareMesh(4, 4)
    else:
        mesh = dolfin.UnitCubeMesh(2, 2, 2)

    Vx = dolfin.FunctionSpace(mesh, 'DG', 2)
    V0 = dolfin.FunctionSpace(mesh, 'DG', 0)

    py_inp = SlopeLimiterInput(mesh, Vx, V0)
    cpp_inp = py_inp.cpp_obj

    all_ok = True
    for cell in dolfin.cells(mesh):
        cid = cell.index()
        mp = cell.midpoint()
        cpp_mp = cpp_inp.cell_midpoints[cid]
        for d in range(D):
            ok = dolfin.near(mp[d], cpp_mp[d])
            if not ok:
                print(
                    '%3d %d - %10.3e %10.3e' % (cid, d, mp[d], cpp_mp[d]),
                    '<--- ERROR' if not ok else '',
                )
                all_ok = False

    assert all_ok
Example #29
def assert_dof_map_test_two_blocks_with_restriction(V1, V2, block_V):
    local_dimension1 = V1.dofmap().ownership_range()[1] - V1.dofmap().ownership_range()[0]
    local_dimension2 = V2.dofmap().ownership_range()[1] - V2.dofmap().ownership_range()[0]
    block_local_dimension = block_V.block_dofmap().ownership_range()[1] - block_V.block_dofmap().ownership_range()[0]
    # Create a map from all dofs to subset of kept dofs
    map_block_to_original1 = block_V.block_dofmap().block_to_original(0)
    map_block_to_original2 = block_V.block_dofmap().block_to_original(1)
    kept_dofs1 = map_block_to_original1.values()
    kept_dofs2 = map_block_to_original2.values()
    map_block_to_original = dict()
    for (b1, a1) in map_block_to_original1.items():
        map_block_to_original[b1] = a1
    for (b2, a2) in map_block_to_original2.items():
        map_block_to_original[b2] = a2 + local_dimension1
    # Assert equality
    for c in cells(block_V.mesh()):
        V1_cell_dofs = V1.dofmap().cell_dofs(c.index())
        V1_cell_owned_local_dofs = [a1 for a1 in V1_cell_dofs if a1 in kept_dofs1 and a1 < local_dimension1]
        V1_cell_unowned_local_dofs = [a1 for a1 in V1_cell_dofs if a1 in kept_dofs1 and a1 >= local_dimension1]
        V2_cell_dofs = V2.dofmap().cell_dofs(c.index())
        V2_cell_owned_local_dofs = [a2 + local_dimension1 for a2 in V2_cell_dofs if a2 in kept_dofs2 and a2 < local_dimension2]
        V2_cell_unowned_local_dofs = [a2 + local_dimension1 for a2 in V2_cell_dofs if a2 in kept_dofs2 and a2 >= local_dimension2]
        V_cell_owned_local_dofs = concatenate((V1_cell_owned_local_dofs, V2_cell_owned_local_dofs))
        V_cell_unowned_local_dofs = concatenate((V1_cell_unowned_local_dofs, V2_cell_unowned_local_dofs))
        block_V_cell_dofs = block_V.block_dofmap().cell_dofs(c.index())
        block_V_cell_owned_local_dofs = [map_block_to_original[b] for b in block_V_cell_dofs if b < block_local_dimension]
        block_V_cell_unowned_local_dofs = [map_block_to_original[b] for b in block_V_cell_dofs if b >= block_local_dimension]
        assert_owned_local_dofs(V_cell_owned_local_dofs, block_V_cell_owned_local_dofs)
        assert_unowned_local_dofs(V_cell_unowned_local_dofs, block_V_cell_unowned_local_dofs)
Example #30
def delta_dg(mesh, expr):
    V = df.FunctionSpace(mesh, "DG", 0)
    m = df.interpolate(expr, V)

    n = df.FacetNormal(mesh)
    h = df.CellSize(mesh)
    h_avg = (h('+') + h('-')) / 2

    alpha = 1.0
    gamma = 0.0

    u = df.TrialFunction(V)
    v = df.TestFunction(V)

    # for DG 0 case, only term contain alpha is nonzero
    a = df.dot(df.grad(v), df.grad(u))*df.dx \
        - df.dot(df.avg(df.grad(v)), df.jump(u, n))*df.dS \
        - df.dot(df.jump(v, n), df.avg(df.grad(u)))*df.dS \
        + alpha/h_avg*df.dot(df.jump(v, n), df.jump(u, n))*df.dS \
        - df.dot(df.grad(v), u*n)*df.ds \
        - df.dot(v*n, df.grad(u))*df.ds \
        + gamma/h*v*u*df.ds

    K = df.assemble(a).array()
    L = df.assemble(v * df.dx).array()

    h = -np.dot(K, m.vector().array()) / (L)

    xs = []
    for cell in df.cells(mesh):
        xs.append(cell.midpoint().x())

    print(len(xs), len(h))
    return xs, h
Example #31
def facet_dofmap(V):
    """
    When working with Crouzeix-Raviart and DGT elements with dofs on the facets
    it can be useful to get the dof corresponding to a facet index.

    Returns a list mapping from local facet index to local dof

    TODO: verify if dofmap.dofs(mesh, mesh.topology().dim()-1) is guaranteed to
          always give the same result as this
    """
    mesh = V.mesh()
    dofmap = V.dofmap()

    ndim = V.ufl_cell().topological_dimension()

    # Loop through cells and get dofs for each cell
    facet_dofmap = [None] * mesh.num_facets()
    for cell in dolfin.cells(mesh, 'all'):
        dofs = dofmap.cell_dofs(cell.index())
        facet_idxs = cell.entities(ndim - 1)
        assert len(dofs) == len(facet_idxs)

        # Loop through connected facets and store dofs for each facet
        for fidx, dof in zip(facet_idxs, dofs):
            facet_dofmap[fidx] = dof

    return facet_dofmap
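A usage sketch with a Crouzeix-Raviart space, which carries exactly one dof per facet (assumes a dolfin version where dolfin.cells(mesh, 'all') is available, as the function above requires):

import dolfin

mesh = dolfin.UnitSquareMesh(4, 4)
V = dolfin.FunctionSpace(mesh, 'Crouzeix-Raviart', 1)
f2d = facet_dofmap(V)
print(f2d[0])  # the dof sitting on facet 0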
Example #32
def test_taylor_values(dim, degree):
    """
    Check that the Lagrange -> Taylor projection gives the correct Taylor values
    """
    if dim == 2:
        mesh = dolfin.UnitSquareMesh(4, 4)
    else:
        mesh = dolfin.UnitCubeMesh(2, 2, 2)

    # Setup Lagrange function with given derivatives and constants
    V = dolfin.FunctionSpace(mesh, 'DG', degree)
    u = dolfin.Function(V)
    if dim == 2:
        coeffs = [1, 2, -3.0] if degree == 1 else [1, 2, -3.0, -2.5, 4.2, -1.0]
    else:
        coeffs = ([1, 2, -3.0, 2.5] if degree == 1 else
                  [1, 2, -3.0, 2.5, -1.3, 4.2, -1.0, -4.2, 2.66, 3.14])
    make_taylor_func(u, coeffs)

    # Convert to Taylor
    t = dolfin.Function(V)
    lagrange_to_taylor(u, t)

    # Check that the target values are obtained
    dm = V.dofmap()
    vals = t.vector().get_local()
    for cell in dolfin.cells(mesh):
        cell_dofs = dm.cell_dofs(cell.index())
        cell_vals = vals[cell_dofs]
        assert all(abs(cell_vals - coeffs) < 1e-13)
Example #33
def save_inp_of_inital_m(m, file_name):
    mesh = m.mesh()
    data_type_number = 3
    f = open(file_name, "w")
    head = "%d %d %d 0 0\n" % (mesh.num_vertices(), mesh.num_cells(),
                               data_type_number)
    f.write(head)
    xyz = mesh.coordinates()
    if np.max(xyz) < 0.5:
        print "Converting unit_length from m to nm."
        xyz = xyz * 1e9
    for i in range(len(xyz)):
        f.write("%d %0.15e %0.15e %0.15e\n" %
                (i + 1, xyz[i][0], xyz[i][1], xyz[i][2]))

    for c in df.cells(mesh):
        id = c.index()
        ce = c.entities(0)
        f.write("%d 1 tet %d %d %d %d\n" %
                (id + 1, ce[0] + 1, ce[1] + 1, ce[2] + 1, ce[3] + 1))
    f.write("3 1 1 1\nM_x, none\nM_y, none\nM_z, none\n")

    data = m.get_numpy_array_debug().reshape(3, -1)
    for i in range(mesh.num_vertices()):
        f.write("%d %0.15e %0.15e %0.15e\n" %
                (i + 1, data[0][i], data[1][i], data[2][i]))

    f.close()
Example #35
def facet_info_and_dofmap(V):
    """
    Return three lists

      1) A list which maps facet index to dof
      2) A list which maps facet index to facet area
      3) A list which maps facet index to facet midpoint
    """
    mesh = V.mesh()
    dofmap = V.dofmap()

    ndim = V.ufl_cell().topological_dimension()

    # Loop through cells and get dofs for each cell
    facet_dofs = [None] * mesh.num_facets()
    facet_area = [None] * mesh.num_facets()
    facet_midp = [None] * mesh.num_facets()
    for cell in dolfin.cells(mesh):
        dofs = dofmap.cell_dofs(cell.index())
        facet_idxs = cell.entities(ndim - 1)

        # Only works for functions with one dof on each facet
        assert len(dofs) == len(facet_idxs)

        # Loop through connected facets and store dofs for each facet
        for i, fidx in enumerate(facet_idxs):
            facet_dofs[fidx] = dofs[i]
            facet_area[fidx] = cell.facet_area(i)
            mp = dolfin.Facet(mesh, fidx).midpoint()
            facet_midp[fidx] = (mp.x(), mp.y())

    return facet_dofs, facet_area, facet_midp
Example #36
def assert_dof_map_single_block_with_restriction(V, block_V):
    local_dimension = V.dofmap().ownership_range()[1] - V.dofmap().ownership_range()[0]
    block_local_dimension = block_V.block_dofmap().ownership_range()[1] - block_V.block_dofmap().ownership_range()[0]
    # Create a map from all dofs to subset of kept dofs
    map_block_to_original = block_V.block_dofmap().block_to_original(0)
    kept_dofs = map_block_to_original.values()
    # Assert equality
    for c in cells(block_V.mesh()):
        V_cell_dofs = V.dofmap().cell_dofs(c.index())
        V_cell_owned_local_dofs = [
            a for a in V_cell_dofs if a in kept_dofs and a < local_dimension
        ]
        V_cell_unowned_local_dofs = [
            a for a in V_cell_dofs if a in kept_dofs and a >= local_dimension
        ]
        block_V_cell_dofs = block_V.block_dofmap().cell_dofs(c.index())
        block_V_cell_owned_local_dofs = [
            map_block_to_original[b] for b in block_V_cell_dofs
            if b < block_local_dimension
        ]
        block_V_cell_unowned_local_dofs = [
            map_block_to_original[b] for b in block_V_cell_dofs
            if b >= block_local_dimension
        ]
        assert_owned_local_dofs(V_cell_owned_local_dofs,
                                block_V_cell_owned_local_dofs)
        assert_unowned_local_dofs(V_cell_unowned_local_dofs,
                                  block_V_cell_unowned_local_dofs)
Example #37
def refine_perimeter(mesh):
    """Refine largest boundary triangles."""
    mesh.init(1, 2)
    perimeter = [c for c in cells(mesh)
                 if any([f.exterior() for f in facets(c)])]
    marker = CellFunction('bool', mesh, False)
    max_size = max([c.diameter() for c in perimeter])
    for c in perimeter:
        marker[c] = c.diameter() > 0.75 * max_size
    return refine(mesh, marker)
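Applied repeatedly, each sweep splits the largest boundary cells (a sketch assuming the star import from legacy dolfin that the snippet relies on):

from dolfin import *

mesh = UnitSquareMesh(8, 8)
for _ in range(3):
    mesh = refine_perimeter(mesh)
print(mesh.num_cells())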
Example #38
def refine_cylinder(mesh):
    'Refine mesh by cutting cells around the cylinder.'
    h = mesh.hmin()
    center = Point(c_x, c_y)
    cell_f = CellFunction('bool', mesh, False)
    for cell in cells(mesh):
        if cell.midpoint().distance(center) < r + h:
            cell_f[cell] = True
    mesh = refine(mesh, cell_f)

    return mesh
Example #39
def build_cell_to_dof(V):
  '''Build a dictionary between cell and dofs that it has.'''
  mesh = V.mesh()
  dofmap = V.dofmap()
  cell_to_dof = {}
  for cell in cells(mesh):
    cell_to_dof[cell.index()] = []
    for dof in dofmap.cell_dofs(cell.index()): 
      cell_to_dof[cell.index()].append(dof)

  return cell_to_dof
Example #40
def build_cell_to_edge(V):
  '''Build mapping between cell and edges that form it.'''
  cell_to_edge = {}
  mesh = V.mesh()
  mesh.init(1)
  for cell in cells(mesh):
    cell_to_edge[cell.index()] = []
    for edge in edges(cell):
      cell_to_edge[cell.index()].append(edge.index())

  return cell_to_edge
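The two helpers are typically built together; a minimal sketch (again assuming a star import from dolfin, which both rely on):

from dolfin import *

mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, 'CG', 1)
cell_to_dof = build_cell_to_dof(V)
cell_to_edge = build_cell_to_edge(V)
print(cell_to_dof[0], cell_to_edge[0])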
Example #41
def weighted_H1_norm(w, vec, piecewise=False):
    if piecewise:
        DG = FunctionSpace(vec.basis.mesh, "DG", 0)
        s = TestFunction(DG)
        ae = assemble(w * inner(nabla_grad(vec._fefunc), nabla_grad(vec._fefunc)) * s * dx)
        norm_vec = np.array([sqrt(e) for e in ae])
        # map DG dofs to cell indices
        dofs = [DG.dofmap().cell_dofs(c.index())[0] for c in cells(vec.basis.mesh)]
        norm_vec = norm_vec[dofs]
    else:
        ae = assemble(w * inner(nabla_grad(vec._fefunc), nabla_grad(vec._fefunc)) * dx)
        norm_vec = sqrt(ae)
    return norm_vec
Example #42
 def get_dof_coordinates(self):
     V = self._fefs
     # degrees of freedom
     N = V.dofmap().global_dimension()
     c4dof = np.zeros((N, V.mesh().geometry().dim()))
     # evaluate nodes matrix by local-to-global map on each cells
     for c in cells(V.mesh()):
         # coordinates of nodes in current cell
         cell_c4dof = V.dofmap().tabulate_coordinates(c)
         # global indices of nodes in current cell
         nodes = V.dofmap().cell_dofs(c.index())
         # set global nodes coordinates
         c4dof[nodes] = cell_c4dof
     return c4dof
Example #43
def refine_boundary_layers(mesh, s, d, x0, x1):

   from dolfin import CellFunction, cells, refine, DOLFIN_EPS

   h = mesh.hmax()
   cell_markers = CellFunction('bool', mesh)
   cell_markers.set_all(False)

   for cell in cells(mesh):
      x = cell.midpoint()
      for i, d_ in enumerate(d):
         if x[d_] > (x1[i]-s*h-DOLFIN_EPS) or x[d_] < (s*h + x0[i] + DOLFIN_EPS):
            cell_markers[cell] = True
         
   return refine(mesh, cell_markers)
Example #44
def poincare_friedrichs_cutoff(o, p):
    if isinstance(o, Mesh):
        # TODO: easy fix - ghosted mesh + missing reduction
        not_working_in_parallel("PF cutoff on mesh")
        return max(poincare_friedrichs_cutoff(v, p) for v in vertices(o))

    if isinstance(o, Vertex):
        # TODO: fix using ghosted mesh
        not_working_in_parallel("PF cutoff on patch")
        hat_fun_grad = max(hat_function_grad(o, c) for c in cells(o))
        if any(f.exterior() for f in facets(o)):
            return 1.0 + friedrichs_const(o, p) * hat_fun_grad
        else:
            return 1.0 + poincare_const(o, p) * hat_fun_grad

    raise NotImplementedError
Example #45
def build_edge_to_dof(V, cell_to_edge):
  '''Build mapping between edges and dofs on it.'''
  edge_to_dof = {}
  mesh = V.mesh()
  dofmap = V.dofmap()
  for cell in cells(mesh):
    cell_edges = cell_to_edge[cell.index()]
    for i in range(3): # there are 3 edges in cell
      edge = cell_edges[i]
      edge_dofs = dofmap.cell_dofs(cell.index())[dofmap.tabulate_facet_dofs(i)]
      if edge in edge_to_dof:
        for edge_dof in edge_dofs:
          edge_to_dof[edge].add(edge_dof)
      else:
        edge_to_dof[edge] = set(edge_dofs)

  return edge_to_dof
Example #46
def computeEdgeCRDofArray(V, mesh, B=None):
    # dof map, dim_V = 2 * num_E
    num_E = mesh.num_facets()
    dofmap = V.dofmap()
    edgeCRDofArray = np.zeros((num_E, 2))

    # loop over cells and fill array
    for k, cell in enumerate(cells(mesh)):
        # list of dof-indices for edges of the cell
        dofs = dofmap.cell_dofs(cell.index())
        for i, facet in enumerate(facets(cell)):
            # print 'cell: %3g  ||  i: %3g   || facet: %3g || dof[i]: %3g' \
            #     % (cell.index(), i, facet.index(), dofs[i])
            # corresponding DoFs (2 basisfct per edge)
            edgeCRDofArray[facet.index()] = [dofs[i], dofs[i+3]]
            # edgeCRDofArray[facet.index()] = [dofs[i], dofs[i]+1]
            # every interior edge is visited twice, but that is fine!

    return edgeCRDofArray
Example #47
def _error_estimator(dx, phi, mu, sigma, omega, conv, voltages):
    '''Simple error estimator from

        A posteriori error estimation and adaptive mesh-refinement techniques;
        R. Verfürth;
        Journal of Computational and Applied Mathematics;
        Volume 50, Issues 1-3, 20 May 1994, Pages 67-83;
        <https://www.sciencedirect.com/science/article/pii/0377042794902909>.

    The strong PDE is

        - div(1/(mu r) grad(rphi)) + <u, 1/r grad(rphi)> + i sigma omega phi
      = sigma v_k / (2 pi r).
    '''
    from dolfin import cells
    mesh = phi.function_space().mesh()
    # Assemble the cell-wise residual in DG space
    DG = FunctionSpace(mesh, 'DG', 0)
    # get residual in DG
    v = TestFunction(DG)
    R = _residual_strong(dx, v, phi, mu, sigma, omega, conv, voltages)
    r_r = assemble(R[0])
    r_i = assemble(R[1])
    r = r_r * r_r + r_i * r_i
    visualize = True
    if visualize:
        # Plot the cell-wise residual
        u = TrialFunction(DG)
        a = zero() * dx(0)
        subdomain_indices = mu.keys()
        for i in subdomain_indices:
            a += u * v * dx(i)
        A = assemble(a)
        R2 = Function(DG)
        solve(A, R2.vector(), r)
        plot(R2, title='||R||^2')
        interactive()
    K = r.array()
    info('%r' % K)
    h = numpy.array([c.diameter() for c in cells(mesh)])
    eta = h * numpy.sqrt(K)
    return eta
Example #48
def plot_mesh(mesh, savefig=False, show=False):
    n = mesh.num_vertices()
    d = mesh.geometry().dim()

    # Create the triangulation
    mesh_coordinates = mesh.coordinates().reshape((n, d))
    triangles = np.asarray([cell.entities(0) for cell in df.cells(mesh)])
    triangulation = tri.Triangulation(mesh_coordinates[:, 0],
                                      mesh_coordinates[:, 1],
                                      triangles)

    triangulation.x *= 1e6
    triangulation.y *= 1e6
    # Plot the mesh
    fig = plt.figure(figsize=(7.0, 7.0))
    plt.triplot(triangulation)
    plt.xlabel(r'$x(\mu m)$')
    plt.ylabel(r'$y(\mu m)$')

    if savefig != False:
        plt.savefig(savefig)
    if show != False:
        plt.show()
    return None
Example #49
editor.init_vertices(6)
editor.init_cells(2)

vertex_0 = Vertex(mesh, 0)
vertex_1 = Vertex(mesh, 1)
vertex_2 = Vertex(mesh, 2)
vertex_3 = Vertex(mesh, 3)

vertex_4 = Vertex(mesh, 4)
vertex_5 = Vertex(mesh, 5)

editor.add_cell(0, 1, 2, 3)
editor.add_cell(1, 0, 2, 3)

editor.close()


t = IonTag("foo", 3, "int", mesh)

for v in vertices(mesh):
    t[v] = [1, 2, 3]

for c in cells(mesh):
    t[c] = [4, 5, 6, 7]


v = MeshEntity(mesh, 0, 1)

print(t[v])
Example #50
def create_joint_mesh(meshes, destmesh=None, additional_refine=0):
    if destmesh is None:
        # start with finest mesh to avoid (most) refinements
#        hmin = [m.hmin() for m in meshes]
#        mind = hmin.index(min(hmin))
        numcells = [m.num_cells() for m in meshes]
        mind = numcells.index(max(numcells))
        destmesh = meshes.pop(mind)

    try:
        # test for old FEniCS version < 1.2
        destmesh.closest_cell(Point(0,0))
        bbt = None
    except:
        # FEniCS > 1.2
        bbt = destmesh.bounding_box_tree()

    # setup parent cells
    parents = {}
    for c in cells(destmesh):
        parents[c.index()] = [c.index()]
    PM = []

    # refinement loop for destmesh
    for m in meshes:
        # loop until all cells of destmesh are finer than the respective cells in the set of meshes
        while True: 
            cf = CellFunction("bool", destmesh)
            cf.set_all(False)
            rc = 0      # counter for number of marked cells
            # get cell sizes of current mesh
            h = [c.diameter() for c in cells(destmesh)]
            # check all cells with destination sizes and mark for refinement when destination mesh is coarser (=larger)
            for c in cells(m):
                p = c.midpoint()
                if bbt is not None:
                    # FEniCS > 1.2
                    cid = bbt.compute_closest_entity(p)[0]
                else:
                    # FEniCS < 1.2
                    cid = destmesh.closest_cell(p)
                if h[cid] > c.diameter():
                    cf[cid] = True
                    rc += 1
            # carry out refinement if any cells are marked
            if rc:
                # refine marked cells
                newmesh = refine(destmesh, cf)
                # determine parent cell association map
                pc = newmesh.data().array("parent_cell", newmesh.topology().dim())
                pmap = defaultdict(list)
                for i, cid in enumerate(pc):
                    pmap[cid].append(i)
                PM.append(pmap)
                # set refined mesh as current mesh
                destmesh = newmesh
            else:
                break

        # carry out additional uniform refinements
        for _ in range(additional_refine):
            # refine uniformly
            newmesh = refine(destmesh)
            # determine parent cell association map
            pc = newmesh.data().array("parent_cell", newmesh.topology().dim())
            pmap = defaultdict(list)
            for i, cid in enumerate(pc):
                pmap[cid].append(i)
            PM.append(pmap)
            # set refined mesh as current mesh
            destmesh = newmesh

    # determine association to parent cells
    for level in range(len(PM)):
        for parentid, childids in parents.items():
            newchildids = []
            for cid in childids:
                for newcid in PM[level][cid]:
                    newchildids.append(newcid)
            parents[parentid] = newchildids
    
    return destmesh, parents
Example #51
    def _evaluateLocalEstimator(cls, mu, w, coeff_field, pde, f, quadrature_degree, epsilon=1e-5):
        """Evaluation of patch local equilibrated estimator."""

        # prepare numerical flux and f
        sigma_mu, f_mu = evaluate_numerical_flux(w, mu, coeff_field, f)

        # ###################
        # ## MIXED PROBLEM ##
        # ###################

        # get setup data for mixed problem
        V = w[mu]._fefunc.function_space()
        mesh = V.mesh()
        mesh.init()
        degree = element_degree(w[mu]._fefunc)

        # data for nodal bases
        V_dm = V.dofmap()
        V_dofs = dict([(i, V_dm.cell_dofs(i)) for i in range(mesh.num_cells())])
        V1 = FunctionSpace(mesh, 'CG', 1)   # V1 is to define nodal base functions
        phi_z = Function(V1)
        phi_coeffs = np.ndarray(V1.dim())
        vertex_dof_map = V1.dofmap().vertex_to_dof_map(mesh)
        # vertex_dof_map = vertex_to_dof_map(V1)
        dof_list = vertex_dof_map.tolist()
        # DG0 localisation
        DG0 = FunctionSpace(mesh, 'DG', 0)
        DG0_dofs = dict([(c.index(),DG0.dofmap().cell_dofs(c.index())[0]) for c in cells(mesh)])
        dg0 = TestFunction(DG0)
        # characteristic function of patch
        xi_z = Function(DG0)
        xi_coeffs = np.ndarray(DG0.dim())
        # mesh data
        h = CellSize(mesh)
        n = FacetNormal(mesh)
        cf = CellFunction('size_t', mesh)
        # setup error estimator vector
        eq_est = np.zeros(DG0.dim())

        # setup global equilibrated flux vector
        DG = VectorFunctionSpace(mesh, "DG", degree)
        DG_dofmap = DG.dofmap()

        # define form functions
        tau = TrialFunction(DG)
        v = TestFunction(DG)

        # define global tau
        tau_global = Function(DG)
        tau_global.vector()[:] = 0.0

        # iterate vertices
        for vertex in vertices(mesh):
            # get patch cell indices
            vid = vertex.index()
            patch_cid, FF_inner, FF_boundary = get_vertex_patch(vid, mesh, layers=1)

            # set nodal basis function
            phi_coeffs[:] = 0
            phi_coeffs[dof_list.index(vid)] = 1
            phi_z.vector()[:] = phi_coeffs

            # set characteristic function and mark patch
            cf.set_all(0)
            xi_coeffs[:] = 0
            for cid in patch_cid:
                xi_coeffs[DG0_dofs[int(cid)]] = 1
                cf[int(cid)] = 1
            xi_z.vector()[:] = xi_coeffs

            # determine local dofs
            lDG_cell_dofs = dict([(cid, DG_dofmap.cell_dofs(cid)) for cid in patch_cid])
            lDG_dofs = [cd.tolist() for cd in lDG_cell_dofs.values()]
            lDG_dofs = list(iter.chain(*lDG_dofs))  # 'iter' here presumably aliases itertools

            # print "\nlocal DG subspace has dimension", len(lDG_dofs), "degree", degree, "cells", len(patch_cid), patch_cid
            # print "local DG_cell_dofs", lDG_cell_dofs
            # print "local DG_dofs", lDG_dofs

            # create patch measures
            dx = Measure('dx')[cf]
            dS = Measure('dS')[FF_inner]

            # define forms
            alpha = Constant(1 / epsilon) / h
            a = inner(tau,v) * phi_z * dx(1) + alpha * div(tau) * div(v) * dx(1) + avg(alpha) * jump(tau,n) * jump(v,n) * dS(1)\
                + avg(alpha) * jump(xi_z * tau,n) * jump(v,n) * dS(2)
            L = -alpha * (div(sigma_mu) + f) * div(v) * phi_z * dx(1)\
                - avg(alpha) * jump(sigma_mu,n) * jump(v,n) * avg(phi_z)*dS(1)

    #        print "L2 f + div(sigma)", assemble((f + div(sigma)) * (f + div(sigma)) * dx(0))

            # assemble forms
            lhs = assemble(a, form_compiler_parameters={'quadrature_degree': quadrature_degree})
            rhs = assemble(L, form_compiler_parameters={'quadrature_degree': quadrature_degree})

            # convert DOLFIN representation to scipy sparse arrays
            rows, cols, values = lhs.data()
            lhsA = sps.csr_matrix((values, cols, rows)).tocoo()

            # slice sparse matrix and solve linear problem
            lhsA = coo_submatrix_pull(lhsA, lDG_dofs, lDG_dofs)
            lx = spsolve(lhsA, rhs.array()[lDG_dofs])
            # print ">>> local solution lx", type(lx), lx
            local_tau = Function(DG)
            local_tau.vector()[lDG_dofs] = lx
            # print "div(tau)", assemble(inner(div(local_tau),div(local_tau))*dx(1))

            # add up local fluxes
            tau_global.vector()[lDG_dofs] += lx

        # evaluate estimator
        # maybe TODO: re-define measure dx
        eq_est = assemble( inner(tau_global, tau_global) * dg0 * (dx(0)+dx(1)),\
                           form_compiler_parameters={'quadrature_degree': quadrature_degree})

        # reorder according to cell ids
        eq_est = eq_est[list(DG0_dofs.values())].array()
        global_est = np.sqrt(np.sum(eq_est))
        # eq_est_global = assemble( inner(tau_global, tau_global) * (dx(0)+dx(1)), form_compiler_parameters={'quadrature_degree': quadrature_degree} )
        # global_est2 = np.sqrt(np.sum(eq_est_global))
        return global_est, FlatVector(np.sqrt(eq_est))#, tau_global
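The helper coo_submatrix_pull is not shown in this example; from the call site it apparently extracts the submatrix of the patch dofs from the assembled sparse matrix. A plausible sketch under that assumption (the name and behaviour are guesses from the call site, not the original implementation):

def coo_submatrix_pull_sketch(A, rows, cols):
    # A is a scipy.sparse matrix in COO format.
    # Assumed behaviour: restrict A to the given row/column indices.
    # Row selection is cheap on CSR, column selection on CSC.
    return A.tocsr()[rows, :].tocsc()[:, cols].tocsr()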
Example #52
    def _evaluateResidualEstimator(cls, mu, w, coeff_field, pde, f, quadrature_degree):
        """Evaluate the residual error according to EGSZ (5.7) which consists of volume terms (5.3) and jump terms (5.5).

            .. math:: \eta_{\mu,T}(w_N) &:= h_T || \overline{a}^{-1/2} (f\delta_{\mu,0} + \nabla\overline{a}\cdot\nabla w_{N,\mu}
                                + \sum_{m=1}^\infty \nabla a_m\cdot\nabla( \alpha^m_{\mu_m+1}\Pi_\mu^{\mu+e_m} w_{N,\mu+e_m}
                                - \alpha_{\mu_m}^m w_{N,\mu} + \alpha_{\mu_m-1}^m\Pi_\mu^{\mu_m-e_m} w_{N,\mu-e_m} ||_{L^2(T)}\\
          \eta_{\mu,S}(w_N) &:= h_S^{-1/2} || \overline{a}^{-1/2} [(\overline{a}\nabla w_{N,\mu} + \sum_{m=1}^\infty a_m\nabla
                                  ( \alpha_{\mu_m+1}^m\Pi_\mu^{\mu+e_m} w_{N,\mu+e_m} - \alpha_{\mu_m}^m w_{N,\mu}
                                  + \alpha_{\mu_m-1}^m\Pi_\mu^{\mu-e_m} w_{N,\mu-e_m})\cdot\nu] ||_{L^2(S)}\\
        """
        # set quadrature degree
        quadrature_degree_old = parameters["form_compiler"]["quadrature_degree"]
        parameters["form_compiler"]["quadrature_degree"] = quadrature_degree
        logger.debug("residual quadrature order = " + str(quadrature_degree))
    
        # get pde residual terms
        r_T = pde.volume_residual
        r_E = pde.edge_residual
        r_Nb = pde.neumann_residual
        
        # get mean field of coefficient
        a0_f = coeff_field.mean_func

        # prepare some FEM variables
        V = w[mu]._fefunc.function_space()
        mesh = V.mesh()
        nu = FacetNormal(mesh)

        # initialise volume and edge residual with deterministic part
#        R_T = dot(nabla_grad(a0_f), nabla_grad(w[mu]._fefunc))
        R_T = r_T(a0_f, w[mu]._fefunc)
        if not mu:
            R_T = R_T + f
#        R_E = a0_f * dot(nabla_grad(w[mu]._fefunc), nu)
        R_E = r_E(a0_f, w[mu]._fefunc, nu)
        # get Neumann residual
        homogeneousNBC = False if mu.order == 0 else True
        R_Nb = r_Nb(a0_f, w[mu]._fefunc, nu, mesh, homogeneous=homogeneousNBC)

        # iterate m
        Lambda = w.active_indices()
        maxm = w.max_order
        if len(coeff_field) < maxm:
            logger.warning("insufficient length of coefficient field for MultiVector (%i < %i)", len(coeff_field), maxm)
            maxm = len(coeff_field)
            #        assert coeff_field.length >= maxm        # ensure coeff_field expansion is sufficiently long
        for m in range(maxm):
            am_f, am_rv = coeff_field[m]

            # prepare polynom coefficients
            beta = am_rv.orth_polys.get_beta(mu[m])

            # mu
            res = -beta[0] * w[mu]

            # mu+1
            mu1 = mu.inc(m)
            if mu1 in Lambda:
                w_mu1 = w.get_projection(mu1, mu)
                res += beta[1] * w_mu1

            # mu-1
            mu2 = mu.dec(m)
            if mu2 in Lambda:
                w_mu2 = w.get_projection(mu2, mu)
                res += beta[-1] * w_mu2

            # add volume contribution for m
#            r_t = dot(nabla_grad(am_f), nabla_grad(res._fefunc))
            R_T = R_T + r_T(am_f, res._fefunc)
            # add edge contribution for m
#            r_e = am_f * dot(nabla_grad(res._fefunc), nu)
            R_E = R_E + r_E(am_f, res._fefunc, nu)

        # prepare more FEM variables for residual assembly
        DG = FunctionSpace(mesh, "DG", 0)
        s = TestFunction(DG)
        h = CellSize(mesh)

        # scaling of residual terms and definition of residual form
        a0_s = a0_f[0] if isinstance(a0_f, tuple) else a0_f     # required for elasticity parameters
        res_form = (h ** 2 * (1 / a0_s) * dot(R_T, R_T) * s * dx
                    + avg(h) * dot(avg(R_E) / avg(a0_s), avg(R_E)) * 2 * avg(s) * dS)
        
        resT = h ** 2 * (1 / a0_s) * dot(R_T, R_T) * s * dx
        resE = 0 * s * dx + avg(h) * dot(avg(R_E) / avg(a0_s), avg(R_E)) * 2 * avg(s) * dS
        resNb = 0 * s * dx
        
        # add Neumann residuals
        if R_Nb is not None:
            for rj, dsj in R_Nb:
                res_form = res_form + h * (1 / a0_s) * dot(rj, rj) * s * dsj
                resNb += h * (1 / a0_s) * dot(rj, rj) * s * dsj

        # FEM evaluate residual on mesh
        eta = assemble(res_form)
        eta_indicator = np.array([sqrt(e) for e in eta])
        # map DG dofs to cell indices
        dofs = [DG.dofmap().cell_dofs(c.index())[0] for c in cells(mesh)]
        eta_indicator = eta_indicator[dofs]
        global_error = sqrt(sum(e for e in eta))

        # debug ---
        if False:
            etaT = assemble(resT)
            etaT_indicator = etaT #np.array([sqrt(e) for e in etaT])
            etaT = sqrt(sum(e for e in etaT))
            etaE = assemble(resE)
            etaE_indicator = etaE #np.array([sqrt(e) for e in etaE])
            etaE = sqrt(sum(e for e in etaE))
            etaNb = assemble(resNb)
            etaNb_indicator = etaNb #np.array([sqrt(e) for e in etaNb])
            etaNb = sqrt(sum(e for e in etaNb))
        
            print "==========RESIDUAL ESTIMATOR============"
            print "eta", eta
            print "eta_indicator", eta_indicator
            print "global =", global_error
            print "volume =", etaT
            print "edge =", etaE
            print "Neumann =", etaNb

            if False:        
                plot_indicators(((eta, "overall residual"), (etaT_indicator, "volume residual"), (etaE_indicator, "edge residual"), (etaNb_indicator, "Neumann residual")), mesh)
        # ---debug
        
        # restore quadrature degree
        parameters["form_compiler"]["quadrature_degree"] = quadrature_degree_old

        return (FlatVector(eta_indicator), global_error)
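In the deterministic special case (a single mean coefficient and mu = 0) the residual form above reduces to the classical explicit estimator for the Poisson problem. A minimal legacy-DOLFIN sketch under that assumption, with a hypothetical solution u and right-hand side f:

import numpy as np
from dolfin import (UnitSquareMesh, FunctionSpace, Function, Constant,
                    TestFunction, CellSize, FacetNormal, div, grad, jump,
                    avg, dx, dS, assemble)

mesh = UnitSquareMesh(16, 16)
V = FunctionSpace(mesh, 'CG', 2)
u = Function(V)            # assumed: FE solution computed elsewhere
f = Constant(1.0)          # assumed: right-hand side

DG0 = FunctionSpace(mesh, 'DG', 0)
s = TestFunction(DG0)
h = CellSize(mesh)
n = FacetNormal(mesh)

R_T = f + div(grad(u))     # volume residual
R_E = jump(grad(u), n)     # flux jump across interior facets
res_form = h**2 * R_T**2 * s * dx + avg(h) * R_E**2 * avg(s) * dS

eta = assemble(res_form)                    # one entry per cell (DG0 test function)
eta_indicator = np.sqrt(eta.get_local())
global_error = np.sqrt(eta.get_local().sum())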
Example #53
    def _evaluateGlobalMixedEstimator(cls, mu, w, coeff_field, pde, f, quadrature_degree, vectorspace_type='BDM'):
        """Evaluation of global mixed equilibrated estimator."""
        # set quadrature degree
#        quadrature_degree_old = parameters["form_compiler"]["quadrature_degree"]
#        parameters["form_compiler"]["quadrature_degree"] = quadrature_degree
#        logger.debug("residual quadrature order = " + str(quadrature_degree))

        # prepare numerical flux and f
        sigma_mu, f_mu = evaluate_numerical_flux(w, mu, coeff_field, f)

        # ###################
        # ## MIXED PROBLEM ##
        # ###################

        # get setup data for mixed problem
        V = w[mu]._fefunc.function_space()
        mesh = V.mesh()
        degree = element_degree(w[mu]._fefunc)

        # create function spaces
        DG0 = FunctionSpace(mesh, 'DG', 0)
        DG0_dofs = [DG0.dofmap().cell_dofs(c.index())[0] for c in cells(mesh)]
        RT = FunctionSpace(mesh, vectorspace_type, degree)
        W = RT * DG0

        # setup boundary conditions
#        bcs = pde.create_dirichlet_bcs(W.sub(1))

        # debug ===
        # from dolfin import DOLFIN_EPS, DirichletBC
        # def boundary(x):
        #     return x[0] < DOLFIN_EPS or x[0] > 1.0 + DOLFIN_EPS or x[1] < DOLFIN_EPS or x[1] > 1.0 + DOLFIN_EPS
        # bcs = [DirichletBC(W.sub(1), Constant(0.0), boundary)]
        # === debug

        # create trial and test functions
        (sigma, u) = TrialFunctions(W)
        (tau, v) = TestFunctions(W)

        # define variational form
        a_eq = (dot(sigma, tau) + div(tau) * u + div(sigma) * v) * dx
        L_eq = (- f_mu * v + dot(sigma_mu, tau)) * dx

        # compute solution
        w_eq = Function(W)
        solve(a_eq == L_eq, w_eq)
        (sigma_mixed, u_mixed) = w_eq.split()

        # #############################
        # ## EQUILIBRATION ESTIMATOR ##
        # #############################

        # evaluate error estimator
        dg0 = TestFunction(DG0)
        eta_mu = inner(sigma_mu, sigma_mu) * dg0 * dx
        eta_T = assemble(eta_mu, form_compiler_parameters={'quadrature_degree': quadrature_degree})
        eta_T = np.array([sqrt(e) for e in eta_T])

        # evaluate global error
        eta = sqrt(sum(i**2 for i in eta_T))
        # reorder array entries for local estimators
        eta_T = eta_T[DG0_dofs]

        # restore quadrature degree
#        parameters["form_compiler"]["quadrature_degree"] = quadrature_degree_old

        return eta, FlatVector(eta_T)
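The forms a_eq and L_eq above are the standard dual mixed formulation of the diffusion problem on the product space W = RT (or BDM) x DG0, i.e.

.. math:: \int_\Omega \sigma\cdot\tau \,dx + \int_\Omega u\,\operatorname{div}\tau \,dx
              + \int_\Omega v\,\operatorname{div}\sigma \,dx
              = \int_\Omega \sigma_\mu\cdot\tau \,dx - \int_\Omega f_\mu v \,dx
          \quad \text{for all } (\tau, v) \in W.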
Example #54
def trace_3d1d_matrix(V, TV, reduced_mesh):
    '''Trace from 3d to 1d. Makes sense only for CG space'''
    assert reduced_mesh.id() == TV.mesh().id()
    assert V.ufl_element().family() == 'Lagrange'
    
    mesh = V.mesh()
    line_mesh = TV.mesh()
    
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations: done naively, a vector dof would have to
    # evaluate at the same point once per component.
    value_size = TV.ufl_element().value_size()

    # We use the map to get 1d cell -> 3d edge -> 3d cell
    if hasattr(reduced_mesh, 'parent_entity_map'):
        # 1d cell -> 3d edge
        mapping = reduced_mesh.parent_entity_map[mesh.id()][1]
        # 3d edge -> 3d cell
        mesh.init(1)
        mesh.init(1, 3)
        e2c = mesh.topology()(1, 3)
        # From a 1d cell (by index) to the first 3d cell sharing its parent edge
        get_cell3d = lambda c, mapping=mapping, e2c=e2c: e2c(mapping[c.index()])[0]
    # Tree collision by midpoint
    else:
        tree = mesh.bounding_box_tree()
        limit = mesh.num_cells()

        get_cell3d = lambda c, tree=tree, bound=limit: (
            lambda index: index if index<bound else None
        )(tree.compute_first_entity_collision(c.midpoint()))
  
    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non-scalar spaces, components are obtained by shifting the scalar dof
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()               
    basis_values = np.zeros(V.element().space_dimension()*value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent => orthogonal tangent vectors
            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]

            # Let's get a 3d cell to use for getting the V values
            # CG assumption allows taking any
            tet_cell = get_cell3d(line_cell)
            if tet_cell is None: continue
            
            Vcell = Cell(mesh, tet_cell)
            vertex_coordinates = Vcell.get_vertex_coordinates()
            cell_orientation = 0
            # Columns are determined by the V cell! The sparsity could be
            # improved if, for the x-dofs of TV, only the x-dofs of V
            # were considered
            column_indices = np.array(V_dm.cell_dofs(tet_cell), dtype='int32')

            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # 3d at point
                Vel.evaluate_basis_all(basis_values, avg_point, vertex_coordinates, cell_orientation)
                # With the values in data we can now assign to several rows
                # of the matrix. The shift selects the (x, y, ...) or
                # (xx, xy, yx, ...) component of Q
                data = basis_values.reshape((-1, value_size)).T
                for shift, column_values in enumerate(data):
                    row = scalar_row + shift
                    mat.setValues([row], column_indices, column_values, PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
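The reshape/transpose in the dof loop above is what splits a single scalar point evaluation into per-component matrix rows. A small standalone illustration with hypothetical sizes:

import numpy as np

space_dim, value_size = 4, 2    # hypothetical: 4 scalar basis functions, 2 components
basis_values = np.arange(space_dim * value_size, dtype=float)

# Rows of data hold one component each; row `shift` goes into matrix row
# scalar_row + shift, mirroring the loop in trace_3d1d_matrix.
data = basis_values.reshape((-1, value_size)).T   # shape (value_size, space_dim)
for shift, column_values in enumerate(data):
    print(shift, column_values)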
Example #55
def sphere_average_matrix(V, TV, radius, quad_degree):
    '''Averaging matrix over the sphere'''
    mesh = V.mesh()
    line_mesh = TV.mesh()
    # Lebedev rules below need odd degrees
    if quad_degree % 2 == 0: quad_degree += 1
    # NOTE: this is a dependency
    from quadpy.sphere import Lebedev

    integrator = Lebedev(quad_degree)
    xq = integrator.points
    wq = integrator.weights
    
    if is_number(radius):
        radius = lambda x, radius=radius: radius

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations: done naively, a vector dof would have to
    # evaluate at the same point once per component.
    value_size = TV.ufl_element().value_size()

    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non-scalar spaces, components are obtained by shifting the scalar dof
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()               
    basis_values = np.zeros(V.element().space_dimension()*value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Get radius and integration points
                rad = radius(avg_point)
                # Scale and shift the unit sphere to the point
                integration_points = xq*rad + avg_point

                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    if c >= limit: continue

                    Vcell = Cell(mesh, c)
                    vertex_coordinates = Vcell.get_vertex_coordinates()
                    cell_orientation = Vcell.orientation()
                    Vel.evaluate_basis_all(basis_values, ip, vertex_coordinates, cell_orientation)

                    cols_ip = V_dm.cell_dofs(c)
                    values_ip = basis_values*wq[index]
                    # Add
                    for col, value in zip(cols_ip, values_ip.reshape((-1, value_size))):
                        if col in data:
                            data[col] += value
                        else:
                            data[col] = value
                            
                # With the values collected in data we can now assign to
                # several rows of the matrix
                column_indices = np.array(list(data.keys()), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array([data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values, PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
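The Lebedev rule is used here as a plain weighted point set; the code assumes its weights sum to one, so that the weighted sum of point evaluations is directly the spherical average. A quick sanity check under that assumption (requires quadpy, degree must be odd, as above):

import numpy as np
from quadpy.sphere import Lebedev

rule = Lebedev(3)
print(np.isclose(rule.weights.sum(), 1.0))         # weights form an average
# The average of f(x) = x_0**2 over the unit sphere is 1/3:
print(np.dot(rule.weights, rule.points[:, 0]**2))  # ~ 0.3333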
Example #56
def OverlapMesh(mesh1, mesh2, tol=1E-14):
    '''
    Given two subdomain meshes which share a single unique common tag in
    their marking functions, we create here a mesh of the cells carrying
    that tag. The new mesh's cells can be mapped back to mesh1/mesh2 cells.
    '''
    assert isinstance(mesh1, SubDomainMesh), type(mesh1)
    assert isinstance(mesh2, SubDomainMesh)

    tdim = mesh1.topology().dim()
    assert mesh2.topology().dim() == tdim
    assert mesh1.geometry().dim() == mesh2.geometry().dim()
    
    # Overlap has to be unique as well (for now)
    tags1 = set(mesh1.marking_function.array())
    tags2 = set(mesh2.marking_function.array())

    common_tags = tags1 & tags2
    assert len(common_tags) == 1
    tag = int(common_tags.pop())

    # A bit of wishful thinking here: create overlap mesh from mesh1
    # and hope it makes sense for mesh2 as well
    emesh = SubDomainMesh(mesh1.marking_function, tag)
    # Now we have a map from cells of emesh to cells in mesh1. Let's
    # build a map for mesh2 similar to `build_embedding_map`
    tree = mesh2.bounding_box_tree()
    # First localize each vertex in mesh2; then we look for a cell in
    # mesh2 that has the same vertices
    mesh2.init(tdim)
    mesh2.init(tdim, 0)

    c2v = mesh2.topology()(tdim, 0)

    mesh_x = mesh2.coordinates()
    emesh_x = emesh.coordinates()
    # Get some idea of mesh size to make relative comparison of coords
    scale = max(emesh_x.max(axis=0) - emesh_x.min(axis=0))
    # Also build the map for vertices
    entity_map = {0: [None]*emesh.num_vertices(), tdim: [None]*emesh.num_cells()}
    vertex_map = entity_map[0]
    cell_map = entity_map[tdim]

    collided_cells = {}
    for cell in df.cells(emesh):
        # The idea is that there is exactly one cell (the_cell) which is
        # found in every vertex's point-cell collision set
        the_cell = set()

        for vertex in cell.entities(0):
            # Try to be more efficient by computing each vertex collision
            # only once
            if vertex in collided_cells:
                mcells = collided_cells[vertex]
            else:
                vertex_x = emesh_x[vertex]
                mcells = tree.compute_entity_collisions(df.Point(*vertex_x))
                # What is the id of vertex in the mesh
                mcell_vertices = c2v(next(iter(mcells)))
                the_vertex = min(mcell_vertices, key=lambda v: np.linalg.norm(vertex_x-mesh_x[v]))
                error = np.linalg.norm(vertex_x - mesh_x[the_vertex])/scale
                assert error < tol, 'Found a hanging node %16f' % error

                vertex_map[vertex] = the_vertex
                collided_cells[vertex] = mcells
            
            if not the_cell:
                the_cell.update(mcells)
            else:
                the_cell = the_cell & set(mcells)
        assert len(the_cell) == 1, the_cell
        # Insert
        cell_map[cell.index()] = the_cell.pop()
    # Sanity
    assert not any(v is None for v in entity_map[0])
    assert not any(v is None for v in entity_map[tdim])
    # At this point we can add the map
    emesh.parent_entity_map[mesh2.id()] = entity_map

    return emesh
Example #57


# -------------------------------------------------------------------


if __name__ == '__main__':
    mesh = df.UnitSquareMesh(4, 4)
    subdomains = df.MeshFunction('size_t', mesh, mesh.topology().dim(), 3)
    df.CompiledSubDomain('x[0] < 0.25+DOLFIN_EPS').mark(subdomains, 1)
    df.CompiledSubDomain('x[0] > 0.75-DOLFIN_EPS').mark(subdomains, 2)

    mesh1 = SubDomainMesh(subdomains, (1, 3))
    mesh2 = SubDomainMesh(subdomains, (2, 3))
    mesh12 = OverlapMesh(mesh1, mesh2)

    # FIXME: split the file!
    map1 = mesh12.parent_entity_map[mesh1.id()][2]
    map2 = mesh12.parent_entity_map[mesh2.id()][2]
    # Cells check out
    for c, c1, c2 in zip(df.cells(mesh12), map1, map2):
        assert df.near(c.midpoint().distance(df.Cell(mesh1, c1).midpoint()), 0, 1E-14)
        assert df.near(c.midpoint().distance(df.Cell(mesh2, c2).midpoint()), 0, 1E-14)
        
    # Vertices are not that important but anyways
    x1 = mesh1.coordinates(); map1 = mesh12.parent_entity_map[mesh1.id()][0]
    x2 = mesh2.coordinates(); map2 = mesh12.parent_entity_map[mesh2.id()][0]
    for x, i1, i2 in zip(mesh12.coordinates(), map1, map2):
        assert np.linalg.norm(x - x1[i1]) < 1E-13
        assert np.linalg.norm(x - x2[i2]) < 1E-13
Example #58
def cylinder_average_matrix(V, TV, radius, quad_degree):
    '''Averaging matrix'''
    mesh = V.mesh()
    line_mesh = TV.mesh()
    # We are going to perform the integration with Gauss quadrature at
    # the end (PI u)(x):
    # A cell of mesh (an edge) defines a normal vector. Let P be the plane
    # defined by the normal vector n and some point x on Gamma. Let L be
    # the circle that is the intersection of P and S. The value of q (in Q)
    # at x is defined as
    #
    #                    q(x) = (1/|L|)*\int_{L} u * dL
    #
    # which simplifies to q(x) = (1/(2*pi*R))*\int_{-pi}^{pi} u(L(theta))*R*d(theta)
    # or, equivalently,       = (1/2) * \int_{-1}^{1} u(L(pi*s)) * ds.
    # This can be integrated easily once we figure out L. To this end, let
    # t_1 and t_2 be two mutually orthogonal unit vectors that are orthogonal to
    # n. Then L(pi*s) = p + R*t_1*cos(pi*s) + R*t_2*sin(pi*s) can be seen to be
    # such that i) |x-p| = R and ii) (x-p).n = 0 [i.e. this is the suitable
    # parametrization]
    
    # Clearly we can scale the weights as well as precompute
    # cos and sin terms.
    xq, wq = leggauss(quad_degree)
    wq *= 0.5
    cos_xq = np.cos(np.pi*xq).reshape((-1, 1))
    sin_xq = np.sin(np.pi*xq).reshape((-1, 1))

    if is_number(radius):
        radius = lambda x, radius=radius: radius

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations: done naively, a vector dof would have to
    # evaluate at the same point once per component.
    value_size = TV.ufl_element().value_size()

    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non-scalar spaces, components are obtained by shifting the scalar dof
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()               
    basis_values = np.zeros(V.element().space_dimension()*value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent => orthogonal tangent vectors

            v0, v1 = mesh_x[line_cell.entities(0)]
            n = v0 - v1

            t1 = np.array([n[1]-n[2], n[2]-n[0], n[0]-n[1]])   # n x (1, 1, 1)

            t2 = np.cross(n, t1)
            t1 /= np.linalg.norm(t1)
            t2 /= np.linalg.norm(t2)

            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Get radius and integration points
                rad = radius(avg_point)
         
                integration_points = avg_point + rad*t1*sin_xq + rad*t2*cos_xq

                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    if c >= limit: continue

                    Vcell = Cell(mesh, c)
                    vertex_coordinates = Vcell.get_vertex_coordinates()
                    cell_orientation = Vcell.orientation()
                    Vel.evaluate_basis_all(basis_values, ip, vertex_coordinates, cell_orientation)

                    cols_ip = V_dm.cell_dofs(c)
                    values_ip = basis_values*wq[index]
                    # Add
                    for col, value in zip(cols_ip, values_ip.reshape((-1, value_size))):
                        if col in data:
                            data[col] += value
                        else:
                            data[col] = value
                            
                # With the values collected in data we can now assign to
                # several rows of the matrix
                column_indices = np.array(list(data.keys()), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array([data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values, PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
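The tangent construction at the top of the cell loop is t1 = n x (1, 1, 1), which is orthogonal to n for any edge direction n not parallel to (1, 1, 1). A quick standalone check with a hypothetical edge direction:

import numpy as np

n = np.array([1.0, 2.0, 3.0])                      # hypothetical edge direction
t1 = np.array([n[1]-n[2], n[2]-n[0], n[0]-n[1]])   # n x (1, 1, 1)
t2 = np.cross(n, t1)
t1 /= np.linalg.norm(t1)
t2 /= np.linalg.norm(t2)
print(np.dot(n, t1), np.dot(n, t2), np.dot(t1, t2))   # all ~ 0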