Example #1
    def _build_eval_matrix(V, points):
        """Build the sparse m-by-n matrix that maps a coefficient set for a function in
        V to the values of that function at m given points."""
        # See <https://www.allanswered.com/post/lkbkm/#zxqgk>
        mesh = V.mesh()

        bbt = BoundingBoxTree()
        bbt.build(mesh)
        dofmap = V.dofmap()
        el = V.element()
        sdim = el.space_dimension()

        rows = []
        cols = []
        data = []
        for i, x in enumerate(points):
            cell_id = bbt.compute_first_entity_collision(Point(*x))
            cell = Cell(mesh, cell_id)
            coordinate_dofs = cell.get_vertex_coordinates()

            rows.append(np.full(sdim, i))
            cols.append(dofmap.cell_dofs(cell_id))

            v = el.evaluate_basis_all(x, coordinate_dofs, cell_id)
            data.append(v)

        rows = np.concatenate(rows)
        cols = np.concatenate(cols)
        data = np.concatenate(data)

        m = len(points)
        n = V.dim()
        matrix = sparse.csr_matrix((data, (rows, cols)), shape=(m, n))
        return matrix
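
A minimal usage sketch for the helper above (assuming legacy FEniCS/DOLFIN, NumPy and SciPy imported as np and sparse, and _build_eval_matrix in scope; the mesh, space and points are illustrative only): applying the matrix to a function's coefficient vector reproduces point evaluation.

# Hypothetical usage sketch; not part of the original snippet.
from dolfin import UnitSquareMesh, FunctionSpace, Expression, interpolate

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, 'CG', 1)
u = interpolate(Expression('x[0] + 2*x[1]', degree=1), V)

pts = np.array([[0.25, 0.25], [0.5, 0.75]])
E = _build_eval_matrix(V, pts)               # sparse m-by-n evaluation matrix
vals = E.dot(u.vector().get_local())         # should match [u(p) for p in pts]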
Example #2
class FEBasisFunction(object):
    '''Evaluator of basis functions of V'''
    def __init__(self, V):
        self.elm = V.element()
        self.mesh = V.mesh()

        shape = V.ufl_element().value_shape()
        degree = V.ufl_element().degree()

        # A fake Expression instance to talk to the outside world
        adapter = type('MiroHack',
                       (Expression, ),
                       {'value_shape': lambda self_, : shape,
                        'eval': lambda self_, values, x: self.eval(values, x)})
        self.__adapter = adapter(degree=degree)

        is_1d_in_3d = self.mesh.topology().dim() == 1 and self.mesh.geometry().dim() == 3
        self.orient_cell = cell_orientation(is_1d_in_3d)

        # Allocs
        self.__cell = Cell(self.mesh, 0)
        self.__cell_vertex_x = self.__cell.get_vertex_coordinates()
        self.__cell_orientation = self.orient_cell(self.__cell)
        self.__dof = 0
        self.__values = np.zeros(V.ufl_element().value_size())

    @property
    def dof(self):
        return self.__dof

    @dof.setter
    def dof(self, value):
        assert value < self.elm.space_dimension()
        self.__dof = value
    
    @property
    def cell(self):
        return self.__cell

    @cell.setter
    def cell(self, value):
        cell_ = Cell(self.mesh, value)
        self.__cell_vertex_x[:] = cell_.get_vertex_coordinates()
        self.__cell_orientation = self.orient_cell(cell_)
        self.__cell = cell_

    def eval(self, values, x):
        self.elm.evaluate_basis(self.dof,
                                values[:], 
                                x,
                                self.__cell_vertex_x,
                                self.__cell_orientation)

    def __call__(self, x):
        self.eval(self.__values, x)
        return 1*self.__values

    def as_expression(self):
        return self.__adapter
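
A hedged usage sketch (assuming legacy FEniCS and that FEBasisFunction plus its cell_orientation helper are importable; mesh, space and point are illustrative only): pick a cell and a local dof index, then evaluate that basis function at a point.

# Hypothetical usage sketch; not part of the original snippet.
from dolfin import UnitSquareMesh, FunctionSpace, Cell
import numpy as np

mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, 'CG', 1)

phi = FEBasisFunction(V)
phi.cell = 0                     # setter caches vertex coords and orientation
phi.dof = 1                      # local basis function index on that cell
x = Cell(mesh, 0).midpoint().array()[:2]   # a point inside cell 0
print(phi(x))                    # value of the chosen basis function at x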
Example #3
class FEBasisFunction(object):
    '''Evaluator of basis functions of V'''
    def __init__(self, V):
        self.elm = V.element()
        self.mesh = V.mesh()

        shape = V.ufl_element().value_shape()
        degree = V.ufl_element().degree()

        # A fake Expression instance to talk to the outside world
        adapter = type(
            'MiroHack', (Expression, ), {
                'value_shape': lambda self_, : shape,
                'eval': lambda self_, values, x: self.eval(values, x)
            })
        self.__adapter = adapter(degree=degree)

        is_1d_in_3d = self.mesh.topology().dim() == 1 and self.mesh.geometry(
        ).dim() == 3
        self.orient_cell = cell_orientation(is_1d_in_3d)

        # Allocs
        self.__cell = Cell(self.mesh, 0)
        self.__cell_vertex_x = self.__cell.get_vertex_coordinates()
        self.__cell_orientation = self.orient_cell(self.__cell)
        self.__dof = 0
        self.__values = np.zeros(V.ufl_element().value_size())

    @property
    def dof(self):
        return self.__dof

    @dof.setter
    def dof(self, value):
        assert value < self.elm.space_dimension()
        self.__dof = value

    @property
    def cell(self):
        return self.__cell

    @cell.setter
    def cell(self, value):
        cell_ = Cell(self.mesh, value)
        self.__cell_vertex_x[:] = cell_.get_vertex_coordinates()
        self.__cell_orientation = self.orient_cell(cell_)
        self.__cell = cell_

    def eval(self, values, x):
        self.elm.evaluate_basis(self.dof, values[:], x, self.__cell_vertex_x,
                                self.__cell_orientation)

    def __call__(self, x):
        self.eval(self.__values, x)
        return 1 * self.__values

    def as_expression(self):
        return self.__adapter
Example #4
    def __init__(self, u, locations, t0=0.0, record=''):
        # The idea here is that u(x) means: search for cell containing x,
        # evaluate the basis functions of that element at x, restrict
        # the coef vector of u to the cell. Of these 3 steps the first
        # two don't change. So we cache them
        # Check the scalar assumption
        assert u.value_rank() == 0 and u.value_size() == 1

        # Locate each point
        mesh = u.function_space().mesh()
        limit = mesh.num_entities_global(mesh.topology().dim())
        bbox_tree = mesh.bounding_box_tree()

        cells_for_x = [None] * len(locations)
        for i, x in enumerate(locations):
            cell = bbox_tree.compute_first_entity_collision(Point(*x))
            if -1 < cell < limit:
                cells_for_x[i] = cell
        # Ignore the cells that are not in the mesh. Note that we don't
        # care if a node is found in several cells - I think the C++ interface
        xs_cells = [(xi, c) for xi, c in zip(locations, cells_for_x)
                    if c is not None]

        V = u.function_space()
        element = V.dolfin_element()
        coefficients = np.zeros(element.space_dimension())
        # I build a series of functions bound to the right variables that,
        # when called, compute the value at x
        evals = []
        locations = []
        for x, ci in xs_cells:
            basis_matrix = np.zeros(element.space_dimension())

            cell = Cell(mesh, ci)
            vertex_coords = cell.get_vertex_coordinates()
            orientation = cell.orientation()
            # Eval the basis once
            element.evaluate_basis_all(basis_matrix, x, vertex_coords,
                                       orientation)

            def foo(A=basis_matrix, cell=cell, vc=vertex_coords):
                # Restrict for each call using the bound cell, vc ...
                u.restrict(coefficients, element, cell, vc, cell)
                # A here is bound to the right basis_matrix
                return np.dot(A, coefficients)

            evals.append(foo)
            locations.append(x)

        self.probes = evals
        self.locations = locations
        self.rank = MPI.rank(mesh.mpi_comm())
        self.data = []
        self.record = record
        # Make the initial record
        self.probe(t=t0)
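
The probe method called on the last line is not shown in the snippet; a minimal guess at what it might look like, using only the attributes set up above (self.probes, self.data), follows. This is an illustrative sketch, not the original implementation.

    # Hypothetical sketch of the missing probe(); not the original method.
    def probe(self, t):
        # Each cached closure evaluates u at its point; record them for time t.
        values = [f() for f in self.probes]
        self.data.append((t, values))
        return values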
Example #5
class DegreeOfFreedom(object):
    '''Evaluator of dof of V on functions'''
    def __init__(self, V):
        self.elm = V.element()
        self.mesh = V.mesh()

        is_1d_in_3d = self.mesh.topology().dim() == 1 and self.mesh.geometry().dim() == 3
        self.orient_cell = cell_orientation(is_1d_in_3d)

        # Allocs
        self.__cell = Cell(self.mesh, 0)
        self.__cell_vertex_x = self.__cell.get_vertex_coordinates()
        self.__cell_orientation = self.orient_cell(self.__cell)
        self.__dof = 0

    @property
    def dof(self):
        return self.__dof

    @dof.setter
    def dof(self, value):
        assert value < self.elm.space_dimension()
        self.__dof = value
    
    @property
    def cell(self):
        return self.__cell

    @cell.setter
    def cell(self, value):
        cell_ = Cell(self.mesh, value)
        self.__cell_vertex_x[:] = cell_.get_vertex_coordinates()
        self.__cell_orientation = self.orient_cell(cell_)
        self.__cell = cell_

    def eval(self, f):
        return self.elm.evaluate_dofs(f,
                                      self.__cell_vertex_x,
                                      self.__cell_orientation,
                                      self.__cell)[self.dof]

    def eval_dofs(self, f):
        return self.elm.evaluate_dofs(f,
                                      self.__cell_vertex_x,
                                      self.__cell_orientation,
                                      self.__cell)
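
A hedged usage sketch (assuming legacy FEniCS and that DegreeOfFreedom with its cell_orientation helper are in scope; space and expression are illustrative only): for a P1 Lagrange space the dofs are point evaluations at the cell vertices, so evaluating a dof on the coordinate expression returns that vertex coordinate.

# Hypothetical usage sketch; not part of the original snippet.
from dolfin import UnitSquareMesh, FunctionSpace, Expression

mesh = UnitSquareMesh(2, 2)
V = FunctionSpace(mesh, 'CG', 1)
f = Expression('x[0]', degree=1)

dof = DegreeOfFreedom(V)
dof.cell = 0        # restrict to cell 0 (caches vertex coords and orientation)
dof.dof = 2         # third local dof of that cell
# For P1 this is the x-coordinate of the matching vertex of cell 0
print(dof.eval(f))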
Example #6
class FEBasisFunction(UserExpression):
    '''Evaluator of basis functions of V'''
    def __init__(self, V, **kwargs):
        super().__init__(element=V.ufl_element(), **kwargs)
        self.elm = V.element()
        self.mesh = V.mesh()

        is_1d_in_3d = self.mesh.topology().dim() == 1 and self.mesh.geometry().dim() == 3
        self.orient_cell = cell_orientation(is_1d_in_3d)

        # Allocs
        self.__cell = Cell(self.mesh, 0)
        self.__cell_vertex_x = self.__cell.get_vertex_coordinates()
        self.__cell_orientation = self.orient_cell(self.__cell)
        self.__dof = 0
        self.__values = np.zeros(V.ufl_element().value_size())

    @property
    def dof(self):
        return self.__dof

    @dof.setter
    def dof(self, value):
        assert value < self.elm.space_dimension()
        self.__dof = value
    
    @property
    def cell(self):
        return self.__cell

    @cell.setter
    def cell(self, value):
        cell_ = Cell(self.mesh, value)
        self.__cell_vertex_x[:] = cell_.get_vertex_coordinates()
        self.__cell_orientation = self.orient_cell(cell_)
        self.__cell = cell_

    def eval_cell(self, values, x, cell):
        values[:] = self.elm.evaluate_basis(self.dof,
                                            x,
                                            self.__cell_vertex_x,
                                            self.__cell_orientation)

    def __call__(self, x):
        self.eval(self.__values, x)
        return 1*self.__values
Example #7
def point_trace_matrix(V, TV, x0):
    '''
    Let u in V with u = c_k phi_k; then u(x0) in TV equals c_k phi_k(x0). So this
    is a 1-by-dim(V) matrix whose column values are phi_k(x0).
    '''
    mesh = V.mesh()
    tree = mesh.bounding_box_tree()
    cell = tree.compute_first_entity_collision(Point(*x0))
    assert cell < mesh.num_cells()

    # Cell for restriction
    Vcell = Cell(mesh, cell)
    vertex_coordinates = Vcell.get_vertex_coordinates()
    cell_orientation = Vcell.orientation()
    x0 = np.fromiter(x0, dtype=float)

    # Columns - get all components at once
    all_dofs = V.dofmap().cell_dofs(cell).tolist()
    Vel = V.element()
    value_size = V.ufl_element().value_size()
    basis_values = np.zeros(V.element().space_dimension() * value_size)

    Vel.evaluate_basis_all(basis_values, x0, vertex_coordinates,
                           cell_orientation)

    with petsc_serial_matrix(TV, V) as mat:

        # Scalar gets all
        if value_size == 1:
            component_dofs = lambda component: V.dofmap().cell_dofs(cell)
        # Slices
        else:
            component_dofs = lambda component: V.sub(component).dofmap().cell_dofs(cell)

        for row in map(int, TV.dofmap().cell_dofs(cell)):  # R^n components
            sub_dofs = component_dofs(row)
            sub_dofs_local = [all_dofs.index(dof) for dof in sub_dofs]
            print(row, sub_dofs, sub_dofs_local, basis_values[sub_dofs_local])

            mat.setValues([row], sub_dofs, basis_values[sub_dofs_local],
                          PETSc.InsertMode.INSERT_VALUES)
    return mat
Example #8
class DegreeOfFreedom(object):
    '''Evaluator of dof of V on functions'''
    def __init__(self, V):
        self.elm = V.element()
        self.mesh = V.mesh()

        is_1d_in_3d = self.mesh.topology().dim() == 1 and self.mesh.geometry().dim() == 3
        self.orient_cell = cell_orientation(is_1d_in_3d)

        # Allocs
        self.__cell = Cell(self.mesh, 0)
        self.__cell_vertex_x = self.__cell.get_vertex_coordinates()
        self.__cell_orientation = self.orient_cell(self.__cell)
        self.__dof = 0

    @property
    def dof(self):
        return self.__dof

    @dof.setter
    def dof(self, value):
        assert value < self.elm.space_dimension()
        self.__dof = value
    
    @property
    def cell(self):
        return self.__cell

    @cell.setter
    def cell(self, value):
        cell_ = Cell(self.mesh, value)
        self.__cell_vertex_x[:] = cell_.get_vertex_coordinates()
        self.__cell_orientation = self.orient_cell(cell_)
        self.__cell = cell_

    def eval(self, f):
        return self.elm.evaluate_dof(self.dof,
                                     f.as_expression() if isinstance(f, FEBasisFunction) else f,
                                     self.__cell_vertex_x,
                                     self.__cell_orientation,
                                     self.__cell)
def point_trace_matrix(V, TV, x0):
    '''
    Let u in V with u = c_k phi_k; then u(x0) in TV equals c_k phi_k(x0). So this
    is a 1-by-dim(V) matrix whose column values are phi_k(x0).
    '''
    mesh = V.mesh()
    tree = mesh.bounding_box_tree()
    cell = tree.compute_first_entity_collision(Point(*x0))
    assert cell < mesh.num_cells()

    # Cell for restriction
    Vcell = Cell(mesh, cell)
    vertex_coordinates = Vcell.get_vertex_coordinates()
    cell_orientation = Vcell.orientation()
    x0 = np.fromiter(x0, dtype=float)

    # Columns - get all components at once
    all_dofs = V.dofmap().cell_dofs(cell).tolist()
    Vel = V.element()
    value_size = V.ufl_element().value_size()
    basis_values = np.zeros(V.element().space_dimension()*value_size)

    Vel.evaluate_basis_all(basis_values, x0, vertex_coordinates, cell_orientation)

    with petsc_serial_matrix(TV, V) as mat:

        # Scalar gets all
        if value_size == 1:
            component_dofs = lambda component: V.dofmap().cell_dofs(cell)
        # Slices
        else:
            component_dofs = lambda component: V.sub(component).dofmap().cell_dofs(cell)
        
        for row in map(int, TV.dofmap().cell_dofs(cell)):  # R^n components
            sub_dofs = component_dofs(row)
            sub_dofs_local = [all_dofs.index(dof) for dof in sub_dofs]
            print(row, sub_dofs, sub_dofs_local, basis_values[sub_dofs_local])
            
            mat.setValues([row], sub_dofs, basis_values[sub_dofs_local],
                          PETSc.InsertMode.INSERT_VALUES)
    return mat
Example #10
def build_grad_matrices(V, points):
    """Build the sparse m-by-n matrices that map a coefficient set for a function in V
    to the values of dx and dy at a number m of points.
    """
    # See <https://www.allanswered.com/post/lkbkm/#zxqgk>
    mesh = V.mesh()

    bbt = BoundingBoxTree()
    bbt.build(mesh)
    dofmap = V.dofmap()
    el = V.element()
    rows = []
    cols = []
    datax = []
    datay = []
    for i, xy in enumerate(points):
        cell_id = bbt.compute_first_entity_collision(Point(*xy))
        cell = Cell(mesh, cell_id)
        coordinate_dofs = cell.get_vertex_coordinates()

        rows.append([i, i, i])
        cols.append(dofmap.cell_dofs(cell_id))

        v = el.evaluate_basis_derivatives_all(1, xy, coordinate_dofs, cell_id)
        v = v.reshape(3, 2)
        datax.append(v[:, 0])
        datay.append(v[:, 1])

    rows = numpy.concatenate(rows)
    cols = numpy.concatenate(cols)
    datax = numpy.concatenate(datax)
    datay = numpy.concatenate(datay)

    m = len(points)
    n = V.dim()
    dx_matrix = sparse.csr_matrix((datax, (rows, cols)), shape=(m, n))
    dy_matrix = sparse.csr_matrix((datay, (rows, cols)), shape=(m, n))
    return dx_matrix, dy_matrix
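
A hedged usage sketch for build_grad_matrices (assuming legacy FEniCS, numpy and scipy.sparse imported as in the snippet, and a P1 space on a 2D mesh, which is what the reshape(3, 2) implies; mesh, function and points are illustrative only):

# Hypothetical usage sketch; not part of the original snippet.
from dolfin import UnitSquareMesh, FunctionSpace, Expression, interpolate

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, 'CG', 1)
u = interpolate(Expression('x[0]*x[0] + x[1]', degree=2), V)

pts = numpy.array([[0.3, 0.4], [0.6, 0.1]])
dx_matrix, dy_matrix = build_grad_matrices(V, pts)
dudx = dx_matrix.dot(u.vector().get_local())   # roughly 2*x at the points
dudy = dy_matrix.dot(u.vector().get_local())   # roughly 1.0 at the points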
Example #11
 def cell(self, value):
     cell_ = Cell(self.mesh, value)
     self.__cell_vertex_x[:] = cell_.get_vertex_coordinates()
     self.__cell_orientation = self.orient_cell(cell_)
     self.__cell = cell_
Example #12
def cylinder_average_matrix(V, TV, radius, quad_degree):
    '''Averaging matrix'''
    mesh = V.mesh()
    line_mesh = TV.mesh()
    # We are going to perform the integration with Gauss quadrature at
    # the end (PI u)(x):
    # A cell of mesh (an edge) defines a normal vector. Let P be the plane
    # that is defined by the normal vector n and some point x on Gamma. Let L
    # be the circle that is the intersection of P and S. The value of q (in Q) at x
    # is defined as
    #
    #                    q(x) = (1/|L|)*\int_{L}g(x)*dL
    #
    # which simplifies to q(x) = (1/(2*pi*R))*\int_{-pi}^{pi} u(L(theta))*R*d(theta)
    # or                       = (1/2) * \int_{-1}^{1} u(L(pi*s)) * ds
    # This can be integrated no problemo once we figure out L. To this end, let
    # t_1 and t_2 be two unit mutually orthogonal vectors that are orthogonal to
    # n. Then L(pi*s) = p + R*t_1*cos(pi*s) + R*t_2*sin(pi*s) can be seen to be
    # such that i) |x-p| = R and ii) (x-p).n = 0 [i.e. this is the suitable
    # parametrization]
    
    # Clearly we can scale the weights as well as precompute
    # cos and sin terms.
    xq, wq = leggauss(quad_degree)
    wq *= 0.5
    cos_xq = np.cos(np.pi*xq).reshape((-1, 1))
    sin_xq = np.sin(np.pi*xq).reshape((-1, 1))

    if is_number(radius):
        radius = lambda x, radius=radius: radius

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations: a vector dof, done naively, would have to
    # evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non-scalar we plan to make components by shift
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()               
    basis_values = np.zeros(V.element().space_dimension()*value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent => orthogonal tangent vectors

            v0, v1 = mesh_x[line_cell.entities(0)]
            n = v0 - v1

            t1 = np.array([n[1]-n[2], n[2]-n[0], n[0]-n[1]])
    
            t2 = np.cross(n, t1)
            t1 /= np.linalg.norm(t1)
            t2 = t2/np.linalg.norm(t2)

            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Get radius and integration points
                rad = radius(avg_point)
         
                integration_points = avg_point + rad*t1*sin_xq + rad*t2*cos_xq

                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    if c >= limit: continue

                    Vcell = Cell(mesh, c)
                    vertex_coordinates = Vcell.get_vertex_coordinates()
                    cell_orientation = Vcell.orientation()
                    Vel.evaluate_basis_all(basis_values, ip, vertex_coordinates, cell_orientation)

                    cols_ip = V_dm.cell_dofs(c)
                    values_ip = basis_values*wq[index]
                    # Add
                    for col, value in zip(cols_ip, values_ip.reshape((-1, value_size))):
                        if col in data:
                            data[col] += value
                        else:
                            data[col] = value
                            
                # The thing now is that with data we can assign to several
                # rows of the matrix
                column_indices = np.array(list(data.keys()), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array([data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values, PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
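
The circle parametrization claimed in the comment above can be sanity-checked with plain NumPy, independently of FEniCS; a standalone sketch with an arbitrary edge direction n, centre p and radius R:

# Standalone NumPy check of the parametrization L(pi*s) = p + R*t1*cos + R*t2*sin.
import numpy as np

n = np.array([1.0, 2.0, 0.5])                       # edge tangent / plane normal
t1 = np.array([n[1] - n[2], n[2] - n[0], n[0] - n[1]])
t1 /= np.linalg.norm(t1)
t2 = np.cross(n, t1)
t2 /= np.linalg.norm(t2)

p, R = np.array([0.1, 0.2, 0.3]), 0.25
s = np.linspace(-1, 1, 11)
L = p + R*np.outer(np.cos(np.pi*s), t1) + R*np.outer(np.sin(np.pi*s), t2)

assert np.allclose(np.linalg.norm(L - p, axis=1), R)   # i)  |x - p| = R
assert np.allclose((L - p).dot(n), 0.0)                # ii) (x - p).n = 0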
Example #13
def sphere_average_matrix(V, TV, radius, quad_degree):
    '''Averaging matrix over the sphere'''
    mesh = V.mesh()
    line_mesh = TV.mesh()
    # Lebedev below needs odd degrees
    if quad_degree % 2 == 0: quad_degree += 1
    # NOTE: this is a dependency
    from quadpy.sphere import Lebedev

    integrator = Lebedev(quad_degree)
    xq = integrator.points
    wq = integrator.weights
    
    if is_number(radius):
        radius = lambda x, radius=radius: radius

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations: a vector dof, done naively, would have to
    # evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non-scalar we plan to make components by shift
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()               
    basis_values = np.zeros(V.element().space_dimension()*value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Get radius and integration points
                rad = radius(avg_point)
                # Scale and shift the unit sphere to the point
                integration_points = xq*rad + avg_point

                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    if c >= limit: continue

                    Vcell = Cell(mesh, c)
                    vertex_coordinates = Vcell.get_vertex_coordinates()
                    cell_orientation = Vcell.orientation()
                    Vel.evaluate_basis_all(basis_values, ip, vertex_coordinates, cell_orientation)

                    cols_ip = V_dm.cell_dofs(c)
                    values_ip = basis_values*wq[index]
                    # Add
                    for col, value in zip(cols_ip, values_ip.reshape((-1, value_size))):
                        if col in data:
                            data[col] += value
                        else:
                            data[col] = value
                            
                # The thing now is that with data we can assign to several
                # rows of the matrix
                column_indices = np.array(list(data.keys()), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array([data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values, PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
Example #14
def trace_3d1d_matrix(V, TV, reduced_mesh):
    '''Trace from 3d to 1d. Makes sense only for CG space'''
    assert reduced_mesh.id() == TV.mesh().id()
    assert V.ufl_element().family() == 'Lagrange'
    
    mesh = V.mesh()
    line_mesh = TV.mesh()
    
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations: a vector dof, done naively, would have to
    # evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    # We use the map to get (1d cell -> [3d edge) -> 3d cell]
    if hasattr(reduced_mesh, 'parent_entity_map'):
        # ( )
        mapping = reduced_mesh.parent_entity_map[mesh.id()][1]
        # [ ]
        mesh.init(1)
        mesh.init(1, 3)
        e2c = mesh.topology()(1, 3)
        # From 1d cell (by index)
        get_cell3d = lambda c, d1d3=mapping, d3d3=e2c: d3d3(d1d3[c.index()])[0]
    # Tree collision by midpoint
    else:
        tree = mesh.bounding_box_tree()
        limit = mesh.num_cells()

        get_cell3d = lambda c, tree=tree, bound=limit: (
            lambda index: index if index<bound else None
        )(tree.compute_first_entity_collision(c.midpoint()))
  
    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non-scalar we plan to make components by shift
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()               
    basis_values = np.zeros(V.element().space_dimension()*value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent => orthogonal tangent vectors
            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]

            # Let's get a 3d cell to use for getting the V values
            # CG assumption allows taking any
            tet_cell = get_cell3d(line_cell)
            if tet_cell is None: continue
            
            Vcell = Cell(mesh, tet_cell)
            vertex_coordinates = Vcell.get_vertex_coordinates()
            cell_orientation = 0
            # Columns are determined by V cell! I guess the sparsity
            # could be improved if for x_dofs of TV only x_dofs of V
            # were considered
            column_indices = np.array(V_dm.cell_dofs(tet_cell), dtype='int32')

            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # 3d at point
                Vel.evaluate_basis_all(basis_values, avg_point, vertex_coordinates, cell_orientation)
                # The thing now is that with data we can assign to several
                # rows of the matrix. Shift determines the (x, y, ... ) or
                # (xx, xy, yx, ...) component of Q
                data = basis_values.reshape((-1, value_size)).T
                for shift, column_values in enumerate(data):
                    row = scalar_row + shift
                    mat.setValues([row], column_indices, column_values, PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
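
A hedged sketch of applying the resulting trace matrix (assuming legacy FEniCS, where GenericMatrix * GenericVector gives a matrix-vector product, and that V, TV, reduced_mesh and u in V are already set up as the assertions above require):

# Hypothetical usage sketch; V, TV, reduced_mesh and u are assumed to exist.
from dolfin import Function

T = trace_3d1d_matrix(V, TV, reduced_mesh)       # PETScMatrix: V -> TV
Tu = Function(TV)
Tu.vector()[:] = (T * u.vector()).get_local()    # u restricted to the 1d mesh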
Example #15
def cylinder_average_matrix(V, TV, radius, quad_degree):
    '''Averaging matrix'''
    mesh = V.mesh()
    line_mesh = TV.mesh()
    # We are going to perform the integration with Gauss quadrature at
    # the end (PI u)(x):
    # A cell of mesh (an edge) defines a normal vector. Let P be the plane
    # that is defined by the normal vector n and some point x on Gamma. Let L
    # be the circle that is the intersection of P and S. The value of q (in Q) at x
    # is defined as
    #
    #                    q(x) = (1/|L|)*\int_{L}g(x)*dL
    #
    # which simplifies to q(x) = (1/(2*pi*R))*\int_{-pi}^{pi} u(L(theta))*R*d(theta)
    # or                       = (1/2) * \int_{-1}^{1} u(L(pi*s)) * ds
    # This can be integrated no problemo once we figure out L. To this end, let
    # t_1 and t_2 be two unit mutually orthogonal vectors that are orthogonal to
    # n. Then L(pi*s) = p + R*t_1*cos(pi*s) + R*t_2*sin(pi*s) can be seen to be
    # such that i) |x-p| = R and ii) (x-p).n = 0 [i.e. this is the suitable
    # parametrization]

    # Clearly we can scale the weights as well as precompute
    # cos and sin terms.
    xq, wq = leggauss(quad_degree)
    wq *= 0.5
    cos_xq = np.cos(np.pi * xq).reshape((-1, 1))
    sin_xq = np.sin(np.pi * xq).reshape((-1, 1))

    if is_number(radius):
        radius = lambda x, radius=radius: radius

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations: a vector dof, done naively, would have to
    # evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non-scalar we plan to make components by shift
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()
    basis_values = np.zeros(V.element().space_dimension() * value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent => orthogonal tangent vectors

            v0, v1 = mesh_x[line_cell.entities(0)]
            n = v0 - v1

            t1 = np.array([n[1] - n[2], n[2] - n[0], n[0] - n[1]])

            t2 = np.cross(n, t1)
            t1 /= np.linalg.norm(t1)
            t2 = t2 / np.linalg.norm(t2)

            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Get radius and integration points
                rad = radius(avg_point)

                integration_points = avg_point + rad * t1 * sin_xq + rad * t2 * cos_xq

                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    if c >= limit: continue

                    Vcell = Cell(mesh, c)
                    vertex_coordinates = Vcell.get_vertex_coordinates()
                    cell_orientation = Vcell.orientation()
                    Vel.evaluate_basis_all(basis_values, ip,
                                           vertex_coordinates,
                                           cell_orientation)

                    cols_ip = V_dm.cell_dofs(c)
                    values_ip = basis_values * wq[index]
                    # Add
                    for col, value in zip(cols_ip,
                                          values_ip.reshape((-1, value_size))):
                        if col in data:
                            data[col] += value
                        else:
                            data[col] = value

                # The thing now is that with data we can assign to several
                # rows of the matrix
                column_indices = np.array(list(data.keys()), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array(
                        [data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values,
                                  PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
Example #16
def sphere_average_matrix(V, TV, radius, quad_degree):
    '''Averaging matrix over the sphere'''
    mesh = V.mesh()
    line_mesh = TV.mesh()
    # Lebedev below needs odd degrees
    if quad_degree % 2 == 0: quad_degree += 1
    # NOTE: this is a dependency
    from quadpy.sphere import Lebedev

    integrator = Lebedev(quad_degree)
    xq = integrator.points
    wq = integrator.weights

    if is_number(radius):
        radius = lambda x, radius=radius: radius

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations: a vector dof, done naively, would have to
    # evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non-scalar we plan to make components by shift
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()
    basis_values = np.zeros(V.element().space_dimension() * value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Get radius and integration points
                rad = radius(avg_point)
                # Scale and shift the unit sphere to the point
                integration_points = xq * rad + avg_point

                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    if c >= limit: continue

                    Vcell = Cell(mesh, c)
                    vertex_coordinates = Vcell.get_vertex_coordinates()
                    cell_orientation = Vcell.orientation()
                    Vel.evaluate_basis_all(basis_values, ip,
                                           vertex_coordinates,
                                           cell_orientation)

                    cols_ip = V_dm.cell_dofs(c)
                    values_ip = basis_values * wq[index]
                    # Add
                    for col, value in zip(cols_ip,
                                          values_ip.reshape((-1, value_size))):
                        if col in data:
                            data[col] += value
                        else:
                            data[col] = value

                # The thing now is that with data we can assign to several
                # rows of the matrix
                column_indices = np.array(list(data.keys()), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array(
                        [data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values,
                                  PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
Example #17
def trace_3d1d_matrix(V, TV, reduced_mesh):
    '''Trace from 3d to 1d. Makes sense only for CG space'''
    assert reduced_mesh.id() == TV.mesh().id()
    assert V.ufl_element().family() == 'Lagrange'

    mesh = V.mesh()
    line_mesh = TV.mesh()

    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations: a vector dof, done naively, would have to
    # evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    # We use the map to get (1d cell -> [3d edge) -> 3d cell]
    if hasattr(reduced_mesh, 'parent_entity_map'):
        # ( )
        mapping = reduced_mesh.parent_entity_map[mesh.id()][1]
        # [ ]
        mesh.init(1)
        mesh.init(1, 3)
        e2c = mesh.topology()(1, 3)
        # From 1d cell (by index)
        get_cell3d = lambda c, d1d3=mapping, d3d3=e2c: d3d3(d1d3[c.index()])[0]
    # Tree collision by midpoint
    else:
        tree = mesh.bounding_box_tree()
        limit = mesh.num_cells()

        get_cell3d = lambda c, tree=tree, bound=limit: (
            lambda index: index if index < bound else None)(
                tree.compute_first_entity_collision(c.midpoint()))

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non-scalar we plan to make components by shift
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()
    basis_values = np.zeros(V.element().space_dimension() * value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent => orthogonal tangent vectors
            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]

            # Let's get a 3d cell to use for getting the V values
            # CG assumption allows taking any
            tet_cell = get_cell3d(line_cell)
            if tet_cell is None: continue

            Vcell = Cell(mesh, tet_cell)
            vertex_coordinates = Vcell.get_vertex_coordinates()
            cell_orientation = 0
            # Columns are determined by V cell! I guess the sparsity
            # could be improved if for x_dofs of TV only x_dofs of V
            # were considered
            column_indices = np.array(V_dm.cell_dofs(tet_cell), dtype='int32')

            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # 3d at point
                Vel.evaluate_basis_all(basis_values, avg_point,
                                       vertex_coordinates, cell_orientation)
                # The thing now is that with data we can assign to several
                # rows of the matrix. Shift determines the (x, y, ... ) or
                # (xx, xy, yx, ...) component of Q
                data = basis_values.reshape((-1, value_size)).T
                for shift, column_values in enumerate(data):
                    row = scalar_row + shift
                    mat.setValues([row], column_indices, column_values,
                                  PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
Example #18
def average_matrix(V, TV, shape):
    '''
    Averaging matrix for reduction of g in V to TV by integration over shape.
    '''
    # We build a matrix representation of u in V -> Pi(u) in TV where
    #
    # Pi(u)(s) = |L(s)|^-1*\int_{L(s)} u(t) dL(s)
    #
    # Here L is the shape over which u is integrated for reduction.
    # Its measure is |L(s)|.

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations: a vector dof, done naively, would have to
    # evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    mesh = V.mesh()
    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    line_mesh = TV.mesh()

    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non-scalar we plan to make components by shift
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()
    basis_values = np.zeros(V.element().space_dimension() * value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent (normal of the plane which cuts the virtual
            # surface to yield the bdry curve)
            v0, v1 = mesh_x[line_cell.entities(0)]
            n = v0 - v1

            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Avg point here has the role of 'height' coordinate
                quadrature = shape.quadrature(avg_point, n)
                integration_points = quadrature.points
                wq = quadrature.weights

                curve_measure = sum(wq)

                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    if c >= limit: continue

                    cs = tree.compute_entity_collisions(Point(*ip))
                    # assert False
                    for c in cs[:1]:
                        Vcell = Cell(mesh, c)
                        vertex_coordinates = Vcell.get_vertex_coordinates()
                        cell_orientation = Vcell.orientation()
                        basis_values[:] = Vel.evaluate_basis_all(
                            ip, vertex_coordinates, cell_orientation)

                        cols_ip = V_dm.cell_dofs(c)
                        values_ip = basis_values * wq[index]
                        # Add
                        for col, value in zip(
                                cols_ip, values_ip.reshape((-1, value_size))):
                            if col in data:
                                data[col] += value / curve_measure
                            else:
                                data[col] = value / curve_measure

                # The thing now is that with data we can assign to several
                # rows of the matrix
                column_indices = np.array(list(data.keys()), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array(
                        [data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values,
                                  PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return mat
def surface_average_matrix(V, TV, bdry_curve):
    '''Averaging matrix'''
    mesh = V.mesh()
    line_mesh = TV.mesh()
    # We build a matrix representation of u in V -> Pi(u) in TV where
    #
    # Pi(u)(s) = |L(s)|^-1*\int_{L(s)}u(t) dL(s)
    #
    # Here L represents a curve bounding the surface at 'height' s.
    #
    # We do this numerically as |L(s)|^-1*\sum_q u(x_q)*w_q

    # Weights remain fixed
    wq = bdry_curve.weights

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations: a vector dof, done naively, would have to
    # evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non-scalar we plan to make components by shift
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()
    basis_values = np.zeros(V.element().space_dimension() * value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent (normal of the plane which cuts the virtual
            # surface to yield the bdry curve)
            v0, v1 = mesh_x[line_cell.entities(0)]
            n = v0 - v1
            # We can specialize quadrature points; we can have several
            # height points with same normal
            pts_at_n = bdry_curve.points(n)
            len_at_n = bdry_curve.length(n)

            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Avg point here has the role of 'height' coordinate
                integration_points = pts_at_n(avg_point)
                len_bdry_curve = len_at_n(avg_point)

                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    if c >= limit: continue

                    Vcell = Cell(mesh, c)
                    vertex_coordinates = Vcell.get_vertex_coordinates()
                    cell_orientation = Vcell.orientation()
                    Vel.evaluate_basis_all(basis_values, ip,
                                           vertex_coordinates,
                                           cell_orientation)

                    cols_ip = V_dm.cell_dofs(c)
                    values_ip = basis_values * wq[index]
                    # Add
                    for col, value in zip(cols_ip,
                                          values_ip.reshape((-1, value_size))):
                        if col in data:
                            data[col] += value / len_bdry_curve
                        else:
                            data[col] = value / len_bdry_curve

                # The thing now is that with data we can assign to several
                # rows of the matrix
                column_indices = np.array(list(data.keys()), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array(
                        [data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values,
                                  PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
Example #20
 def cell(self, value):
     cell_ = Cell(self.mesh, value)
     self.__cell_vertex_x[:] = cell_.get_vertex_coordinates()
     self.__cell_orientation = self.orient_cell(cell_)
     self.__cell = cell_