Example #1
def circle_random2(n, radius, seed=0):
    """Boundary points are random, too."""
    # generate random points in circle; <http://mathworld.wolfram.com/DiskPointPicking.html>
    numpy.random.seed(seed)
    r = numpy.random.rand(n)
    alpha = 2 * numpy.pi * numpy.random.rand(n)

    pts = numpy.column_stack(
        [numpy.sqrt(r) * numpy.cos(alpha), numpy.sqrt(r) * numpy.sin(alpha)]
    )
    tri = Delaunay(pts)
    # Build a mesh to identify the boundary points
    mesh = MeshTri(pts, tri.simplices)
    # Scale the points such that the boundary points lie at distance `radius` on average
    boundary_pts = pts[mesh.is_boundary_point]
    dist = numpy.sqrt(numpy.einsum("ij,ij->i", boundary_pts, boundary_pts))
    avg_dist = numpy.sum(dist) / len(dist)
    pts = pts * (radius / avg_dist)
    # boundary_pts = pts[mesh.is_boundary_point]
    # dist = numpy.sqrt(numpy.einsum("ij,ij->i", boundary_pts, boundary_pts))
    # avg_dist = numpy.sum(dist) / len(dist)
    # print(avg_dist)

    # now move all boundary points to the circle
    # bpts = pts[mesh.is_boundary_point]
    # pts[mesh.is_boundary_point] = (
    #     bpts.T / numpy.sqrt(numpy.einsum("ij,ij->i", bpts, bpts))
    # ).T
    # bpts = pts[mesh.is_boundary_point]
    # print(numpy.sqrt(numpy.einsum("ij,ij->i", bpts, bpts)))
    # mesh = MeshTri(pts, tri.simplices)
    # mesh.show()

    return pts, tri.simplices
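As a side note on the sampling above: taking sqrt(r) is what makes the points uniformly distributed over the disk's area (see the MathWorld link in the comment). A small standalone check, not part of the original code:

import numpy

numpy.random.seed(0)
u = numpy.random.rand(100000)
radii = numpy.sqrt(u)  # radius ~ sqrt(uniform) gives uniform area density
# The disk of radius 0.5 covers 25% of the unit disk, so about 25% of the
# samples should land inside it.
print(numpy.mean(radii < 0.5))  # ~0.25
# Without the square root, samples cluster near the center (~50% inside).
print(numpy.mean(u < 0.5))  # ~0.5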
Example #2
def jac_uniform(X, cells):
    """The approximated Jacobian is

      partial_i E = 2/(d+1) (x_i int_{omega_i} rho(x) dx - int_{omega_i} x rho(x) dx)
                  = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_{j, rho}) int_{tau_j} rho,

    see Chen-Holst. This method here assumes uniform density, rho(x) = 1, such that

      partial_i E = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) |tau_j|

    with b_j being the ordinary barycenter.
    """
    dim = 2
    mesh = MeshTri(X, cells)

    jac = numpy.zeros(X.shape)
    for k in range(mesh.cells["nodes"].shape[1]):
        i = mesh.cells["nodes"][:, k]
        vals = (mesh.node_coords[i] -
                mesh.cell_barycenters).T * mesh.cell_volumes
        # numpy.add.at(jac, i, vals)
        jac += numpy.array(
            [numpy.bincount(i, val, minlength=jac.shape[0]) for val in vals]).T

    return 2 / (dim + 1) * jac
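The bincount construction here is a vectorized replacement for the numpy.add.at scatter shown in the comment above it; the two are equivalent, as this small standalone check illustrates:

import numpy

n = 5
i = numpy.array([0, 2, 2, 4])      # target rows, possibly repeated
vals = numpy.random.rand(4, 2)     # one 2D contribution per entry of i

a = numpy.zeros((n, 2))
numpy.add.at(a, i, vals)           # unbuffered scatter-add

b = numpy.array([numpy.bincount(i, v, minlength=n) for v in vals.T]).T

assert numpy.allclose(a, b)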
Example #3
def quasi_newton_uniform(points, cells, *args, **kwargs):
    """Like linear_solve above, but assuming rho==1. Note that the energy gradient

        \\partial E_i = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) \\int_{tau_j} rho

    becomes

        \\partial E_i = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) |tau_j|.

    Because of the dependence of |tau_j| on the point coordinates, this is a nonlinear
    problem.

    This method makes the simplifying assumption that |tau_j| does in fact _not_ depend
    on the point coordinates. With this, one still only needs to solve a linear system.
    """
    def get_new_points(mesh):
        # do one Newton step
        # TODO need copy?
        x = mesh.node_coords.copy()
        cells = mesh.cells["nodes"]
        jac_x = jac_uniform(x, cells)
        x -= solve_hessian_approx_uniform(x, cells, jac_x)
        return x

    mesh = MeshTri(points, cells)
    runner(
        get_new_points,
        mesh,
        *args,
        **kwargs,
        method_name=
        "Centroidal Patch Tesselation (CPT), uniform density, quasi-Newton variant"
    )
    return mesh.node_coords, mesh.cells["nodes"]
Example #4
def fixed_point(points, cells, *args, **kwargs):
    """Perform k steps of Laplacian smoothing to the mesh, i.e., moving each
    interior vertex to the arithmetic average of its neighboring points.
    """
    def get_new_points(mesh):
        # move interior points into average of their neighbors

        # old way:
        # num_neighbors = numpy.zeros(n, dtype=int)
        # numpy.add.at(num_neighbors, idx, numpy.ones(idx.shape, dtype=int))
        # new_points = numpy.zeros(mesh.node_coords.shape)
        # numpy.add.at(new_points, idx[:, 0], mesh.node_coords[idx[:, 1]])
        # numpy.add.at(new_points, idx[:, 1], mesh.node_coords[idx[:, 0]])

        n = mesh.node_coords.shape[0]
        idx = mesh.edges["nodes"]
        num_neighbors = numpy.bincount(idx.reshape(-1), minlength=n)

        new_points = numpy.zeros(mesh.node_coords.shape)
        vals = mesh.node_coords[idx[:, 1]].T
        new_points += numpy.array(
            [numpy.bincount(idx[:, 0], val, minlength=n) for val in vals]).T
        vals = mesh.node_coords[idx[:, 0]].T
        new_points += numpy.array(
            [numpy.bincount(idx[:, 1], val, minlength=n) for val in vals]).T
        new_points /= num_neighbors[:, None]

        # reset boundary nodes
        idx = mesh.is_boundary_node
        new_points[idx] = mesh.node_coords[idx]
        return new_points

    mesh = MeshTri(points, cells)
    runner(get_new_points, mesh, *args, **kwargs)
    return mesh.node_coords, mesh.cells["nodes"]
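The accumulation pattern in get_new_points can be hard to read; here is the same neighbor averaging written out on a hand-made five-point mesh (plain numpy, a sketch for illustration only):

import numpy

points = numpy.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.2, 0.3]])
edges = numpy.array([[0, 1], [1, 2], [2, 3], [3, 0], [0, 4], [1, 4], [2, 4], [3, 4]])
is_boundary = numpy.array([True, True, True, True, False])

n = len(points)
num_neighbors = numpy.bincount(edges.reshape(-1), minlength=n)
new_points = numpy.zeros_like(points)
# add the coordinates of the opposite end point of every edge, in both directions
for a, b in [(0, 1), (1, 0)]:
    for d in range(points.shape[1]):
        new_points[:, d] += numpy.bincount(edges[:, a], points[edges[:, b], d], minlength=n)
new_points /= num_neighbors[:, None]
new_points[is_boundary] = points[is_boundary]  # keep the boundary fixed
print(new_points[4])  # [0.5 0.5], the mean of its four neighbors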
Example #5
def linear_solve_density_preserving(points, cells, *args, **kwargs):
    def get_new_points(mesh, tol=1.0e-10):
        matrix = _build_graph_laplacian(mesh)

        n = mesh.node_coords.shape[0]
        rhs = numpy.zeros((n, mesh.node_coords.shape[1]))
        rhs[mesh.is_boundary_node] = mesh.node_coords[mesh.is_boundary_node]

        out = scipy.sparse.linalg.spsolve(matrix, rhs)

        # PyAMG fails on circleci.
        # ml = pyamg.ruge_stuben_solver(matrix)
        # # Keep an eye on multiple rhs-solves in pyamg,
        # # <https://github.com/pyamg/pyamg/issues/215>.
        # out = numpy.column_stack(
        #     [ml.solve(rhs[:, 0], tol=tol), ml.solve(rhs[:, 1], tol=tol)]
        # )
        return out

    mesh = MeshTri(points, cells)
    runner(get_new_points,
           mesh,
           *args,
           **kwargs,
           method_name="exact Laplacian smoothing")
    return mesh.node_coords, mesh.cells["nodes"]
Example #6
def fixed_point_uniform(points, cells, *args, boundary_step=None, **kwargs):
    """Idea:
    Move interior mesh points into the weighted averages of the centroids (barycenters)
    of their adjacent cells.
    """
    def get_new_points(mesh):
        X = get_new_points_averaged(mesh, mesh.cell_barycenters,
                                    mesh.cell_volumes)
        if boundary_step is None:
            # Reset boundary points to their original positions.
            idx = mesh.is_boundary_node
            X[idx] = mesh.node_coords[idx]
        else:
            # Move all boundary nodes back to the boundary.
            idx = mesh.is_boundary_node
            X[idx] = boundary_step(X[idx].T).T
        return X

    mesh = MeshTri(points, cells)
    runner(
        get_new_points,
        mesh,
        *args,
        **kwargs,
        method_name=
        "Centroidal Patch Tesselation (CPT), uniform density, fixed-point variant"
    )
    return mesh.node_coords, mesh.cells["nodes"]
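A possible boundary_step for a unit-circle domain. The (dim, n)-shaped argument is inferred from the .T calls above; the tolerance and step count in the commented call are assumptions about what runner accepts, not a documented signature:

import numpy

def circle_boundary_step(x):
    # x has shape (dim, n); project every column back onto the unit circle
    return x / numpy.sqrt(numpy.einsum("ij,ij->j", x, x))

# hypothetical call:
# points, cells = fixed_point_uniform(
#     points, cells, 1.0e-6, 100, boundary_step=circle_boundary_step
# )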
Example #7
def fixed_point_density_preserving(points, cells, *args, **kwargs):
    """Idea:
    Move interior mesh points into the weighted averages of the circumcenters
    of their adjacent cells.
    """
    def get_new_points(mesh):
        # Get circumcenters everywhere except at cells adjacent to the boundary;
        # barycenters there. The reason is that points near the boundary would be
        # "sucked" out of the domain if the boundary cell is very flat, i.e., its
        # circumcenter is very far outside of the domain.
        # The same problem can also occur for cells _near_ (but not at) the boundary,
        # though, and, for maliciously constructed meshes, anywhere. Hence, this
        # heuristic can break down. A more robust approach would be to use barycenters
        # for all cells which are sufficiently flat.
        cc = mesh.cell_circumcenters.copy()
        # Find all cells with a boundary edge
        is_boundary_cell = (numpy.sum(
            mesh.is_boundary_node[mesh.cells["nodes"]], axis=1) == 2)
        cc[is_boundary_cell] = mesh.cell_barycenters[is_boundary_cell]
        return get_new_points_count_averaged(mesh, cc)

    mesh = MeshTri(points, cells)
    runner(
        get_new_points,
        mesh,
        *args,
        **kwargs,
        method_name=
        "Optimal Delaunay Tesselation (ODT), density-preserving, fixed-point variant",
    )
    return mesh.node_coords, mesh.cells["nodes"]
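To see why circumcenters are replaced near the boundary: for a very flat triangle the circumcenter sits far outside, while the barycenter stays inside. A standalone illustration (plain numpy, not part of the original code):

import numpy

tri = numpy.array([[0.0, 0.0], [1.0, 0.0], [0.5, 0.01]])  # nearly degenerate
a, b, c = tri

# circumcenter from the two perpendicular-bisector conditions
A = 2 * numpy.array([b - a, c - a])
rhs = numpy.array([b @ b - a @ a, c @ c - a @ a])
circumcenter = numpy.linalg.solve(A, rhs)

print(circumcenter)      # approximately [0.5, -12.5], far below the triangle
print(tri.mean(axis=0))  # barycenter [0.5, 0.0033...], inside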
Example #8
def fixed_point_uniform(points, cells, *args, **kwargs):
    """Idea:
    Move interior mesh points into the weighted averages of the circumcenters
    of their adjacent cells. If a triangle cell switches orientation in the
    process, don't move quite so far.
    """
    def get_new_points(mesh):
        # Get circumcenters everywhere except at cells adjacent to the boundary;
        # barycenters there.
        cc = mesh.cell_circumcenters
        bc = mesh.cell_barycenters
        # Find all cells with a boundary edge
        boundary_cell_ids = mesh.edges_cells[1][:, 0]
        cc[boundary_cell_ids] = bc[boundary_cell_ids]
        return get_new_points_volume_averaged(mesh, cc)

    mesh = MeshTri(points, cells)
    runner(
        get_new_points,
        mesh,
        *args,
        **kwargs,
        method_name=
        "Optimal Delaunay Tesselation (ODT), uniform density, fixed-point variant",
    )
    return mesh.node_coords, mesh.cells["nodes"]
Example #9
def random():
    n = 40
    pts, cells = create_random_circle(n, radius=1.0, seed=0)
    assert numpy.sum(MeshTri(pts, cells).is_boundary_node) == n

    meshio.write_points_cells("circle.xdmf", pts, {"triangle": cells})
    return
Example #10
def jac_uniform(X, cells):
    """The approximated Jacobian is

      partial_i E = 2/(d+1) (x_i int_{omega_i} rho(x) dx - int_{omega_i} x rho(x) dx)
                  = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_{j, rho}) int_{tau_j} rho,

    see Chen-Holst. This method here assumes uniform density, rho(x) = 1, such that

      partial_i E = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) |tau_j|

    with b_j being the ordinary barycenter.
    """
    dim = 2
    mesh = MeshTri(X, cells, flat_cell_correction=None)

    jac = numpy.zeros(X.shape)
    for k in range(mesh.cells["nodes"].shape[1]):
        i = mesh.cells["nodes"][:, k]
        fastfunc.add.at(
            jac,
            i,
            ((mesh.node_coords[i] - mesh.cell_barycenters).T *
             mesh.cell_volumes).T,
        )

    return 2 / (dim + 1) * jac
Example #11
def fixed_point_uniform(points, cells, *args, boundary_step=None, **kwargs):
    """Idea:
    Move interior mesh points into the weighted averages of the circumcenters
    of their adjacent cells. (Except on boundary cells; use barycenters there.)
    """
    def get_new_points(mesh):
        # Get circumcenters everywhere except at cells adjacent to the boundary;
        # barycenters there.
        cc = mesh.cell_circumcenters
        bc = mesh.cell_barycenters
        # Find all cells with a boundary edge
        boundary_cell_ids = mesh.edges_cells[1][:, 0]
        cc[boundary_cell_ids] = bc[boundary_cell_ids]
        X = get_new_points_averaged(mesh, cc, mesh.cell_volumes)
        if boundary_step is None:
            # Reset boundary points to their original positions.
            idx = mesh.is_boundary_point
            X[idx] = mesh.points[idx]
        else:
            # Move all boundary points back to the boundary.
            idx = mesh.is_boundary_point
            X[idx] = boundary_step(X[idx].T).T
        return X

    mesh = MeshTri(points, cells)
    runner(
        get_new_points,
        mesh,
        *args,
        **kwargs,
        method_name=
        "Optimal Delaunay Tesselation (ODT), uniform density, fixed-point variant",
    )
    return mesh.points, mesh.cells["points"]
Example #12
def _energy_uniform_per_node(X, cells):
    """The CPT mesh energy is defined as

        sum_i E_i,
        E_i = 1/(d+1) * sum int_{omega_i} ||x - x_i||^2 rho(x) dx,

    see Chen-Holst. This method gives the E_i and assumes uniform density, rho(x) = 1.
    """
    dim = 2
    mesh = MeshTri(X, cells)

    star_integrals = numpy.zeros(mesh.node_coords.shape[0])
    # Python loop over the cells... slow!
    for cell, cell_volume in zip(mesh.cells["nodes"], mesh.cell_volumes):
        for idx in cell:
            xi = mesh.node_coords[idx]
            tri = mesh.node_coords[cell]
            val = quadpy.triangle.integrate(
                lambda x: numpy.einsum("ij,ij->i", x.T - xi, x.T - xi),
                tri,
                # Take any scheme with order 2
                quadpy.triangle.Dunavant(2),
            )
            star_integrals[idx] += val

    return star_integrals / (dim + 1)
Example #13
    def __init__(self, points, cells):
        # Add ghost points and cells for boundary facets
        msh = MeshTri(points, cells)
        self.ghost_mirror = []
        ghost_cells = []
        k = points.shape[0]
        for i in [[0, 1, 2], [1, 2, 0], [2, 0, 1]]:
            bf = msh.is_boundary_facet[i[0]]
            c = msh.cells["nodes"][bf].T
            self.ghost_mirror.append(c[i])
            n = c.shape[1]
            p = numpy.arange(k, k + n)
            ghost_cells.append(numpy.column_stack([p, c[i[1]], c[i[2]]]))
            k += n

        self.num_boundary_cells = numpy.sum(msh.is_boundary_facet)

        self.is_ghost_point = numpy.zeros(points.shape[0] +
                                          self.num_boundary_cells,
                                          dtype=bool)
        self.is_ghost_point[points.shape[0]:] = True
        is_ghost_cell = numpy.zeros(cells.shape[0] + self.num_boundary_cells,
                                    dtype=bool)
        self.ghost_cell_gids = numpy.arange(
            cells.shape[0], cells.shape[0] + self.num_boundary_cells)
        is_ghost_cell[cells.shape[0]:] = True

        self.ghost_mirror = numpy.concatenate(self.ghost_mirror, axis=1)
        assert self.ghost_mirror.shape[1] == self.num_boundary_cells

        self.num_original_points = points.shape[0]
        points = numpy.concatenate(
            [points,
             numpy.zeros((self.num_boundary_cells, points.shape[1]))])
        self.num_original_cells = cells.shape[0]
        cells = numpy.concatenate([cells, *ghost_cells])

        # Cache some values for the ghost reflection
        self.p1 = points[self.ghost_mirror[1]]
        mp2 = points[self.ghost_mirror[2]]
        self.mirror_edge = mp2 - self.p1
        self.beta = numpy.einsum("ij, ij->i", self.mirror_edge,
                                 self.mirror_edge)

        # Set ghost points
        points[self.is_ghost_point] = self.reflect_ghost(
            points[self.ghost_mirror[0]])

        # Create new mesh, remember the pseudo-boundary edges
        super(GhostedMesh, self).__init__(points, cells)

        self.create_edges()
        # Get the first edge in the ghost cells. (The first point is the ghost point,
        # and opposite edges have the same index.)
        self.ghost_edge_gids = self.cells["edges"][is_ghost_cell, 0]
        self.original_edges_nodes = self.edges["nodes"][self.ghost_edge_gids]

        self.update_ghost_mirrors()
        return
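reflect_ghost itself is not shown in this excerpt. Judging from the cached p1, mirror_edge, and beta, it reflects points across the line through the mirror edge; a consistent standalone sketch (an assumption, not the original method) would be:

import numpy

def reflect_across_edge(p0, p1, mirror_edge):
    # Reflect each row of p0 across the line through p1 with direction mirror_edge.
    beta = numpy.einsum("ij, ij->i", mirror_edge, mirror_edge)
    alpha = numpy.einsum("ij, ij->i", p0 - p1, mirror_edge) / beta
    proj = p1 + alpha[:, None] * mirror_edge   # foot of the perpendicular
    return 2 * proj - p0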
Example #14
    def get_unghosted_mesh(self):
        # Make a deep copy to avoid influencing the actual mesh
        mesh2 = copy.deepcopy(self)
        mesh2.flip_interior_edges(self.get_flip_ghost_edges())
        # remove ghost cells
        # TODO this is too crude; sometimes the wrong cells are cut
        points = mesh2.node_coords[:self.num_original_points]
        cells = mesh2.cells["nodes"][:self.num_original_cells]
        return MeshTri(points, cells)
Example #15
def fixed_point_uniform(points, cells, *args, **kwargs):
    """Idea:
    Move interior mesh points into the weighted averages of the centroids
    (barycenters) of their adjacent cells.
    """

    def get_new_points(mesh):
        return get_new_points_volume_averaged(mesh, mesh.cell_barycenters)

    mesh = MeshTri(points, cells)
    runner(get_new_points, mesh, *args, **kwargs)
    return mesh.node_coords, mesh.cells["nodes"]
Example #16
def _solve_hessian_approx_uniform(X, cells, rhs):
    """This approximation reproduces the fixed point iteration.
    """
    dim = 2
    mesh = MeshTri(X, cells)
    diag = numpy.zeros(X.shape[0])
    for i in range(3):
        fastfunc.add.at(diag, cells[:, i], mesh.cell_volumes)
    diag *= 2 / (dim + 1)
    out = (rhs.T / diag).T
    out[mesh.is_boundary_node] = 0.0
    return out
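Indeed, with jac_i = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) |tau_j| and diag_i = 2/(d+1) sum_{tau_j in omega_i} |tau_j|, one step x_i -> x_i - jac_i / diag_i reduces to x_i -> sum_j b_j |tau_j| / sum_j |tau_j|, the volume-weighted average of the adjacent cell barycenters, which is exactly the CPT fixed-point update.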
Example #17
def linear_solve_density_preserving(points, cells, *args, **kwargs):
    def get_new_points(mesh, tol=1.0e-10):
        cells = mesh.cells["nodes"].T

        row_idx = []
        col_idx = []
        val = []
        a = numpy.ones(cells.shape[1], dtype=float)
        for i in [[0, 1], [1, 2], [2, 0]]:
            edges = cells[i]
            row_idx += [edges[0], edges[1], edges[0], edges[1]]
            col_idx += [edges[0], edges[1], edges[1], edges[0]]
            val += [+a, +a, -a, -a]

        row_idx = numpy.concatenate(row_idx)
        col_idx = numpy.concatenate(col_idx)
        val = numpy.concatenate(val)

        n = mesh.node_coords.shape[0]

        # Create CSR matrix for efficiency
        matrix = scipy.sparse.coo_matrix((val, (row_idx, col_idx)), shape=(n, n))
        matrix = matrix.tocsr()

        # Apply Dirichlet conditions.
        verts = numpy.where(mesh.is_boundary_node)[0]
        # Set all Dirichlet rows to 0.
        for i in verts:
            matrix.data[matrix.indptr[i] : matrix.indptr[i + 1]] = 0.0
        # Set the diagonal and RHS.
        d = matrix.diagonal()
        d[mesh.is_boundary_node] = 1.0
        matrix.setdiag(d)

        rhs = numpy.zeros((n, mesh.node_coords.shape[1]))
        rhs[mesh.is_boundary_node] = mesh.node_coords[mesh.is_boundary_node]

        out = scipy.sparse.linalg.spsolve(matrix, rhs)

        # PyAMG fails on circleci.
        # ml = pyamg.ruge_stuben_solver(matrix)
        # # Keep an eye on multiple rhs-solves in pyamg,
        # # <https://github.com/pyamg/pyamg/issues/215>.
        # out = numpy.column_stack(
        #     [ml.solve(rhs[:, 0], tol=tol), ml.solve(rhs[:, 1], tol=tol)]
        # )
        return out

    mesh = MeshTri(points, cells)
    runner(get_new_points, mesh, *args, **kwargs)
    return mesh.node_coords, mesh.cells["nodes"]
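For a single triangle, the assembly loop above yields the 3x3 graph Laplacian with 2 on the diagonal and -1 off the diagonal. A compact standalone check of the assembly pattern (numpy/scipy only):

import numpy
import scipy.sparse

cells = numpy.array([[0, 1, 2]]).T  # one triangle, shaped (3, num_cells)
a = numpy.ones(cells.shape[1])
row, col, val = [], [], []
for i in [[0, 1], [1, 2], [2, 0]]:
    edges = cells[i]
    row += [edges[0], edges[1], edges[0], edges[1]]
    col += [edges[0], edges[1], edges[1], edges[0]]
    val += [+a, +a, -a, -a]
matrix = scipy.sparse.coo_matrix(
    (numpy.concatenate(val), (numpy.concatenate(row), numpy.concatenate(col))),
    shape=(3, 3),
)
print(matrix.toarray())
# [[ 2. -1. -1.]
#  [-1.  2. -1.]
#  [-1. -1.  2.]]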
Example #18
def quasi_newton_uniform_lloyd(points, cells, *args, boundary_step=None, **kwargs):
    """Lloyd's algorithm.
    Check out

    Xiao Xiao,
    Over-Relaxation Lloyd Method For Computing Centroidal Voronoi Tessellations,
    Master's thesis, Jan. 2010,
    University of South Carolina,
    <https://scholarcommons.sc.edu/etd/295/>

    for the use of the relaxation parameter omega. (omega=2 is suggested.)

    Values above omega=2 can lead to flickering, i.e., rapidly alternating updates
    and bad meshes.
    """

    def get_new_points(mesh):
        # Exclude all cells with a covolume-edge-length ratio that is too negative. This
        # is necessary to prevent nodes from being dragged outside of the domain by very
        # flat cells on the boundary.
        # There are other possible heuristics too. For example, one could restrict the
        # mask to cells at or near the boundary.
        mask = numpy.any(mesh.ce_ratios < -0.5, axis=0)

        X = mesh.get_control_volume_centroids(cell_mask=mask)

        # When using a cell mask, it can happen that some nodes don't get any
        # contribution at all because they are adjacent only to masked cells. Reset
        # those, too.
        idx = numpy.any(numpy.isnan(X), axis=1)
        X[idx] = mesh.node_coords[idx]

        if boundary_step is None:
            # Reset boundary points to their original positions.
            idx = mesh.is_boundary_node
            X[idx] = mesh.node_coords[idx]
        else:
            # Move all boundary nodes back to the boundary.
            idx = mesh.is_boundary_node
            X[idx] = boundary_step(X[idx].T).T

        return X

    mesh = MeshTri(points, cells)

    method_name = "Lloyd's algorithm"
    runner(get_new_points, mesh, *args, **kwargs, method_name=method_name)

    return mesh.node_coords, mesh.cells["nodes"]
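The NaN reset works because points adjacent only to masked cells receive no contribution and come back as NaN rows; a tiny illustration of that reset pattern, followed by a hypothetical call (the tolerance/step-count positional arguments and the omega keyword forwarded through **kwargs are assumptions based on the docstring):

import numpy

X = numpy.array([[0.3, 0.4], [numpy.nan, numpy.nan]])
old = numpy.array([[0.0, 0.0], [1.0, 1.0]])
idx = numpy.any(numpy.isnan(X), axis=1)  # points that received no contribution
X[idx] = old[idx]                        # reset them to their previous positions
print(X)                                 # [[0.3 0.4] [1.  1. ]]

# hypothetical call, omega=2 as suggested above:
# points, cells = quasi_newton_uniform_lloyd(points, cells, 1.0e-6, 100, omega=2.0)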
Example #19
def info(argv=None):
    parser = _get_parser()
    args = parser.parse_args(argv)

    mesh = meshio.read(args.input_file)

    cells = mesh.get_cells_type("triangle")

    print("Number of points: {}".format(mesh.points.shape[0]))
    print("Number of elements:")
    for cell_type, value in mesh.cells:
        print("  {}: {}".format(cell_type, value.shape[0]))

    mesh = MeshTri(mesh.points, cells)
    print_stats(mesh)
Example #20
def _energy_uniform_per_point(X, cells):
    """The CPT mesh energy is defined as

        sum_i E_i,
        E_i = 1/(d+1) * sum int_{omega_i} ||x - x_i||^2 rho(x) dx,

    see Chen-Holst. This method gives the E_i and assumes uniform density, rho(x) = 1.
    """
    mesh = MeshTri(X, cells)

    star_integrals = np.zeros(mesh.points.shape[0])
    # Get a scheme of order 2 once; the integrand is quadratic, so this is exact.
    scheme = quadpy.t2.get_good_scheme(2)
    # Python loop over the cells... slow!
    for cell in mesh.cells("points"):
        for idx in cell:
            xi = mesh.points[idx]
            tri = mesh.points[cell]
            val = scheme.integrate(
                lambda x: np.einsum("ij,ij->i", x.T - xi, x.T - xi), tri)
            star_integrals[idx] += val

    dim = 2
    return star_integrals / (dim + 1)
Example #21
def info(argv=None):
    parser = _get_parser()
    args = parser.parse_args(argv)

    mesh = meshio.read(args.input_file)

    cells = mesh.cells["triangle"]

    print("Number of points: {}".format(mesh.points.shape[0]))
    print("Number of elements:")
    for key, value in mesh.cells.items():
        print("  {}: {}".format(key, value.shape[0]))

    mesh = MeshTri(mesh.points, cells)
    print_stats(mesh)

    return
Example #22
def quasi_newton_uniform_full(points, cells, *args, **kwargs):
    def get_new_points(mesh):
        # TODO need copy?
        x = mesh.node_coords.copy()
        x += update(mesh)
        return x

    mesh = MeshTri(points, cells)

    runner(
        get_new_points,
        mesh,
        *args,
        **kwargs,
        method_name=
        "Centroidal Voronoi Tesselation (CVT), uniform density, full-Hessian variant"
    )
    return mesh.node_coords, mesh.cells["nodes"]
Example #23
def fixed_point_uniform(points, cells, *args, **kwargs):
    """Idea:
    Move interior mesh points into the weighted averages of the centroids
    (barycenters) of their adjacent cells.
    """
    def get_new_points(mesh):
        return get_new_points_volume_averaged(mesh, mesh.cell_barycenters)

    mesh = MeshTri(points, cells)
    runner(
        get_new_points,
        mesh,
        *args,
        **kwargs,
        method_name=
        "Centroidal Patch Tesselation (CPT), uniform density, fixed-point variant"
    )
    return mesh.node_coords, mesh.cells["nodes"]
Example #24
def fixed_point_density_preserving(points, cells, *args, **kwargs):
    """Idea:
    Move interior mesh points into the weighted averages of the circumcenters
    of their adjacent cells. If a triangle cell switches orientation in the
    process, don't move quite so far.
    """
    def get_new_points(mesh):
        # Get circumcenters everywhere except at cells adjacent to the boundary;
        # barycenters there.
        cc = mesh.cell_circumcenters
        bc = mesh.cell_barycenters
        # Find all cells with a boundary edge
        boundary_cell_ids = mesh.edges_cells[1][:, 0]
        cc[boundary_cell_ids] = bc[boundary_cell_ids]
        return get_new_points_count_averaged(mesh, cc)

    mesh = MeshTri(points, cells)
    runner(get_new_points, mesh, *args, **kwargs)
    return mesh.node_coords, mesh.cells["nodes"]
Example #25
def fixed_point_density_preserving(points,
                                   cells,
                                   *args,
                                   boundary_step=None,
                                   **kwargs):
    """Idea:
    Move interior mesh points into the weighted averages of the circumcenters
    of their adjacent cells.
    """
    def get_new_points(mesh):
        # Get circumcenters everywhere except at cells adjacent to the boundary;
        # barycenters there. The reason is that points near the boundary would be
        # "sucked" out of the domain if the boundary cell is very flat, i.e., its
        # circumcenter is very far outside of the domain.
        # The same problem can also occur for cells _near_ (but not at) the boundary,
        # though, and, for maliciously constructed meshes, anywhere. Hence, this
        # heuristic can break down. A more robust approach would be to use barycenters
        # for all cells which are rather flat.
        cc = mesh.cell_circumcenters.copy()
        # Find all cells with a boundary edge
        is_boundary_cell = (numpy.sum(
            mesh.is_boundary_point[mesh.cells["points"]], axis=1) == 2)
        cc[is_boundary_cell] = mesh.cell_barycenters[is_boundary_cell]
        X = get_new_points_averaged(mesh, cc)
        if boundary_step is None:
            # Reset boundary points to their original positions.
            idx = mesh.is_boundary_point
            X[idx] = mesh.points[idx]
        else:
            # Move all boundary points back to the boundary.
            idx = mesh.is_boundary_point
            X[idx] = boundary_step(X[idx].T).T
        return X

    mesh = MeshTri(points, cells)
    runner(
        get_new_points,
        mesh,
        *args,
        **kwargs,
        method_name=
        "Optimal Delaunay Tesselation (ODT), density-preserving, fixed-point variant",
    )
    return mesh.points, mesh.cells["points"]
Example #26
def circle_random():
    n = 40
    radius = 1.0
    k = numpy.arange(n)
    boundary_pts = radius * numpy.column_stack(
        [numpy.cos(2 * numpy.pi * k / n),
         numpy.sin(2 * numpy.pi * k / n)])

    # Compute the number of interior nodes such that all triangles can be somewhat
    # equilateral.
    edge_length = 2 * numpy.pi * radius / n
    domain_area = numpy.pi - n * (radius**2 / 2 *
                                  (edge_length - numpy.sin(edge_length)))
    cell_area = numpy.sqrt(3) / 4 * edge_length**2
    approximate_num_cells = domain_area / cell_area
    # Euler:
    # 2 * num_points - num_boundary_edges - 2 = num_cells
    # <=>
    # num_points ~= 0.5 * (num_cells + num_boundary_edges) + 1
    m = int(0.5 * (approximate_num_cells + n) + 1)

    # generate random points in circle; <http://mathworld.wolfram.com/DiskPointPicking.html>
    numpy.random.seed(1)
    r = numpy.random.rand(m)
    alpha = 2 * numpy.pi * numpy.random.rand(m)

    interior_pts = numpy.column_stack(
        [numpy.sqrt(r) * numpy.cos(alpha),
         numpy.sqrt(r) * numpy.sin(alpha)])

    pts = numpy.concatenate([boundary_pts, interior_pts])

    tri = Delaunay(pts)
    pts = numpy.column_stack([pts[:, 0], pts[:, 1], numpy.zeros(pts.shape[0])])

    # Make sure there are exactly `n` boundary points
    mesh = MeshTri(pts, tri.simplices)
    assert numpy.sum(mesh.is_boundary_node) == n

    return pts, tri.simplices
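For n = 40 and radius = 1.0, this estimate gives an edge length of about 0.157, a domain area of about 3.129, and a cell area of about 0.0107, so approximate_num_cells comes out to roughly 293 and m = 167 random interior points are generated.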
Example #27
def fixed_point(points, cells, *args, **kwargs):
    """Perform k steps of Laplacian smoothing to the mesh, i.e., moving each
    interior vertex to the arithmetic average of its neighboring points.
    """

    def get_new_points(mesh):
        # move interior points into average of their neighbors
        num_neighbors = numpy.zeros(len(mesh.node_coords), dtype=int)
        idx = mesh.edges["nodes"]
        fastfunc.add.at(num_neighbors, idx, numpy.ones(idx.shape, dtype=int))

        new_points = numpy.zeros(mesh.node_coords.shape)
        fastfunc.add.at(new_points, idx[:, 0], mesh.node_coords[idx[:, 1]])
        fastfunc.add.at(new_points, idx[:, 1], mesh.node_coords[idx[:, 0]])

        new_points /= num_neighbors[:, None]
        idx = mesh.is_boundary_node
        new_points[idx] = mesh.node_coords[idx]
        return new_points

    mesh = MeshTri(points, cells)
    runner(get_new_points, mesh, *args, **kwargs)
    return mesh.node_coords, mesh.cells["nodes"]
Example #28
def nonlinear_optimization(
    X, cells, tol, max_num_steps, verbosity=1, step_filename_format=None
):
    """Optimal Delaunay Triangulation smoothing.

    This method minimizes the energy

        E = int_Omega |u_l(x) - u(x)| rho(x) dx

    where u(x) = ||x||^2, u_l is its piecewise linear nodal interpolation and
    rho is the density. Since u(x) is convex, u_l >= u everywhere and

        u_l(x) = sum_i phi_i(x) u(x_i)

    where phi_i is the hat function at x_i. With rho(x)=1, this gives

        E = int_Omega sum_i phi_i(x) u(x_i) - u(x)
          = 1/(d+1) sum_i ||x_i||^2 |omega_i| - int_Omega ||x||^2

    where d is the spatial dimension and omega_i is the star of x_i (the set of
    all simplices containing x_i).
    """
    import scipy.optimize

    # TODO remove this assertion and test
    # flat mesh
    assert X.shape[1] == 2

    mesh = MeshTri(X, cells, flat_cell_correction=None)

    if step_filename_format:
        mesh.save(
            step_filename_format.format(0),
            show_centroids=False,
            show_coedges=False,
            show_axes=False,
            nondelaunay_edge_color="k",
        )

    if verbosity > 0:
        print("Before:")
        extra_cols = ["energy: {:.5e}".format(energy(mesh))]
        print_stats(mesh, extra_cols=extra_cols)

    def f(x):
        mesh.update_interior_node_coordinates(x.reshape(-1, 2))
        return energy(mesh, uniform_density=True)

    # TODO put f and jac together
    def jac(x):
        mesh.update_interior_node_coordinates(x.reshape(-1, 2))

        grad = numpy.zeros(mesh.node_coords.shape)
        cc = mesh.cell_circumcenters
        for mcn in mesh.cells["nodes"].T:
            fastfunc.add.at(
                grad, mcn, ((mesh.node_coords[mcn] - cc).T * mesh.cell_volumes).T
            )
        gdim = 2
        grad *= 2 / (gdim + 1)
        return grad[mesh.is_interior_node, :2].flatten()

    def flip_delaunay(x):
        flip_delaunay.step += 1
        # Flip the edges
        mesh.update_interior_node_coordinates(x.reshape(-1, 2))
        mesh.flip_until_delaunay()

        if step_filename_format:
            mesh.save(
                step_filename_format.format(flip_delaunay.step),
                show_centroids=False,
                show_coedges=False,
                show_axes=False,
                nondelaunay_edge_color="k",
            )
        if verbosity > 1:
            print("\nStep {}:".format(flip_delaunay.step))
            print_stats(mesh, extra_cols=["energy: {}".format(f(x))])

        # mesh.show()
        # exit(1)
        return

    flip_delaunay.step = 0

    x0 = X[mesh.is_interior_node, :2].flatten()

    out = scipy.optimize.minimize(
        f,
        x0,
        jac=jac,
        method="CG",
        # method='newton-cg',
        tol=tol,
        callback=flip_delaunay,
        options={"maxiter": max_num_steps},
    )
    # Don't assert out.success; max_num_steps may be reached, that's fine.

    # One last edge flip
    mesh.update_interior_node_coordinates(out.x.reshape(-1, 2))
    mesh.flip_until_delaunay()

    if verbosity > 0:
        print("\nFinal ({} steps):".format(out.nit))
        extra_cols = ["energy: {:.5e}".format(energy(mesh))]
        print_stats(mesh, extra_cols=extra_cols)
        print()

    return mesh.node_coords, mesh.cells["nodes"]
Example #29
def solve_hessian_approx_uniform(X, cells, rhs):
    """As discussed above, the approximated Jacobian is

      partial_i E = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) |tau_j|.

    To get the Hessian, we have to form its derivative. As a simplification,
    let us assume again that |tau_j| is independent of the node positions. Then we get

       partial_ii E = 2/(d+1) |omega_i| - 2/(d+1)**2 |omega_i|,
       partial_ij E = -2/(d+1)**2 |tau_j|.

    The terms with (d+1)**2 are from the barycenter in `partial_i E`. It turns out from
    numerical experiments that the negative term in `partial_ii E` is detrimental to the
    convergence. Hence, this approximated Hessian solver only considers the off-diagonal
    contributions from the barycentric terms.
    """
    dim = 2
    mesh = MeshTri(X, cells)

    # Create matrix in IJV format
    row_idx = []
    col_idx = []
    val = []

    cells = mesh.cells["nodes"].T
    n = X.shape[0]

    # Main diagonal, 2/(d+1) |omega_i| x_i
    a = mesh.cell_volumes * (2 / (dim + 1))
    for i in [0, 1, 2]:
        row_idx += [cells[i]]
        col_idx += [cells[i]]
        val += [a]

    # terms corresponding to -2/(d+1) * b_j |tau_j|
    a = mesh.cell_volumes * (2 / (dim + 1)**2)
    for i in [[0, 1, 2], [1, 2, 0], [2, 0, 1]]:
        edges = cells[i]
        # Leads to funny oscillatory movements
        # row_idx += [edges[0], edges[0], edges[0]]
        # col_idx += [edges[0], edges[1], edges[2]]
        # val += [-a, -a, -a]
        # Best so far
        row_idx += [edges[0], edges[0]]
        col_idx += [edges[1], edges[2]]
        val += [-a, -a]

    row_idx = numpy.concatenate(row_idx)
    col_idx = numpy.concatenate(col_idx)
    val = numpy.concatenate(val)

    # Set Dirichlet conditions on the boundary
    matrix = scipy.sparse.coo_matrix((val, (row_idx, col_idx)), shape=(n, n))
    # Transform to CSR format for efficiency
    matrix = matrix.tocsr()

    # Apply Dirichlet conditions.
    # Set all Dirichlet rows to 0.
    for i in numpy.where(mesh.is_boundary_node)[0]:
        matrix.data[matrix.indptr[i]:matrix.indptr[i + 1]] = 0.0
    # Set the diagonal and RHS.
    d = matrix.diagonal()
    d[mesh.is_boundary_node] = 1.0
    matrix.setdiag(d)

    rhs[mesh.is_boundary_node] = 0.0

    out = scipy.sparse.linalg.spsolve(matrix, rhs)

    # PyAMG fails on circleci.
    # ml = pyamg.ruge_stuben_solver(matrix)
    # # Keep an eye on multiple rhs-solves in pyamg,
    # # <https://github.com/pyamg/pyamg/issues/215>.
    # tol = 1.0e-10
    # out = numpy.column_stack(
    #     [ml.solve(rhs[:, 0], tol=tol), ml.solve(rhs[:, 1], tol=tol)]
    # )
    return out
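The indptr slicing used for the Dirichlet rows zeroes out all stored entries of one CSR row in place; a small standalone demonstration of the trick:

import numpy
import scipy.sparse

m = scipy.sparse.csr_matrix(
    numpy.array([[2.0, -1.0, 0.0], [-1.0, 2.0, -1.0], [0.0, -1.0, 2.0]])
)
i = 1  # pretend point 1 is a boundary (Dirichlet) point
m.data[m.indptr[i]:m.indptr[i + 1]] = 0.0  # zero all stored entries of row i
d = m.diagonal()
d[i] = 1.0
m.setdiag(d)                               # put a 1 back on the diagonal
print(m.toarray())
# [[ 2. -1.  0.]
#  [ 0.  1.  0.]
#  [ 0. -1.  2.]]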
Example #30
def nonlinear_optimization_uniform(
    X,
    cells,
    tol,
    max_num_steps,
    verbose=False,
    step_filename_format=None,
    callback=None,
):
    """Optimal Delaunay Tesselation smoothing.

    This method minimizes the energy

        E = int_Omega |u_l(x) - u(x)| rho(x) dx

    where u(x) = ||x||^2, u_l is its piecewise linear nodal interpolation and
    rho is the density. Since u(x) is convex, u_l >= u everywhere and

        u_l(x) = sum_i phi_i(x) u(x_i)

    where phi_i is the hat function at x_i. With rho(x)=1, this gives

        E = int_Omega sum_i phi_i(x) u(x_i) - u(x)
          = 1/(d+1) sum_i ||x_i||^2 |omega_i| - int_Omega ||x||^2

    where d is the spatial dimension and omega_i is the star of x_i (the set of
    all simplices containing x_i).
    """
    import scipy.optimize

    mesh = MeshTri(X, cells)

    if step_filename_format:
        mesh.save(
            step_filename_format.format(0),
            show_coedges=False,
            show_axes=False,
            cell_quality_coloring=("viridis", 0.0, 1.0, False),
        )

    if verbose:
        print("Before:")
        extra_cols = ["energy: {:.5e}".format(energy(mesh))]
        print_stats(mesh, extra_cols=extra_cols)

    def f(x):
        mesh.set_points(x.reshape(-1, X.shape[1]), mesh.is_interior_point)
        return energy(mesh, uniform_density=True)

    # TODO put f and jac together
    def jac(x):
        mesh.set_points(x.reshape(-1, X.shape[1]), mesh.is_interior_point)

        grad = numpy.zeros(mesh.points.shape)
        n = grad.shape[0]
        cc = mesh.cell_circumcenters
        for mcn in mesh.cells["points"].T:
            vals = (mesh.points[mcn] - cc).T * mesh.cell_volumes
            # numpy.add.at(grad, mcn, vals)
            grad += numpy.array(
                [numpy.bincount(mcn, val, minlength=n) for val in vals]).T
        gdim = 2
        grad *= 2 / (gdim + 1)
        return grad[mesh.is_interior_point].flatten()

    def flip_delaunay(x):
        flip_delaunay.step += 1
        # Flip the edges
        mesh.set_points(x.reshape(-1, X.shape[1]), mesh.is_interior_point)
        mesh.flip_until_delaunay()

        if step_filename_format:
            mesh.save(
                step_filename_format.format(flip_delaunay.step),
                show_coedges=False,
                show_axes=False,
                cell_quality_coloring=("viridis", 0.0, 1.0, False),
            )

        if callback:
            callback(flip_delaunay.step, mesh)

        # mesh.show()
        # exit(1)
        return

    flip_delaunay.step = 0

    x0 = X[mesh.is_interior_point].flatten()

    if callback:
        callback(0, mesh)

    out = scipy.optimize.minimize(
        f,
        x0,
        jac=jac,
        # method="Nelder-Mead",
        # method="Powell",
        # method="CG",
        # method="Newton-CG",
        method="BFGS",
        # method="L-BFGS-B",
        # method="TNC",
        # method="COBYLA",
        # method="SLSQP",
        tol=tol,
        callback=flip_delaunay,
        options={"maxiter": max_num_steps},
    )
    # Don't assert out.success; max_num_steps may be reached, that's fine.

    # One last edge flip
    mesh.set_points(out.x.reshape(-1, X.shape[1]), mesh.is_interior_point)

    mesh.flip_until_delaunay()

    info = (
        f"{out.nit} steps, " +
        "Optimal Delaunay Tesselation (ODT), uniform density, BFGS variant")
    if verbose:
        print(f"\nFinal ({info})")
        extra_cols = ["energy: {:.5e}".format(energy(mesh))]
        print_stats(mesh, extra_cols=extra_cols)
        print()

    return mesh.points, mesh.cells["points"]
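A usage sketch for the function above. The callback signature (step, mesh) is read off the call sites in the body; the attribute accesses in the callback assume the same meshplex mesh object used throughout these examples:

def report(step, mesh):
    # called with step=0 before the optimization and after every iteration's edge flip
    print(step, mesh.points.shape[0], float(mesh.cell_volumes.min()))

# hypothetical call:
# points, cells = nonlinear_optimization_uniform(
#     X, cells, tol=1.0e-6, max_num_steps=50, verbose=True, callback=report
# )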