Example #1
def _tag_injection_cell(
    gb: pp.GridBucket, g: pp.Grid, pts: np.ndarray, length_scale
) -> None:
    """ Helper method to tag find closest point on g to pts

    The tag is set locally to g and to node props on gb.
    length_scale is used to log the unscaled distance to
    the injection cell from pts.

    Parameters
    ----------
    gb : pp.GridBucket
    g : pp.Grid
    pts : np.ndarray, shape: (3,1)
    length_scale : float

    """
    assert pts.shape == (3, 1), "We only consider one point; array needs shape 3x1"
    tags = np.zeros(g.num_cells)
    ids, dsts = g.closest_cell(pts, return_distance=True)
    tags[ids] = 1
    g.tags["well_cells"] = tags
    d = gb.node_props(g)
    pp.set_state(d, {"well": tags})

    # Log information on the injection point
    logger.info(
        f"Closest cell found has (unscaled) distance: {dsts[0] * length_scale:4f}\n"
        f"ideal (scaled) point coordinate: {pts.T}\n"
        f"nearest (scaled) cell center coordinate: {g.cell_centers[:, ids].T}\n"
    )
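For reference, a minimal self-contained sketch of the same nearest-cell lookup, using plain numpy and made-up cell centers (no PorePy required):

import numpy as np

# Hypothetical cell centers, shape (3, num_cells), and one query point, shape (3, 1).
cell_centers = np.array([[0.5, 1.5, 2.5], [0.5, 0.5, 0.5], [0.0, 0.0, 0.0]])
pts = np.array([[1.2], [0.4], [0.0]])

# Euclidean distance from the point to every cell center; pick the smallest.
dists = np.sqrt(np.sum((cell_centers - pts) ** 2, axis=0))
closest = np.argmin(dists)

tags = np.zeros(cell_centers.shape[1])
tags[closest] = 1  # mark the injection cell, as in _tag_injection_cell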
Example #2
def _append_face_geometry_fracture_grid(
    g: pp.Grid, n_new_faces: int, new_centers: np.ndarray
) -> None:
    """
    Appends and updates face geometry information for new faces. Also updates
    num_faces.
    """
    g.face_normals = np.append(g.face_normals, np.zeros((3, n_new_faces)), axis=1)
    g.face_areas = np.append(g.face_areas, np.ones(n_new_faces))
    g.face_centers = np.append(g.face_centers, new_centers, axis=1)
    g.num_faces += n_new_faces
Example #3
def match_2d(new_g: pp.Grid, old_g: pp.Grid, tol: float):
    """Match two simplex tessalations to identify overlapping cells.

    The overlaps are identified by the cell index of the two overlapping cells,
    and their weighted common area.

    Parameters:
        new_g: simplex grid of dimension 2.
        old_g: simplex grid of dimension 2.
        tol: tolerance used in the geometric comparisons.

    Returns:
        np.array: Ratio of cell volume in the common grid and the original grid.
        np.array: Index of overlapping cell in the first grid.
        np.array: Index of overlapping cell in the second grid.

    """

    @pp.time_logger(sections=module_sections)
    def proj_pts(p, cc, normal):
        """ Project points to the 2d plane defined by normal and center them around cc"""
        rot = pp.map_geometry.project_plane_matrix(p - cc, normal)
        return rot.dot(p - cc)[:2]

    shape = (new_g.dim + 1, new_g.num_cells)
    cn_new_g = new_g.cell_nodes().indices.reshape(shape, order="F")

    shape = (old_g.dim + 1, old_g.num_cells)
    cn_old_g = old_g.cell_nodes().indices.reshape(shape, order="F")

    # Center points around mean
    cc = np.mean(new_g.nodes, axis=1).reshape((3, 1))
    # Calculate common normal for both grids
    n = pp.map_geometry.compute_normal(new_g.nodes - cc)
    n_old = pp.map_geometry.compute_normal(old_g.nodes - cc)
    if not (np.allclose(n, n_old) or np.allclose(n, -n_old)):
        raise ValueError("The new and old grid must lie in the same plane")

    # Calculate intersection
    isect = pp.intersections.triangulations(
        proj_pts(new_g.nodes, cc, n), proj_pts(old_g.nodes, cc, n), cn_new_g, cn_old_g
    )

    num = len(isect)
    new_g_ind = np.zeros(num, dtype=int)
    old_g_ind = np.zeros(num, dtype=int)
    weights = np.zeros(num)

    for ind, i in enumerate(isect):
        new_g_ind[ind] = i[0]
        old_g_ind[ind] = i[1]
        weights[ind] = i[2]

    weights /= old_g.cell_volumes[old_g_ind]
    return weights, new_g_ind, old_g_ind
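The helper proj_pts delegates to pp.map_geometry.project_plane_matrix. For readers without PorePy at hand, here is a self-contained sketch of an equivalent projection built from Rodrigues' rotation formula; the names project_to_plane, n, ez are ours, not PorePy's:

import numpy as np

def project_to_plane(p, cc, normal):
    # Rotate so that `normal` maps to the z-axis, then drop the third coordinate.
    n = normal / np.linalg.norm(normal)
    ez = np.array([0.0, 0.0, 1.0])
    axis = np.cross(n, ez)
    s, c = np.linalg.norm(axis), np.dot(n, ez)
    if s < 1e-12:
        # Normal already (anti-)parallel to the z-axis.
        rot = np.eye(3) if c > 0 else np.diag([1.0, -1.0, -1.0])
    else:
        k = axis / s
        K = np.array([[0, -k[2], k[1]], [k[2], 0, -k[0]], [-k[1], k[0], 0]])
        # Rodrigues: R = I + sin(theta) K + (1 - cos(theta)) K^2.
        rot = np.eye(3) + s * K + (1 - c) * (K @ K)
    return (rot @ (p - cc))[:2]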
Example #4
def _update_nodes_fracture_grid(
    g_h: pp.Grid, g_l: pp.Grid, faces_h: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Finds the nodes in the lower-dimensional grid corresponding to the higher-
    dimensional faces to be split. Updates node information in g_l:
        global_point_ind
        nodes
        num_nodes

    Returns:
        unique_nodes_l - numpy array (number of involved nodes x 1) Indices of
            the nodes (as arranged in g_l.nodes).
        unique_nodes_h - same, but corresponding to g_h.nodes.

    """
    # Nodes of g_h to be split
    nodes_h = g_h.face_nodes[:, faces_h].nonzero()[0]
    unique_nodes_h = np.unique(nodes_h)

    # Global index of nodes to split
    unique_global_nodes = g_h.global_point_ind[unique_nodes_h]

    # Some of the nodes of the face to be split will be in g_l already (as tip nodes)
    # Find which are present, and which should be added
    # NOTE: This comparison must be done in terms of global_point_ind
    are_old_global_nodes_in_l = np.in1d(unique_global_nodes,
                                        g_l.global_point_ind)
    are_new_global_nodes_in_l = np.logical_not(are_old_global_nodes_in_l)

    # Index in g_h, of nodes to be added to g_l
    new_node_indices_h = unique_nodes_h[are_new_global_nodes_in_l]
    # Global indices of nodes to be added to g_l
    new_global_node_indices_l = unique_global_nodes[are_new_global_nodes_in_l]

    # Append nodes to g_l and update bookkeeping
    new_nodes_l = g_h.nodes[:, new_node_indices_h].copy()
    g_l.nodes = np.append(g_l.nodes, new_nodes_l, axis=1)
    n_new_nodes = new_nodes_l.shape[1]
    g_l.num_nodes += n_new_nodes

    # Append global point indices to g_l
    g_l.global_point_ind = np.append(g_l.global_point_ind,
                                     new_global_node_indices_l)

    # Find index of the updated nodes in g_l that belong to the split faces
    # Order preserving find:
    unique_nodes_l = pp.utils.setmembership.ismember_rows(unique_global_nodes,
                                                          g_l.global_point_ind,
                                                          sort=False)[1]

    return unique_nodes_l, unique_nodes_h
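The order-preserving lookup provided by ismember_rows can be mimicked for 1d integer arrays with numpy alone. A small sketch, assuming every queried global index is present in the target array:

import numpy as np

unique_global_nodes = np.array([17, 3, 42])
global_point_ind = np.array([3, 8, 42, 17, 5])

# For each queried global index, find its position in global_point_ind,
# preserving the query order (cf. ismember_rows with sort=False).
order = np.argsort(global_point_ind)
pos = np.searchsorted(global_point_ind[order], unique_global_nodes)
indices = order[pos]  # -> array([3, 0, 2])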
Example #5
def _update_cells_fracture_grid(
    g_h: pp.Grid, g_l: pp.Grid, faces_h: np.ndarray
) -> np.ndarray:
    """
    Cell information for g_l is inherited directly from the higher-dimensional
    faces we are splitting. The function updates num_cells, cell_centers and
    cell_volumes.
    """
    n_new_cells = g_l.num_cells + faces_h.size
    new_cells = np.arange(g_l.num_cells, n_new_cells)
    g_l.num_cells = n_new_cells
    g_l.cell_centers = np.append(g_l.cell_centers, g_h.face_centers[:, faces_h], axis=1)
    g_l.cell_volumes = np.append(g_l.cell_volumes, g_h.face_areas[faces_h])
    return new_cells
Example #6
    def update_primary(self, g_new: pp.Grid, g_old: pp.Grid, tol: Optional[float] = None):
        """

        Update the _primary_to_mortar_int map when the primary (higher-dimensional) grid is
        changed.

        Parameter:
            g_new (pp.Grid): The new primary grid.
            g_old (pp.Grid): The old primary grid.
            tol (double, optional): Tolerance used for matching the new and old grids.
                Defaults to self.tol.
        """
        # IMPLEMENTATION NOTE: The signature of this method is different from
        # update_secondary(), since the latter must also take care of the side grids.

        if tol is None:
            tol = self.tol

        if self.dim == 0:

            # retrieve the old faces and the corresponding coordinates
            _, old_faces, _ = sps.find(self._primary_to_mortar_int)
            old_nodes = g_old.face_centers[:, old_faces]

            # retrieve the boundary faces and the corresponding coordinates
            new_faces = g_new.get_all_boundary_faces()
            new_nodes = g_new.face_centers[:, new_faces]

            # We assume all old faces map to the same physical point
            for i in range(1, old_nodes.shape[1]):
                is_same = (
                    pp.distances.point_pointset(old_nodes[:, 0], old_nodes[:, i]) < tol
                )
                if not is_same:
                    raise ValueError(
                        "0d->1d mappings must map to the same physical point"
                    )
            old_nodes = old_nodes[:, 0]
            mask = pp.distances.point_pointset(old_nodes, new_nodes) < tol
            new_faces = new_faces[mask]

            shape = (g_old.num_faces, g_new.num_faces)
            matrix_DIJ = (np.ones(old_faces.shape), (old_faces, new_faces))
            split_matrix = sps.csc_matrix(matrix_DIJ, shape=shape)

        elif self.dim == 1:
            # The case is conceptually similar to 0d, but quite a bit more technical
            split_matrix = pp.match_grids.match_grids_along_1d_mortar(
                self, g_new, g_old, tol
            )

        else:  # should be mg.dim == 2
            # It should be possible to use essentially the same approach as in 1d,
            # but this is not yet covered.
            raise NotImplementedError("Have not yet implemented this.")

        # Compose the existing projection with the matching of old to new faces
        self._primary_to_mortar_int = self._primary_to_mortar_int * split_matrix
        self._check_mappings()
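The 0d branch above builds a permutation-like matrix routing each old face to its geometric match among the new faces, then composes it with the stored projection. A toy scipy sketch with made-up sizes and indices:

import numpy as np
import scipy.sparse as sps

# One old face (index 2 of 4) matched to one new face (index 5 of 6).
old_faces = np.array([2])
new_faces = np.array([5])
split = sps.csc_matrix(
    (np.ones(old_faces.size), (old_faces, new_faces)), shape=(4, 6)
)

# Existing projection: a single mortar cell hitting old face 2.
primary_to_mortar = sps.csc_matrix(
    (np.ones(1), (np.array([0]), old_faces)), shape=(1, 4)
)

# Composition reroutes the projection to the new face numbering.
updated = primary_to_mortar * split  # shape (1, 6), nonzero in column 5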
Example #7
    def _bc_type(self, g: pp.Grid) -> pp.BoundaryConditionVectorial:
        """
        We set Neumann values imitating an anisotropic background stress regime on all
        but three faces, which are fixed to ensure a unique solution.
        """
        # Neumann is the default condition; only the three faces selected by
        # faces_to_fix are set to Dirichlet (cf. bc_values in Example #15).
        bc = pp.BoundaryConditionVectorial(g, self.faces_to_fix(g), "dir")
        return bc
Example #8
def remesh_1d(g_old: pp.Grid, num_nodes: int, tol: float = 1e-6) -> pp.Grid:
    """Create a new 1d mesh covering the same domain as an old one.

    The new grid is equispaced, and there is no guarantee that the nodes in
    the old and new grids are coinciding. Use with care, in particular for
    grids with internal boundaries.

    Parameters:
        g_old (pp.Grid): 1d grid to be replaced.
        num_nodes (int): Number of nodes in the new grid.
        tol (double, optional): Tolerance used to compare node coordinates
            (for mapping of boundary conditions). Defaults to 1e-6.

    Returns:
        pp.Grid: New grid.

    """

    # Create equi-spaced nodes covering the same domain as the old grid
    theta = np.linspace(0, 1, num_nodes)
    start, end = g_old.get_all_boundary_nodes()
    # The new axis broadcasts the (3, 1) endpoint coordinates against the
    # (num_nodes,) parameterization theta.
    nodes = (
        g_old.nodes[:, start, np.newaxis] * theta
        + g_old.nodes[:, end, np.newaxis] * (1.0 - theta)
    )

    # Create the new grid, and assign nodes.
    g = TensorGrid(nodes[0, :])
    g.nodes = nodes
    g.compute_geometry()

    # Map the tags from the old grid to the new one.
    # Normally, the tags are assigned to faces/points that are fixed in the 1d
    # mesh; we use this assumption to proceed.
    for f_old in np.arange(g_old.num_faces):
        # Detect which face in the new grid is geometrically the same (up to a
        # tolerance) as the face in the old grid
        dist = pp.distances.point_pointset(g_old.face_centers[:, f_old],
                                           g.face_centers)
        f_new = np.where(dist < tol)[0]

        # If a match is found, transfer all tags from the face in the old grid
        # to the face in the new grid
        if f_new.size:
            if f_new.size != 1:
                raise ValueError(
                    "Found more than one matching face; something went wrong")
            for tag in pp.utils.tags.standard_face_tags():
                g.tags[tag][f_new] = g_old.tags[tag][f_old]

    g.update_boundary_node_tag()

    return g
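The endpoint interpolation above is plain numpy broadcasting. A tiny sketch with made-up endpoints shows the shapes involved:

import numpy as np

theta = np.linspace(0, 1, 5)             # shape (5,)
start = np.array([[0.0], [0.0], [0.0]])  # shape (3, 1)
end = np.array([[2.0], [1.0], [0.0]])    # shape (3, 1)

# Broadcasting (3, 1) against (5,) yields all node coordinates at once.
nodes = start * theta + end * (1.0 - theta)  # shape (3, 5)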
Example #9
    def bc_type_mechanics(self, g: pp.Grid) -> pp.BoundaryConditionVectorial:  # noqa
        # Define boundary regions
        all_bf = g.get_boundary_faces()
        bc = pp.BoundaryConditionVectorial(g, all_bf, "dir")  # noqa

        # Internal faces are Neumann by default. We change them to
        # Dirichlet for the contact problem. That is: The mortar
        # variable represents the displacement on the fracture faces.
        frac_face = g.tags["fracture_faces"]
        bc.is_neu[:, frac_face] = False
        bc.is_dir[:, frac_face] = True
        return bc
Example #10
    def _bc_type(self, g: pp.Grid) -> pp.BoundaryConditionVectorial:
        """Define type of boundary conditions: Dirichlet on all global boundaries,
        Dirichlet also on fracture faces.
        """
        all_bf = g.get_boundary_faces()
        bc = pp.BoundaryConditionVectorial(g, all_bf, "dir")
        # Default internal BC is Neumann. We change to Dirichlet for the contact
        # problem. I.e., the mortar variable represents the displacement on the
        # fracture faces.
        frac_face = g.tags["fracture_faces"]
        bc.is_neu[:, frac_face] = False
        bc.is_dir[:, frac_face] = True
        return bc
Example #11
def distort_grid_1d(
    g: pp.Grid, ratio: float = 0.1, fixed_nodes: Optional[np.ndarray] = None
) -> pp.Grid:
    """Randomly distort internal nodes in a 1d grid.

    The boundary nodes are left untouched.

    The perturbations will not perturb the topology of the mesh.

    Parameters:
         g (pp.Grid): To be perturbed. Modifications will happen in place.
         ratio (float, optional, defaults to 0.1): Perturbation ratio. A node can be
              moved at most half the distance towards any of its
              neighboring nodes. The ratio will multiply the chosen
              distortion. Should be less than 1 to preserve grid topology.
         fixed_nodes (np.array, optional): Index of nodes to keep fixed under
             distortion. Boundary nodes will always be fixed, even if not
             explicitly included in fixed_nodes.

    Returns:
         pp.Grid: Grid with distorted nodes.

    
    """
    if fixed_nodes is None:
        fixed_nodes = np.array([0, g.num_nodes - 1], dtype=int)
    else:
        # Ensure that boundary nodes are also fixed
        fixed_nodes = np.hstack((fixed_nodes, np.array([0, g.num_nodes - 1])))
        fixed_nodes = np.unique(fixed_nodes).astype(int)

    g.compute_geometry()
    r = ratio * (0.5 - np.random.random(g.num_nodes - 2))
    r *= np.minimum(g.cell_volumes[:-1], g.cell_volumes[1:])
    # Honor the fixed nodes by zeroing their perturbation. Boundary nodes are
    # already excluded, since r only covers the interior nodes.
    interior = np.arange(1, g.num_nodes - 1)
    r[np.isin(interior, fixed_nodes)] = 0.0
    direction = (g.nodes[:, -1] - g.nodes[:, 0]).reshape((-1, 1))
    nrm = np.linalg.norm(direction)
    g.nodes[:, 1:-1] += r * direction / nrm
    g.compute_geometry()
    return g
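A possible usage sketch, assuming pp.TensorGrid is exported in the pp namespace as elsewhere in PorePy:

import numpy as np
import porepy as pp

g = pp.TensorGrid(np.linspace(0, 1, 11))
g.compute_geometry()
# Keep node 5 in place, in addition to the two boundary nodes.
g = distort_grid_1d(g, ratio=0.3, fixed_nodes=np.array([5]))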
Example #12
    def _tip_bases(
        self,
        g: pp.Grid,
        projection: pp.TangentialNormalProjection,
        faces: np.ndarray,
    ) -> np.ndarray:
        """
        Construct local bases for tip faces of a fracture.

        Note: The orientation of a 2d basis may be found by
            np.cross(basis[0], basis[1])
        Parameters
        ----------
        g : grid.
        data_edge : dictionary

        faces : array
            The tip faces for which local bases are constructed.

        Returns
        -------
        basis : np.ndarray
            Basis vectors. nd x nd x nd. The first axis is for the basis vectors,
            the second is the dimension and the last for the tip faces. I.e.,
            basis vector i of tip face j is basis[i,:,j]. The ordering of the
            basis vectors is [e_{\perp}, e_n, e_{\parallel}], with the subscripts
            of the tangential vectors indicating that they are perpendicular and
            parallel to the fracture tip (face), respectively.
        """
        basis = np.empty((self.Nd, self.Nd, faces.size))
        signs, cells = g.signs_and_cells_of_boundary_faces(faces)

        basis[0, :, :] = np.reshape(
            g.face_normals[:self.Nd, faces] / g.face_areas[faces] * signs,
            ((self.Nd, faces.size)),
        )
        # Normals of the fracture plane
        if projection.normals.shape[1] == 1:
            basis[1, :, :] = projection.normals
        else:
            basis[1, :, :] = projection.normals[:, cells]
        if g.dim == 2:
            # e2 is parallel to the tip face
            basis[2, :, :] = np.cross(basis[0, :, :], basis[1, :, :], axis=0)
        return basis
Example #13
def partition_metis(g: pp.Grid, num_part: int) -> np.ndarray:
    """
    Partition a grid using metis.

    This function requires that pymetis is installed, as can be done by

        pip install pymetis

    This will install metis itself in addition to the python bindings. There
    are other python bindings for metis as well, but pymetis has behaved well
    so far.

    Parameters:
        g: core.grids.grid: To be partitioned. Only the cell_faces attribute is
            used.
        num_part (int): Number of partitions.

    Returns:
        np.array (size:g.num_cells): Partition vector, one number in
            [0, num_part) for each cell.

    """
    try:
        import pymetis
    except ImportError:
        warnings.warn(
            "Could not import pymetis. Partitioning by metis will not work.")
        raise ImportError("Cannot partition by pymetis")

    # Connection map between cells
    c2c = g.cell_connection_map()

    # Convert the cells into the format required by pymetis
    adjacency_list = [list(c2c.getrow(i).indices) for i in range(c2c.shape[0])]
    # Call pymetis
    # It seems important that num_part is a Python int, not a numpy integer.
    part = pymetis.part_graph(int(num_part), adjacency=adjacency_list)

    # pymetis returns a tuple; the first entry is the objective (edge cut) value,
    # the second the partitioning, which is all we need here.
    return np.array(part[1])
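A possible usage sketch (requires pymetis; pp.CartGrid is used the same way in Example #21):

import numpy as np
import porepy as pp

g = pp.CartGrid(np.array([4, 4]))
g.compute_geometry()
# Partition the 16 cells into 4 parts; part holds values in [0, 4).
part = partition_metis(g, 4)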
Example #14
    def _domain_boundary_sides(
        self, g: pp.Grid
    ) -> Tuple[
        np.ndarray,
        np.ndarray,
        np.ndarray,
        np.ndarray,
        np.ndarray,
        np.ndarray,
        np.ndarray,
    ]:
        """
        Obtain indices of the faces of a grid that lie on each side of the domain
        boundaries.
        """
        tol = 1e-10
        box = self.box
        east = g.face_centers[0] > box["xmax"] - tol
        west = g.face_centers[0] < box["xmin"] + tol
        north = g.face_centers[1] > box["ymax"] - tol
        south = g.face_centers[1] < box["ymin"] + tol
        if self._Nd == 2:
            top = np.zeros(g.num_faces, dtype=bool)
            bottom = top.copy()
        else:
            top = g.face_centers[2] > box["zmax"] - tol
            bottom = g.face_centers[2] < box["zmin"] + tol
        all_bf = g.get_boundary_faces()
        return all_bf, east, west, north, south, top, bottom
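The side masks are simple coordinate comparisons against the bounding box. A self-contained numpy sketch with a made-up box and face centers:

import numpy as np

tol = 1e-10
box = {"xmin": 0.0, "xmax": 1.0}
face_centers_x = np.array([0.0, 0.25, 0.5, 0.75, 1.0])

east = face_centers_x > box["xmax"] - tol  # -> [False, False, False, False, True]
west = face_centers_x < box["xmin"] + tol  # -> [True, False, False, False, False]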
Example #15
    def bc_values(self, g: pp.Grid):
        """ Mechanical stress values as ISC

        All faces are Neumann, except 3 faces fixed
        by self.faces_to_fix(g), which are Dirichlet.
        """
        # Retrieve the domain boundary
        all_bf, east, west, north, south, top, bottom = self.domain_boundary_sides(g)

        # Boundary values
        bc_values = np.zeros((g.dim, g.num_faces))

        # --- mechanical state ---
        # Get outward facing normal vectors for domain boundary, weighted for face area

        # 1. Get normal vectors on the faces. These are already weighted by face area.
        bf_normals = g.face_normals
        # 2. Adjust direction so they face outwards
        flip_normal_to_outwards = np.where(g.cell_face_as_dense()[0, :] >= 0, 1, -1)
        outward_normals = bf_normals * flip_normal_to_outwards
        bf_stress = np.dot(self.stress, outward_normals[:, all_bf])
        bc_values[:, all_bf] += bf_stress / self.scalar_scale  # Mechanical stress

        # --- gravitational forces ---
        # See init-method to turn on/off gravity effects (Default: ON)
        if self._gravity_bc:
            lithostatic_bc = self._adjust_stress_for_depth(g, outward_normals)

            # NEUMANN
            bc_values[:, all_bf] += lithostatic_bc[:, all_bf] / self.scalar_scale

        # DIRICHLET
        faces = self.faces_to_fix(g)
        bc_values[:, faces] = 0  # / self.length_scale

        return bc_values.ravel("F")
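Dotting the background stress tensor with area-weighted outward normals gives the traction applied on each boundary face. A toy 2d numpy sketch (made-up stress and normals):

import numpy as np

stress = np.array([[10.0, 2.0],
                   [2.0, 5.0]])           # background stress tensor
outward_normals = np.array([[1.0, 0.0],
                            [0.0, 1.0]])  # one outward normal per column

traction = np.dot(stress, outward_normals)  # column j: traction on face j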
Example #16
def switch_sign_if_inwards_normal(
    g: pp.Grid, nd: int, faces: np.ndarray
) -> sps.spmatrix:
    """Construct a matrix that changes sign of quantities on faces with a
    normal that points into the grid.

    Parameters:
        g (pp.Grid): Grid.
        nd (int): Number of quantities per face; this will for instance be the
            number of components in a face-vector.
        faces (np.array-like of ints): Indices of the faces to be considered. Should
            only contain boundary faces.

    Returns:
        sps.dia_matrix: Diagonal matrix which switches the sign of faces if the
            normal vector of the face points into the grid g. Faces not considered
            will have a 0 diagonal term. If nd > 1, the first nd rows are associated
            with the first face, then nd elements of the second face etc.

    """

    faces = np.asarray(faces)

    # Find out whether the boundary faces have outwards pointing normal vectors
    # Negative sign implies that the normal vector points inwards.
    sgn, _ = g.signs_and_cells_of_boundary_faces(faces)

    # Create vector with the sign in the places of faces under consideration,
    # zeros otherwise
    sgn_mat = np.zeros(g.num_faces)
    sgn_mat[faces] = sgn
    # Duplicate the numbers, the operator is intended for vector quantities
    sgn_mat = np.tile(sgn_mat, (nd, 1)).ravel(order="F")

    # Create the diagonal matrix.
    return sps.dia_matrix((sgn_mat, 0), shape=(sgn_mat.size, sgn_mat.size))
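A quick self-contained check of the sign-switch operator: a toy grid with four faces, of which faces 0 and 2 are considered and face 2 has an inward normal, with nd = 2:

import numpy as np
import scipy.sparse as sps

sgn_mat = np.zeros(4)
sgn_mat[[0, 2]] = [1.0, -1.0]  # face 2 has an inward-pointing normal
nd = 2
sgn_mat = np.tile(sgn_mat, (nd, 1)).ravel(order="F")
switch = sps.dia_matrix((sgn_mat, 0), shape=(sgn_mat.size, sgn_mat.size))

v = np.arange(8, dtype=float)  # a face-vector quantity, nd entries per face
flipped = switch.dot(v)  # face 2 entries change sign; faces 1 and 3 are zeroed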
Example #17
def split_specific_faces(
    gh: pp.Grid,
    face_cell_list: List[sps.spmatrix],
    faces: np.ndarray,
    cells: np.ndarray,
    gl_ind: np.ndarray,
    non_planar: bool = False,
):
    """
    For a given pair of gh and gl.
    Split only the faces specified by faces (higher-dimensional), corresponding
    to new cells (lower-dimensional). gl_ind identifies gl in
    face_cell_list, i.e.
        face_cell_list[gl_ind] = gb.edge_props((gh, gl), 'face_cells')
    """
    # The idea behind this loop is not clear. A likely change, in case we invoke
    # this function for several g_l, is to send f_c, not face_cell_list, to
    # _update_face_cells. However, the implications of this, e.g. with updates of
    # face indices etc., are not clear. Note that the loop returns on its first
    # iteration.
    for f_c in face_cell_list:
        # We first we duplicate faces along tagged faces. The duplicate
        # faces will share the same nodes as the original faces,
        # however, the new faces are not yet added to the cell_faces map
        # (to save computation).
        face_id = _duplicate_specific_faces(gh, faces)

        # Update the mapping between higher-dimensional faces and lower-dimensional
        # cells.
        face_cell_list = _update_face_cells(face_cell_list, face_id, gl_ind,
                                            cells)
        if face_id.size == 0:
            return face_cell_list

        # We now set the cell_faces map based on which side of the
        # fractures the cells lie.
        if non_planar:
            # In this case, a loop over the elements in face_id should do the job.
            flag_array: List[int] = []
            for fi in face_id:
                n = np.reshape(gh.face_normals[:, fi], (3, 1))
                n = n / np.linalg.norm(n)
                x0 = np.reshape(gh.face_centers[:, fi], (3, 1))
                this_flag: int = update_cell_connectivity(
                    gh, np.array([fi]), n, x0)
                flag_array.append(this_flag)

            if np.allclose(np.asarray(flag_array), 0):
                flag = 0
            elif np.allclose(np.asarray(flag_array), -1):
                flag = -1
            else:
                # Not sure what to do here - probably a partial update of connectivity
                raise ValueError("Could only split some faces in the non-planar case")
        else:
            # The fracture is considered flat, we can use the same normal vector
            # for all faces. This should make the computations faster
            n = np.reshape(gh.face_normals[:, face_id[0]], (3, 1))
            n = n / np.linalg.norm(n)
            x0 = np.reshape(gh.face_centers[:, face_id[0]], (3, 1))
            flag = update_cell_connectivity(gh, face_id, n, x0)

        if flag == 0:
            # If flag == 0, we added left and right faces (if it is -1, no faces
            # were added and we don't have left and right face pairs).
            # We now add the new faces to the frac_pairs array.
            left = face_id
            right = np.arange(gh.num_faces - face_id.size, gh.num_faces)
            gh.frac_pairs = np.hstack((gh.frac_pairs, np.vstack(
                (left, right))))
        return face_cell_list
Example #18
    def discretize(self, g: pp.Grid, data: Dict, d_name: str = "darcy_flux") -> None:
        """
        Return the matrix and righ-hand side for a discretization of a scalar
        linear transport problem using the upwind scheme.
        Note: the vector field is assumed to be given as the normal velocity,
        weighted with the face area, at each face.
        Note: if not specified the inflow boundary conditions are no-flow, while
        the outflow boundary conditions are open.

        The name of data in the input dictionary (data) are:
        darcy_flux : array (g.num_faces)
            Normal velocity at each face, weighted by the face area.
        bc : boundary conditions (optional)
        bc_val : dictionary (optional)
            Values of the boundary conditions. The dictionary has at most the
            following keys: 'dir' and 'neu', for Dirichlet and Neumann boundary
            conditions, respectively.
        source : array (g.num_cells) of source (positive) or sink (negative) terms.
        num_components (int, optional): Number of components to be advected. Defaults
            to 1.

        Parameters
        ----------
        g : grid, or a subclass, with geometry fields computed.
        data: dictionary to store the data.
        d_name: (string) keyword for data field in data containing the dischages

        Return
        ------
        matrix: sparse csr (g.num_cells, g_num_cells)
            Upwind matrix obtained from the discretization.
        rhs: array (g_num_cells)
            Right-hand side which contains the boundary conditions.

        Examples
        --------
        data = {'darcy_flux': u, 'bc': bnd, 'bc_val': bnd_val}
        advect = upwind.Upwind()
        U, rhs = advect.matrix_rhs(g, data)

        data = {'deltaT': advect.cfl(g, data)}
        M, _ = mass.MassMatrix().matrix_rhs(g, data)

        M_minus_U = M - U
        invM = mass.MassMatrix().inv(M)

        # Loop over the time
        for i in np.arange( N ):
            conc = invM.dot((M_minus_U).dot(conc) + rhs)
        """

        parameter_dictionary: Dict[str,
                                   Any] = data[pp.PARAMETERS][self.keyword]
        matrix_dictionary: Dict[str, sps.spmatrix] = data[
            pp.DISCRETIZATION_MATRICES][self.keyword]

        # Shortcut for point grids
        if g.dim == 0:
            matrix_dictionary[self.upwind_matrix_key] = sps.csr_matrix((0, 1))
            matrix_dictionary[self.rhs_matrix_key] = sps.csr_matrix((0, 0))
            return

        darcy_flux: np.ndarray = parameter_dictionary[d_name]
        bc: pp.BoundaryCondition = parameter_dictionary["bc"]

        # Discretization is made of two parts: One is a mapping from the upstream cell
        # to the face. Second is a scaling of that upstream function with the Darcy
        # flux. Then boundary conditions of course complicate everything.

        # booleans of flux direction
        pos_flux = darcy_flux >= 0
        neg_flux = np.logical_not(pos_flux)

        # Array to store index of the cell in the upstream direction
        upstream_cell_ind = np.zeros(g.num_faces, dtype=int)
        # Fill the array based on the cell-face relation. By construction, the normal
        # vector of a face points from the first to the second row in this array
        cf_dense = g.cell_face_as_dense()
        # Positive fluxes point in the same direction as the normal vector, find the
        # upstream cell
        upstream_cell_ind[pos_flux] = cf_dense[0, pos_flux]
        upstream_cell_ind[neg_flux] = cf_dense[1, neg_flux]

        # Make row and data arrays, preparing to make a coo-matrix for the upstream
        # cell-to-face map.
        row = np.arange(g.num_faces)
        values = np.ones(g.num_faces, dtype=int)

        # We need to eliminate faces on the boundary; these will be discretized
        # separately below. On faces with Neumann conditions, boundary conditions apply
        # for both inflow and outflow. For Dirichlet, only inflow conditions are given;
        # for outflow, we use upstream weighting (thus no need to modify the matrix
        # we are about to build).

        # faces with Neumann conditions
        neumann_ind = np.where(bc.is_neu)[0]

        # Faces with Dirichlet conditions and inflow. The latter is identified by
        # considering the direction of the flux, and the upstream element in cf_dense
        # (note that the exterior of the domain is represented by -1 in cf_dense).
        inflow_ind = np.where(
            np.logical_and(
                bc.is_dir,
                np.logical_or(
                    np.logical_and(pos_flux, cf_dense[0] < 0),
                    np.logical_and(neg_flux, cf_dense[1] < 0),
                ),
            ))[0]

        # Delete indices that should be treated by boundary conditions
        delete_ind = np.sort(np.r_[neumann_ind, inflow_ind])
        row = np.delete(row, delete_ind)
        values = np.delete(values, delete_ind)
        col = np.delete(upstream_cell_ind, delete_ind)

        # Finally we can construct the upstream weighting matrix.
        upstream_mat = sps.coo_matrix(
            (
                values,
                (row, col),
            ),
            shape=(g.num_faces, g.num_cells),
        ).tocsr()

        # Scaling with Darcy fluxes is a diagonal matrix
        flux_mat = sps.dia_matrix((darcy_flux, 0),
                                  shape=(g.num_faces, g.num_faces))

        # Form and store discretization matrix
        # Expand the discretization matrix to more than one component
        num_components: int = parameter_dictionary.get("num_components", 1)
        product = flux_mat * upstream_mat
        matrix_dictionary[self.upwind_matrix_key] = sps.kron(
            product, sps.eye(num_components)).tocsr()

        ## Boundary conditions
        # Since the upwind discretization could be combined with a diffusion
        # discretization in an advection-diffusion equation, treatment of boundary
        # conditions can be a bit delicate, and the code should be used with some
        # caution. The below
        # implementation follows the following steps:
        #
        # 1) On Neumann boundaries the prescribed boundary value should effectively
        # be added to the adjacent cell, with the convention that influx (so
        # negative boundary value) should correspond to accumulation.
        # 2) On Neumann outflow conditions, a separate discretization matrix is constructed.
        #    This has been tested for advective problems only.
        # 3) On Dirichlet boundaries, we consider only inflow boundaries.
        #
        # IMPLEMENTATION NOTE: The isolation of outflow Neumann conditions in a
        # separate discretization matrix may not be necessary; the reason is partly
        # poorly understood assumptions in a legacy implementation.

        # For Neumann faces we need to assign the sign of the divergence, to
        # counteract multiplication with the same sign when the divergence is
        # applied (e.g. in self.assemble_matrix).
        sgn_div = pp.fvutils.scalar_divergence(g).sum(axis=0).A.squeeze()

        row = np.hstack([neumann_ind, inflow_ind])
        col = row
        # Need minus signs on both Neumann and Dirichlet data to ensure that accumulation
        # follows from negative fluxes.
        values_bc = np.hstack([-sgn_div[neumann_ind], -darcy_flux[inflow_ind]])
        bc_discr = sps.coo_matrix((values_bc, (row, col)),
                                  shape=(g.num_faces, g.num_faces)).tocsr()

        # Expand matrix to the right number of components, and store it
        matrix_dictionary[self.rhs_matrix_key] = sps.kron(
            bc_discr, sps.eye(num_components)).tocsr()

        ## Neumann outflow conditions
        # Outflow Neumann boundaries
        outflow_neu = np.logical_and(
            bc.is_neu,
            np.logical_or(
                np.logical_and(pos_flux, sgn_div > 0),
                np.logical_and(neg_flux, sgn_div < 0),
            ),
        )
        # Copy the Neumann flux matrix.
        neumann_flux_mat = flux_mat.copy()
        # We know flux_mat is diagonal, and can safely edit the data directly.
        # Note that the data is stored as a 2d array.
        neumann_flux_mat.data[0][np.logical_not(outflow_neu)] = 0

        # Use a simple cell to face map here; this will pick up cells that are both
        # upstream and downstream to Neumann faces, however, the latter will have
        # their flux filtered away.
        cell_2_face = np.abs(g.cell_faces)

        # Add minus sign to be consistent with other boundary terms
        matrix_dictionary[self.outflow_neumann_matrix_key] = -sps.kron(
            neumann_flux_mat * cell_2_face, sps.eye(num_components)).tocsr()
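The core of the scheme is picking the upstream cell per face from the sign of the flux. A toy numpy sketch for a 1d grid with three cells and four faces, with the exterior marked by -1 as in cell_face_as_dense:

import numpy as np

# Rows: cells on the two sides of each face; -1 denotes the domain exterior.
cf_dense = np.array([[-1, 0, 1, 2],
                     [0, 1, 2, -1]])
darcy_flux = np.array([1.0, -0.5, 2.0, 1.0])

pos_flux = darcy_flux >= 0
upstream = np.where(pos_flux, cf_dense[0], cf_dense[1])
# -> [-1, 1, 1, 2]; the -1 flags an inflow boundary face, handled by the BC matrix.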
Example #19
    def discretize(self, g: pp.Grid, data: Dict) -> None:
        """Discretize a second order elliptic equation using a dual virtual element
        method.

        We assume the following two sub-dictionaries to be present in the data
        dictionary:
            parameter_dictionary, storing all parameters.
                Stored in data[pp.PARAMETERS][self.keyword].
            matrix_dictionary, for storage of discretization matrices.
                Stored in data[pp.DISCRETIZATION_MATRICES][self.keyword]
        In addition, data may contain:
            deviation_from_plane_tol: The geometrical tolerance, used in the check to
                rotate 2d and 1d grids

        parameter_dictionary contains the entries:
            second_order_tensor: (pp.SecondOrderTensor) Permeability defined
                cell-wise. This is the effective permeability, including any
                aperture scalings etc.

        matrix_dictionary will be updated with the following entries:
            mass: sps.csc_matrix (g.num_faces, g.num_faces) The mass matrix.
            div: sps.csc_matrix (g.num_cells, g.num_faces) The divergence matrix.

        Optional parameter:
        --------------------
        is_tangential: Whether the lower-dimensional permeability tensor has been
            rotated to the fracture plane. Defaults to False. Stored in the data
            dictionary.
        """
        # Allow short variable names in backend function
        # pylint: disable=invalid-name

        # Get dictionary for discretization matrix storage
        matrix_dictionary = data[pp.DISCRETIZATION_MATRICES][self.keyword]
        # If a 0-d grid is given then we return an identity matrix
        if g.dim == 0:
            mass = sps.dia_matrix(([1], 0), (g.num_faces, g.num_faces))
            matrix_dictionary[self.mass_matrix_key] = mass
            matrix_dictionary[self.div_matrix_key] = sps.csr_matrix(
                (g.num_faces, g.num_cells))
            matrix_dictionary[self.vector_proj_key] = sps.csr_matrix(
                (3, g.num_cells))
            return

        # Get dictionary for parameter storage
        parameter_dictionary = data[pp.PARAMETERS][self.keyword]
        # Retrieve the permeability
        k = parameter_dictionary["second_order_tensor"]
        # Identity tensor for vector source computation
        identity = pp.SecondOrderTensor(kxx=np.ones(g.num_cells))

        faces, cells, sign = sps.find(g.cell_faces)
        index = np.argsort(cells)
        faces, sign = faces[index], sign[index]

        # Map the domain to a reference geometry (i.e. equivalent to compute
        # surface coordinates in 1d and 2d)
        deviation_from_plane_tol = data.get("deviation_from_plane_tol", 1e-5)
        c_centers, f_normals, f_centers, R, dim, _ = pp.map_geometry.map_grid(
            g, deviation_from_plane_tol)

        if not data.get("is_tangential", False):
            # Rotate the permeability tensor and delete last dimension
            if g.dim < 3:
                k = k.copy()
                k.rotate(R)
                remove_dim = np.where(np.logical_not(dim))[0]
                k.values = np.delete(k.values, (remove_dim), axis=0)
                k.values = np.delete(k.values, (remove_dim), axis=1)

        # In the virtual cell approach the cell diameters should involve the
        # apertures, however to keep consistency with the hybrid-dimensional
        # approach and with the related hypotheses we avoid doing so.
        diams = g.cell_diameters()
        # Weight for the stabilization term
        weight = np.power(diams, 2 - g.dim)

        # Allocate the data to store matrix entries, that's the most efficient
        # way to create a sparse matrix.
        size_A = np.sum(
            np.square(g.cell_faces.indptr[1:] - g.cell_faces.indptr[:-1]))
        rows_A = np.empty(size_A, dtype=int)
        cols_A = np.empty(size_A, dtype=int)
        data_A = np.empty(size_A)
        idx_A = 0

        # Allocate the data to store matrix P entries
        size_P = 3 * np.sum(g.cell_faces.indptr[1:] - g.cell_faces.indptr[:-1])
        rows_P = np.empty(size_P, dtype=int)
        cols_P = np.empty(size_P, dtype=int)
        data_P = np.empty(size_P)
        idx_P = 0
        idx_row_P = 0

        # define the function to compute the inverse of the permeability matrix
        if g.dim == 1:
            inv_matrix = self._inv_matrix_1d
        elif g.dim == 2:
            inv_matrix = self._inv_matrix_2d
        elif g.dim == 3:
            inv_matrix = self._inv_matrix_3d

        for c in np.arange(g.num_cells):
            # For the current cell retrieve its faces
            loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1])
            faces_loc = faces[loc]

            # Compute the H_div-mass local matrix
            A = self.massHdiv(
                k.values[0:g.dim, 0:g.dim, c],
                inv_matrix(k.values[0:g.dim, 0:g.dim, c]),
                c_centers[:, c],
                g.cell_volumes[c],
                f_centers[:, faces_loc],
                f_normals[:, faces_loc],
                sign[loc],
                diams[c],
                weight[c],
            )[0]

            # Compute the flux reconstruction matrix
            P = np.zeros((3, faces_loc.size))
            P[dim, :] = self.massHdiv(
                identity.values[0:g.dim, 0:g.dim, c],
                identity.values[0:g.dim, 0:g.dim, c],
                c_centers[:, c],
                g.cell_volumes[c],
                f_centers[:, faces_loc],
                f_normals[:, faces_loc],
                sign[loc],
                diams[c],
            )[1]
            P = np.dot(R.T, P) / diams[c]

            # Save values for Hdiv-mass local matrix in the global structure
            cols = np.concatenate(faces_loc.size * [[faces_loc]])
            loc_idx = slice(idx_A, idx_A + A.size)
            rows_A[loc_idx] = cols.T.ravel()
            cols_A[loc_idx] = cols.ravel()
            data_A[loc_idx] = A.ravel()
            idx_A += A.size

            # Save values for projection P local matrix in the global structure
            loc_idx = slice(idx_P, idx_P + P.size)
            cols_P[loc_idx] = np.concatenate(3 * [[faces_loc]]).ravel()
            rows_P[loc_idx] = np.repeat(np.arange(3),
                                        faces_loc.size) + idx_row_P
            data_P[loc_idx] = P.ravel()
            idx_P += P.size
            idx_row_P += 3

        # Construct the global matrices
        mass = sps.coo_matrix((data_A, (rows_A, cols_A)))
        div = -g.cell_faces.T
        proj = sps.coo_matrix((data_P, (rows_P, cols_P)))

        matrix_dictionary[self.mass_matrix_key] = mass
        matrix_dictionary[self.div_matrix_key] = div
        matrix_dictionary[self.vector_proj_key] = proj
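The assembly above preallocates COO index and value arrays, fills one dense local block per cell, and builds the sparse matrix in a single pass at the end. A minimal self-contained sketch of the same pattern (toy 1d "grid", hypothetical local matrices):

import numpy as np
import scipy.sparse as sps

num_cells, block = 3, 2  # hypothetical: each cell couples 2 faces
size = num_cells * block ** 2
rows = np.empty(size, dtype=int)
cols = np.empty(size, dtype=int)
data = np.empty(size)
idx = 0

for c in range(num_cells):
    faces_loc = np.array([c, c + 1])          # faces of cell c on a 1d grid
    A = np.array([[2.0, -1.0], [-1.0, 2.0]])  # dense local matrix
    cols_loc = np.tile(faces_loc, (block, 1))
    loc_idx = slice(idx, idx + A.size)
    rows[loc_idx] = cols_loc.T.ravel()
    cols[loc_idx] = cols_loc.ravel()
    data[loc_idx] = A.ravel()
    idx += A.size

mass = sps.coo_matrix((data, (rows, cols))).tocsr()  # duplicate entries are summed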
Example #20
def _update_connectivity_fracture_grid(
        g_l: pp.Grid,  # lower-dimensional grid
        g_h: pp.Grid,  # higher-dimensional grid
        nodes_l: np.ndarray,  # nodes in g_l involved in the propagation
        nodes_h: np.ndarray,  # nodes in g_h involved in the propagation
        n_old_nodes_l: int,  # number of nodes in g_l before splitting
        n_old_faces_l: int,  # number of faces in g_l before splitting
        n_old_cells_l: int,  # number of cells in g_l before splitting
        faces_h: np.ndarray,  # faces in g_h to be split
) -> Tuple[int, np.ndarray]:
    """
    Update of cell_faces of the lower grid after insertion of new cells at the
    higher-dimensional faces_h. Also tags the faces as domain_boundary or tip.
    Should be called after initialization of tags
    and geometry of g_l by append_face_geometry and append_face_tags.

    """
    # Extract immediate information

    # Each split face gives a new cell in g_l
    n_new_cells_l = faces_h.size
    # index of the new cells in g_l. These are appended to the existing cells
    new_cells_l = np.arange(n_old_cells_l, n_old_cells_l + n_new_cells_l)

    # Initialize fields for new faces in g_l
    new_faces_l = np.empty((g_l.dim, 0), dtype=int)
    new_face_centers_l = np.empty((3, 0))

    # Counter of total number of faces in g_l
    face_counter_l = n_old_faces_l

    # Copy what is to be updated: Cell-face and face-node relation in g_l
    old_cell_faces = g_l.cell_faces.copy()
    old_face_nodes = g_l.face_nodes.copy()

    # Get the face_node indices to form lower-dimensional faces on the form
    # [[nodes of face 1], [nodes of face 2], ...], i.e., array where each face
    # is represented by the nodes it consists of.
    # ASSUMPTION: This breaks if not all faces have the same number of nodes
    # Rewrite is possible, but more technical
    all_faces_l = np.reshape(g_l.face_nodes.indices, (g_l.dim, n_old_faces_l),
                             order="f")

    # Initialize indices and values for the cell_faces update
    ind_f, ind_c, cf_val = (
        np.empty(0, dtype=int),
        np.empty(0, dtype=int),
        np.empty(0, dtype=int),
    )
    # and for the face_nodes update
    fn_ind_f, fn_ind_n = np.empty(0, dtype=int), np.empty(0, dtype=int)

    # Loop over all new cells to be created
    for i, c in enumerate(new_cells_l):

        # Find the nodes of the corresponding higher-dimensional face
        face_h = faces_h[i]
        local_nodes_h = g_h.face_nodes[:, face_h].nonzero()[0]

        # Find the nodes' place among the active higher-dimensional nodes, that is,
        # nodes that will be split
        in_unique_nodes = pp.utils.setmembership.ismember_rows(local_nodes_h,
                                                               nodes_h,
                                                               sort=False)[1]

        # Find the corresponding lower-dimensional nodes
        local_nodes_l = np.array(nodes_l[in_unique_nodes], dtype=int)

        # Get geometry information
        local_pts = g_l.nodes[:, local_nodes_l]
        # The new cell center is taken as the mean of the node coordinates.
        # This should be okay for simplexes, not sure what we get for general cells.
        local_cell_center = np.mean(local_pts, axis=1)

        # Store face center for the update of g_l.face_centers

        # Faces are defined by one node in 1d and two in 2d. This requires
        # dimension-dependent treatment:
        if g_l.dim == 2:
            # Sort nodes clockwise (!)
            # ASSUMPTION: This assumes that the new cell is star-shaped with respect to the
            # local cell center. This should be okay.
            map_to_sorted = pp.utils.sort_points.sort_point_plane(
                local_pts, local_cell_center)
            sorted_nodes_l = local_nodes_l[map_to_sorted]
            sorted_nodes_h = local_nodes_h[map_to_sorted]

            # Define the faces of the new cell c (size: 2 x faces_per_cell_l). "Duplicate"
            # of the higher dimension used for tag identification.
            faces_l = np.vstack((sorted_nodes_l,
                                 np.append(sorted_nodes_l[1:],
                                           sorted_nodes_l[0])))
            local_faces_h = np.vstack((sorted_nodes_h,
                                       np.append(sorted_nodes_h[1:],
                                                 sorted_nodes_h[0])))

        else:
            # Faces and nodes are 1:1, but ismember_rows (below) requires 2d array
            faces_l = np.atleast_2d(local_nodes_l)
            local_faces_h = np.atleast_2d(local_nodes_h)

        # Now the faces of c are defined by sorted_nodes_l
        # and their arrangement in faces_l.
        n_local_faces_l = faces_l.shape[-1]

        # Check which faces exist in g_l already, either from before propagation
        # or from previous runs through current loop:
        (exist, existing_faces_l) = pp.utils.setmembership.ismember_rows(
            faces_l, all_faces_l)
        # The existing faces are no longer tips (but internal).
        g_l.tags["tip_faces"][existing_faces_l] = False

        # Number of genuinely new local faces in g_l created for this cell
        n_new_local_faces_l = np.sum(~exist)

        # Index of the new faces, they will be appended to the face array
        new_face_indices_l = np.arange(face_counter_l,
                                       face_counter_l + n_new_local_faces_l)
        # Update face counter to be ready for the next cell
        face_counter_l += n_new_local_faces_l

        ## Assign tags to the new faces
        # First expand tag arrays to make space for new faces
        _append_face_tags(g_l, n_new_local_faces_l)

        # The existing faces are tagged according to the information from the
        # node tags of g_h.
        fi = local_faces_h[:, ~exist]

        # The new faces are either on the domain boundary, or tip faces
        domain_boundary_faces = np.all(g_h.tags["domain_boundary_nodes"][fi],
                                       axis=0)
        g_l.tags["tip_faces"][new_face_indices_l] = ~domain_boundary_faces
        g_l.tags["domain_boundary_faces"][
            new_face_indices_l] = domain_boundary_faces

        # Expand array of face-nodes in g_l
        all_faces_l = np.append(all_faces_l, faces_l[:, ~exist], axis=1)

        # Find node indices of the faces to be updated.
        ind_n_local = faces_l[:, ~exist]
        # TODO: What happens here if ~exist is more than one face?
        local_pts = g_l.nodes[:, ind_n_local]
        local_face_centers = np.mean(local_pts, axis=1)

        # New face center set to the mean of the face's vertices.
        # This is reasonable at least for simplex (and Cartesian) faces
        new_face_centers_l = np.append(new_face_centers_l,
                                       np.atleast_2d(local_face_centers),
                                       axis=1)
        new_faces_l = np.append(new_faces_l, ind_n_local, axis=1)

        # Expand face-node and cell-face relations
        # Build index of all local faces (both new, and already existing)
        all_local_faces = np.empty(faces_l.shape[-1], dtype=int)
        all_local_faces[exist] = existing_faces_l
        all_local_faces[~exist] = new_face_indices_l

        # Add both existing and new faces to face-nodes.
        # Why include exist here, they should have been added already?
        # Answer: We could have dropped it, but this will simply add the same
        # information twice to the face-node relation. Since this has boolean
        # data, adding a 2 instead of a 1 will make no difference.
        ind_f_local = np.tile(all_local_faces, g_l.dim)
        fn_ind_f = np.append(fn_ind_f, ind_f_local)
        fn_ind_n = np.append(fn_ind_n, faces_l)

        # Cell-face relation
        # Here all faces should be added, existing or not
        ind_f = np.append(ind_f, all_local_faces)
        ind_c = np.append(ind_c, c * np.ones(n_local_faces_l, dtype=int))

        # To get the sign correct, some work is needed.
        # We distinguish between three cases
        # 1) This is a new face. We will assign positive sign, thus outer normal
        # 2) This is a face which existed before we entered the loop over
        #    cells. The sign will be opposite of that used in the previous occurrence
        #    of the face
        # 3) This is a face that has been added before for a previous new cell.
        #    The sign will be opposite of when first added, that is, -1.
        cf_val_loc = np.zeros(n_local_faces_l)
        # The faces that did not exist, are assigned sign 1
        # (should get outer normal)
        cf_val_loc[~exist] = 1

        # Find faces that were in the original grid (before entering the outer loop
        # over cells)
        are_in_original = existing_faces_l < n_old_faces_l

        # Faces that existed before the cell loop
        ind_in_original = existing_faces_l[are_in_original]
        # Index of these faces in cf_val_loc
        ind_local = np.in1d(all_local_faces, ind_in_original)

        if g_l.cell_faces[ind_in_original].data.size != ind_in_original.size:
            # This situation can happen in 3d (perhaps also 2d).
            # It will likely correspond to a strangely shaped fracture.
            # Implementation of such geometries seems complex, if at all desirable.
            # The suggested solution is to patch the face splitting algorithm so that
            # this does not happen.
            raise ValueError(
                "Cannot split the same lower-dimensional face twice")

        # The sign of this cell should be the opposite of that used in the
        # original grid.
        cf_val_loc[ind_local] = -g_l.cell_faces.tocsr()[
            ind_in_original, :].data

        # Faces that were not in the original grid, but were added before this iteration
        # of the cell loop
        ind_not_in_original = existing_faces_l[~are_in_original]
        # Index of these faces in cf_val_loc
        ind_not_local = np.in1d(all_local_faces, ind_not_in_original)
        # These are assigned the value -1, since they were given +1 when first
        # added to cf_val (see the assignment to cf_val_loc[~exist] above)
        cf_val_loc[ind_not_local] = -1

        # Store signs of cf_val. This effectively is the sign of the normal vectors
        cf_val = np.append(cf_val, cf_val_loc)

    # Done with the expansion of all faces and cells. What is left is to update
    # g_l.cell_faces and face_nodes.

    # Resize and update face_nodes ...
    g_l.face_nodes = sps.csc_matrix((g_l.num_nodes, face_counter_l),
                                    dtype=bool)
    g_l.face_nodes[:n_old_nodes_l, :n_old_faces_l] = old_face_nodes
    g_l.face_nodes[fn_ind_n, fn_ind_f] = True
    g_l.face_nodes.eliminate_zeros()

    # ... and cell_faces
    g_l.cell_faces = sps.csc_matrix(
        (face_counter_l, n_old_cells_l + n_new_cells_l))
    g_l.cell_faces[0:n_old_faces_l, 0:n_old_cells_l] = old_cell_faces
    g_l.cell_faces[ind_f, ind_c] = cf_val
    g_l.cell_faces.eliminate_zeros()
    n_new_faces = face_counter_l - n_old_faces_l
    return n_new_faces, new_face_centers_l
Example #21
def grid_is_connected(
    g: pp.Grid, cell_ind: Optional[np.ndarray] = None
) -> Tuple[bool, List[np.ndarray]]:
    """
    Check if a grid is fully connected, as defined by its cell_connection_map().

    The function is intended used in one of two ways:
        1) To test if a subgrid will be connected before it is extracted. In
        this case, the cells to be tested are specified by cell_ind.
        2) To check if an existing grid is composed of a single component. In
        this case, all cells should be included in the analysis.

    Parameters:
        g (core.grids.grid): Grid to be tested. Only its cell_faces map is
            used.
        cell_ind (np.array): Index of cells to be included when looking for
            connections. Defaults to all cells in the grid.

    Returns:
        boolean: True if the grid is connected.
        list of np.arrays: Each list item contains a np.array with cell indices
            of a connected component.

    Examples:
        >>> g = pp.CartGrid(np.array([2, 2]))
        >>> p = np.array([0, 1])
        >>> is_con, l = grid_is_connected(g, p)
        >>> is_con
        True

        >>> g = pp.CartGrid(np.array([2, 2]))
        >>> p = np.array([0, 3])
        >>> is_con, l = grid_is_connected(g, p)
        >>> is_con
        False

    """
    import networkx

    # If no cell indices are specified, we use them all.
    if cell_ind is None:
        cell_ind = np.arange(g.num_cells)

    # Get connection map for the full grid
    c2c = g.cell_connection_map()

    # Extract submatrix of the active cell set.
    # To slice the sparse matrix, we first convert to row storage, slice rows,
    # and then convert to columns and slice those as well.
    c2c = c2c.tocsr()[cell_ind, :].tocsc()[:, cell_ind]

    # Represent the connections as a networkx graph and check for connectivity
    graph = networkx.from_scipy_sparse_matrix(c2c)
    is_connected = networkx.is_connected(graph)

    # Get the connected components of the network.
    # networkx gives a generator that produces sets of node indices. Use this
    # to define a list of numpy arrays.
    component_generator = networkx.connected_components(graph)
    components = [np.array(list(i)) for i in component_generator]

    return is_connected, components
Example #22
def _duplicate_nodes_with_offset(g: pp.Grid, nodes: np.ndarray,
                                 offset: float) -> int:
    """
    Duplicate nodes on a fracture, and perturb the duplicated nodes. This option
    is useful for visualization purposes.

    NOTE: This is a legacy implementation, which should not be invoked directly.
    Instead use duplicate_nodes (more efficient, but without the possibility to
    perturb nodes); that method will invoke the present one if a perturbation is
    requested.

    Parameters:
    ----------
    g         - The grid for which the nodes are duplicated
    nodes     - The nodes to be duplicated
    offset    - How far from the original node the duplications should be
                placed.
    """
    node_count = 0

    # We wish to convert the sparse csc matrix to a sparse
    # csr matrix to easily add rows. However, the conversion sorts the
    # indices, which will change the node order when we convert back. We
    # therefore find the inverse sorting of the nodes of each face.
    # After we have performed the row operations we will map the nodes
    # back to their original position.
    _, iv = _sort_sub_list(g.face_nodes.indices, g.face_nodes.indptr)

    g.face_nodes = g.face_nodes.tocsr()
    # Iterate over each internal node and split it according to the graph.
    # For each cell attached to the node, we check which color the cell has.
    # All cells with the same color are then attached to a new copy of the
    # node.
    cell_nodes = g.cell_nodes().tocsr()

    for node in nodes:
        # t_node takes into account the added nodes.
        t_node = node + node_count
        # Find cells connected to node

        cells = np.unique(sparse_mat.slice_indices(cell_nodes, node))
        # Find the color of each cell. A group of cells is given the same color
        # if they are connected by faces. This means that all cells on one side
        # of a fracture will have the same color, but a different color than
        # the cells on the other side of the fracture. Equivalently, the cells
        # at an X-intersection will be given four different colors
        colors = _find_cell_color(g, cells)
        # Find which cells share the same color
        colors, ix = np.unique(colors, return_inverse=True)

        # copy coordinate of old node
        new_nodes = np.repeat(g.nodes[:, t_node, None], colors.size, axis=1)
        faces = np.array([], dtype=int)
        face_pos = np.array([g.face_nodes.indptr[t_node]])
        assert g.cell_faces.getformat() == "csc"
        assert g.face_nodes.getformat() == "csr"
        faces_of_node_t = sparse_mat.slice_indices(g.face_nodes, t_node)

        for j in range(colors.size):
            # For each color we wish to add one node. First we find all faces that
            # are connected to the fracture node, and have the correct cell
            # color
            colored_faces = np.unique(
                sparse_mat.slice_indices(g.cell_faces, cells[ix == j]))

            is_colored = np.in1d(faces_of_node_t,
                                 colored_faces,
                                 assume_unique=True)

            faces = np.append(faces, faces_of_node_t[is_colored])

            # These faces are then attached to new node number j.
            face_pos = np.append(face_pos, face_pos[-1] + np.sum(is_colored))

            # If an offset is given, we will change the position of the nodes.
            # We move the nodes a length of offset away from the fracture(s).
            if offset > 0 and colors.size > 1:
                new_nodes[:, j] -= _avg_normal(g, faces_of_node_t[is_colored]) * offset

        # The total number of faces should not have changed, only their
        # connection to nodes. We can therefore just update the indices and
        # indptr map.
        g.face_nodes.indices[face_pos[0]:face_pos[-1]] = faces
        node_count += colors.size - 1
        g.face_nodes.indptr = np.insert(g.face_nodes.indptr, t_node + 1,
                                        face_pos[1:-1])
        g.face_nodes._shape = (
            g.face_nodes.shape[0] + colors.size - 1,
            g.face_nodes._shape[1],
        )
        # We delete the old node because of the offset. If we do not
        # have an offset we could keep it and add one less node.

        g.nodes = np.delete(g.nodes, t_node, axis=1)
        g.nodes = np.insert(g.nodes, [t_node] * new_nodes.shape[1],
                            new_nodes,
                            axis=1)

        new_point_ind = np.array([g.global_point_ind[t_node]] *
                                 new_nodes.shape[1])
        g.global_point_ind = np.delete(g.global_point_ind, t_node)
        g.global_point_ind = np.insert(g.global_point_ind,
                                       [t_node] * new_point_ind.shape[0],
                                       new_point_ind,
                                       axis=0)

    # Transform back to csc format and fix node ordering.
    g.face_nodes = g.face_nodes.tocsc()
    g.face_nodes.indices = g.face_nodes.indices[iv]  # Restore original node order

    return node_count
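The indptr/indices surgery above is easier to see in isolation. A toy sketch in plain scipy that duplicates one row of a CSR matrix by the same mechanism (the matrix and row choice are arbitrary):

import numpy as np
import scipy.sparse as sps

A = sps.csr_matrix(np.array([[1, 0, 0],
                             [0, 1, 1],
                             [0, 0, 1]]))
row = 1
start, end = A.indptr[row], A.indptr[row + 1]

# Splice the row's column indices and data back in, right after the original
indices = np.insert(A.indices, end, A.indices[start:end])
data = np.insert(A.data, end, A.data[start:end])
# Insert a matching pointer, then shift all later pointers by the added entries
indptr = np.insert(A.indptr, row + 1, A.indptr[row + 1])
indptr[row + 2:] += end - start

B = sps.csr_matrix((data, indices, indptr), shape=(A.shape[0] + 1, A.shape[1]))
# B.toarray() -> [[1 0 0], [0 1 1], [0 1 1], [0 0 1]]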
Example #23
0
def refine_grid_1d(g: pp.Grid, ratio: int = 2) -> pp.Grid:
    """Refine cells in a 1d grid.

    Parameters:
        g (pp.Grid): A 1d grid, to be refined.
        ratio (int): Number of new cells per old cell. Defaults to 2.

    Returns:
        grid: New grid, with finer cells.

    """

    # Implementation note: The main part of the function is the construction of
    # the new cell-face relation. Since the grid is 1d, nodes and faces are
    # equivalent, and notation used mostly refers to nodes instead of faces.

    # Cell-node relation
    cell_nodes = g.cell_nodes()
    nodes, cells, _ = sps.find(cell_nodes)

    # Every cell will contribute (ratio - 1) new nodes
    num_new_nodes = (ratio - 1) * g.num_cells + g.num_nodes
    x = np.zeros((3, num_new_nodes))
    # Coordinates for splitting of cells
    theta = np.arange(1, ratio) / float(ratio)
    pos = 0
    shift = 0

    # Array that indicates whether an item in the cell-node relation represents
    # a node not listed before (i.e. whether this is the first or second
    # occurrence of the node)
    if_add = np.r_[1, np.ediff1d(cell_nodes.indices)].astype(bool)

    indices = np.empty(0, dtype=int)
    # Template array of node indices for refined cells
    ind = np.vstack((np.arange(ratio), np.arange(ratio) + 1)).flatten("F")
    nd = np.r_[np.diff(cell_nodes.indices)[1::2], 0]

    # Loop over all old cells and refine them.
    for c in np.arange(g.num_cells):
        # Find start and end nodes of the old cell
        loc = slice(cell_nodes.indptr[c], cell_nodes.indptr[c + 1])
        start, end = cell_nodes.indices[loc]

        # Flags for whether this is the first occurrence of the nodes of
        # the old cell. If so, they should be added to the new node array
        if_add_loc = if_add[loc]

        # Local cell-node (thus cell-face) relations of the new grid
        indices = np.r_[indices, shift + ind]

        # Add coordinate of the startpoint to the node array if relevant
        if if_add_loc[0]:
            x[:, pos:(pos + 1)] = g.nodes[:, start, np.newaxis]
            pos += 1

        # Add coordinates of the internal nodes
        x[:, pos:(pos + ratio - 1)] = (
            g.nodes[:, start, np.newaxis] * theta
            + g.nodes[:, end, np.newaxis] * (1 - theta)
        )
        pos += ratio - 1
        shift += ratio + (2 - np.sum(if_add_loc) * (1 - nd[c])) - nd[c]

        # Add coordinate to the endpoint, if relevant
        if if_add_loc[1]:
            x[:, pos:(pos + 1)] = g.nodes[:, end, np.newaxis]
            pos += 1

    # For 1d grids, there is a 1-1 relation between faces and nodes
    face_nodes = sps.identity(x.shape[1], format="csc")
    cell_faces = sps.csc_matrix((
        np.ones(indices.size, dtype=bool),
        indices,
        np.arange(0, indices.size + 1, 2),
    ))
    g = Grid(1, x, face_nodes, cell_faces, "Refined 1d grid")
    g.compute_geometry()

    return g
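A hedged usage sketch, assuming PorePy is importable as pp and refine_grid_1d (above) is in scope:

import numpy as np
import porepy as pp

g = pp.CartGrid(np.array([3]))  # 1d grid with three unit cells
g.compute_geometry()

g_fine = refine_grid_1d(g, ratio=2)  # Split every cell in two
assert g_fine.num_cells == 2 * g.num_cells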
Example #24
0
def _extrude_2d(g: pp.Grid, z: np.ndarray) -> Tuple[pp.Grid, np.ndarray, np.ndarray]:
    """ Extrude a 2d grid into 3d by prismatic extension.

    The original grid is assumed to be in the xy-plane, that is, any existing
    non-zero z-coordinates are ignored.

    Both the original and the new grid will have their geometry computed.

    Parameters:
        g (pp.Grid): Original grid to be extruded. Should have dimension 2.
        z (np.ndarray): z-coordinates of the nodes in the extruded grid. Should be
            either non-negative or non-positive, and be sorted in increasing or
            decreasing order, respectively.

    Returns:
        pp.Grid: A grid of dimension 3.
        np.array of np.arrays: Cell mappings, so that element ci gives all indices of
            cells in the extruded grid that come from cell ci in the original grid.
        np.array of np.arrays: Face mappings, so that element fi gives all indices of
            faces in the extruded grid that come from face fi in the original grid.

    """

    g.compute_geometry()

    negative_extrusion = np.all(z <= 0)

    ## Bookkeeping of the number of grid items

    # Number of nodes in the z-direction
    num_node_layers = z.size
    # Number of cell layers, one less than the nodes
    num_cell_layers = num_node_layers - 1

    # Short hand for the number of cells in the 2d grid
    nc_2d = g.num_cells
    nf_2d = g.num_faces
    nn_2d = g.num_nodes

    # The number of nodes in the 3d grid is given by the number of 2d nodes, and the
    # number of node layers
    nn_3d = nn_2d * num_node_layers
    # The 3d cell count is similar to that for the nodes
    nc_3d = nc_2d * num_cell_layers
    # The number of faces is more intricate: In each layer of cells, there will be as
    # many faces as there are in the 2d grid. In addition, in the direction of extrusion
    # there will be one set of faces per node layer, with each layer containing as many
    # faces as there are cells in the 2d grid
    nf_3d = nf_2d * num_cell_layers + nc_2d * num_node_layers

    ## Nodes - only coordinates are needed
    # The nodes in the 2d grid are copied for all layers, with the z-coordinates changed
    # for each layer. This means that for a vertical pillar, the face-node and cell-node
    # relations can be inferred from those in the original 2d grid, with index increments
    # of size nn_2d
    x_layer = g.nodes[0]
    y_layer = g.nodes[1]

    nodes = np.empty((3, 0))
    # Stack the layers of nodes
    for zloc in z:
        nodes = np.hstack((nodes, np.vstack((x_layer, y_layer, zloc * np.ones(nn_2d)))))

    ## Face-node relations
    # The 3d grid has two types of faces: Those formed by faces in the 2d grid, termed
    # 'vertical' below, and those on the top and bottom of the 3d cells, termed
    # horizontal

    # Face-node relation for the 2d grid. We know there are exactly two nodes in each
    # 2d face.
    fn_2d = g.face_nodes.indices.reshape((2, g.num_faces), order="F")

    # Nodes of the faces for the bottom layer of 3d cells. These are formed by
    # connecting nodes in the bottom layer with those immediately above
    fn_layer = np.vstack((fn_2d[0], fn_2d[1], fn_2d[1] + nn_2d, fn_2d[0] + nn_2d))

    # For the vertical cells, the flux direction indicated in cell_face map will be
    # inherited from the 2d grid (see below). The normal vector, which should be
    # consistent with this value, is effectively computed from the ordering of the
    # face-node relation (and the same is true for several other geometric quantities).
    # This requires that the face-nodes are sorted in a CCW order when seen from the
    # side of a positive cell_face value. To sort this out, we need to flip some of the
    # columns in fn_layer

    # Faces, cells and values of the 2d cell-face map
    [fi, ci, sgn] = sps.find(g.cell_faces)
    # Only consider each face once
    _, idx = np.unique(fi, return_index=True)

    # The node ordering in fn_layer will be CCW seen from cell ci if the cell center of
    # ci is CW relative to the line from the first to the second node of the 2d cell.
    #
    # Example: with p0 = [0, 0, 0], p1 = [1, 0, 0], the 3d face will have further nodes
    #               p2 = [1, 0, 1], p3 = [0, 0, 1].
    # This will be counterclockwise to a 2d cell center of, say, [0.5, -0.5, 0],
    #  (which is CW relative to p0 and p1)
    #
    p0 = g.nodes[:, fn_2d[0, fi[idx]]]
    p1 = g.nodes[:, fn_2d[1, fi[idx]]]
    pc = g.cell_centers[:, ci[idx]]
    ccw_2d = pp.geometry_property_checks.is_ccw_polyline(p0, p1, pc)

    # We should flip those columns in fn_layer where the sign is positive and the 2d
    # node order is not ccw (meaning the 3d order will be). Similarly, also flip
    # columns with a negative sign and ccw 2d order.
    flip = np.logical_or(
        np.logical_and(sgn[idx] > 0, np.logical_not(ccw_2d)),
        np.logical_and(sgn[idx] < 0, ccw_2d),
    )

    # Finally, if the extrusion is in the negative direction, the ordering of all
    # face-node relations is the opposite of that indicated above.
    if negative_extrusion:
        flip = np.logical_not(flip)

    fn_layer[:, flip] = fn_layer[np.array([1, 0, 3, 2])][:, flip]

    # The face-node relation for the vertical cells is found by stacking those in the
    # bottom layer, with an appropriate offset. This also implies that the vertical
    # faces of a cell in layer k are the same as the faces of the corresponding 2d cell,
    # with the appropriate adjustments for the number of faces and cells in each layer
    fn_rows_vertical = np.empty((4, 0))
    # Loop over all layers of cells
    for k in range(num_cell_layers):
        fn_rows_vertical = np.hstack((fn_rows_vertical, fn_layer + nn_2d * k))

    # Reshape the node indices into a single array
    fn_rows_vertical = fn_rows_vertical.ravel("F")

    # All vertical faces have exactly four nodes
    nodes_per_face_vertical = 4
    # Aim for a csc-representation of the faces. Column pointers
    fn_cols_vertical = np.arange(
        0, nodes_per_face_vertical * nf_2d * num_cell_layers, nodes_per_face_vertical
    )

    # Next, deal with the horizontal faces. The face-node relation is based on the
    # cell-node relation of the 2d grid.
    # The structure of this construction is a bit more involved than for the vertical
    # faces, since the 2d cells have an unknown, and generally varying, number of nodes
    cn_2d = g.cell_nodes()

    # Short hand for node indices of each cell.
    cn_ind_2d = cn_2d.indices.copy()

    # Similar to the vertical faces, the face-node relation in 3d should match the
    # sign in the cell-face relation, so that the generated normal vector points out of
    # the cell with cf-value 1.
    # This requires a sorting of the nodes for each cell
    for ci in range(nc_2d):
        # Node indices of this 2d cell
        start = cn_2d.indptr[ci]
        stop = cn_2d.indptr[ci + 1]
        ni = cn_ind_2d[start:stop]

        coord = g.nodes[:2, ni]
        # Sort the points.
        # IMPLEMENTATION NOTE: this probably assumes convexity of the 2d cell.
        sort_ind = pp.utils.sort_points.sort_point_plane(
            np.vstack((coord, np.zeros(coord.shape[1]))),
            g.cell_centers[:, ci].reshape((-1, 1)),
        )
        # Indices that sort the nodes. The sort function contains a rotation, which
        # implies that it is unknown whether the ordering is cw or ccw
        # If the sorted points are ccw, we store them, unless the extrusion is negative
        # in which case the ordering should be cw, and the points are turned.
        if pp.geometry_property_checks.is_ccw_polygon(coord[:, sort_ind]):
            if negative_extrusion:
                cn_ind_2d[start:stop] = cn_ind_2d[start:stop][sort_ind[::-1]]
            else:
                cn_ind_2d[start:stop] = cn_ind_2d[start:stop][sort_ind]
        # Else, the ordering should be negative.
        elif pp.geometry_property_checks.is_ccw_polygon(coord[:, sort_ind[::-1]]):
            if negative_extrusion:
                cn_ind_2d[start:stop] = cn_ind_2d[start:stop][sort_ind]
            else:
                cn_ind_2d[start:stop] = cn_ind_2d[start:stop][sort_ind[::-1]]
        else:
            raise ValueError("this should not happen. Is the cell non-convex??")

    # Compressed column storage for horizontal faces: Store node indices
    fn_rows_horizontal = np.array([], dtype=int)
    # .. and pointers to the start of new faces
    fn_cols_horizontal = np.array(0, dtype=int)
    # Loop over all layers of nodes (one more than the number of cells).
    # This means that the horizontal faces of a given cell are given by its index
    # (bottom) and its index + the number of 2d cells (top), both offset by the
    # total number of vertical faces
    for k in range(num_node_layers):
        # The horizontal cell-node relation for this layer is the bottom one, plus an
        # offset of the number of 2d nodes, per layer
        fn_rows_horizontal = np.hstack((fn_rows_horizontal, cn_ind_2d + nn_2d * k))
        # The index pointers are those of the 2d cell-node relation.
        # Adjustment for the vertical faces is done below.
        # The first element of the 2d indptr (a zero) is dropped, since the
        # initialized fn_cols_horizontal already contains the start marker; the
        # final element, which marks the end of the array, is kept.
        fn_cols_horizontal = np.hstack(
            (fn_cols_horizontal, cn_2d.indptr[1:] + cn_ind_2d.size * k)
        )

    # The horizontal faces are appended to the vertical ones. The node indices
    # are the same, but the column pointers must be offset by the number of
    # index entries belonging to the vertical faces
    num_vertical_faces = nf_2d * num_cell_layers
    fn_cols_horizontal += num_vertical_faces * nodes_per_face_vertical

    # Put together the vertical and horizontal data, create the face-node relation
    indptr = np.hstack((fn_cols_vertical, fn_cols_horizontal)).astype(int)
    indices = np.hstack((fn_rows_vertical, fn_rows_horizontal)).astype(int)
    data = np.ones(indices.size, dtype=int)

    # Finally, construct the face-node sparse matrix
    face_nodes = sps.csc_matrix((data, indices, indptr), shape=(nn_3d, nf_3d))

    ### Next the cell-faces.
    # Similar to the face-nodes, the easiest option is first to deal with the vertical
    # faces, which can be inferred directly from faces in the 2d grid, and then the
    # horizontal direction.
    # IMPLEMENTATION NOTE: Since all cells have both horizontal and vertical faces, and
    # these are found in separate operations, the easiest way to assemble the 3d
    # cell-face matrix is to construct information for a coo-matrix (not compressed
    # storage), and then convert later. This has some overhead, but the alternative
    # is to combine and sort the face indices in the horizontal and vertical components
    # so that all faces of any cell are stored together. This is most conveniently
    # left to scipy's sparse .tocsc() function

    ## Vertical faces
    # For the vertical faces, the information from the 2d grid can be copied

    cf_rows_2d = g.cell_faces.indices
    cf_cols_2d = g.cell_faces.indptr
    cf_data_2d = g.cell_faces.data

    cf_rows_vertical = np.array([], dtype=int)
    # For the cells, we will store the number of faces for each cell. This will later
    # be expanded to a full set of cell indices
    cf_vertical_cell_count = np.array([], dtype=int)
    cf_data_vertical = np.array([])

    for k in range(num_cell_layers):
        # The face indices are found from the 2d information, with increments that
        # reflect how many layers of vertical faces there are below
        cf_rows_vertical = np.hstack((cf_rows_vertical, cf_rows_2d + k * nf_2d))
        # The diff here gives the number of faces per cell
        cf_vertical_cell_count = np.hstack(
            (cf_vertical_cell_count, np.diff(cf_cols_2d))
        )
        # The data is just plus and minus ones, no need to adjust
        cf_data_vertical = np.hstack((cf_data_vertical, cf_data_2d))

    # Expand information of the number of faces per cell into a corresponding full set
    # of cell indices
    cf_cols_vertical = pp.utils.matrix_compression.rldecode(
        np.arange(nc_3d), cf_vertical_cell_count
    )

    ## Horizontal faces
    # There is one set of faces per layer of nodes.
    # The cell_face relation will assign -1 to the upper cells, and +1 to lower cells.
    # This corresponds to normal vectors pointing upwards.
    # The bottom and top layers are special, in that they have only one neighboring
    # cell. All other layers have two (they are internal)

    # Bottom layer
    cf_rows_horizontal = num_vertical_faces + np.arange(nc_2d)
    cf_cols_horizontal = np.arange(nc_2d)
    cf_data_horizontal = -np.ones(nc_2d, dtype=int)

    # Intermediate layers
    for k in range(1, num_cell_layers):
        # Face indices are given twice, for the lower and upper neighboring cell
        # The offset of the face index is the number of vertical faces plus the number
        # of horizontal faces in lower layers
        rows_here = (
            num_vertical_faces
            + k * nc_2d
            + np.hstack((np.arange(nc_2d), np.arange(nc_2d)))
        )
        cf_rows_horizontal = np.hstack((cf_rows_horizontal, rows_here))

        # Cell indices, first of the lower layer, then of the upper
        cols_here = np.hstack(
            ((k - 1) * nc_2d + np.arange(nc_2d), k * nc_2d + np.arange(nc_2d))
        )
        cf_cols_horizontal = np.hstack((cf_cols_horizontal, cols_here))
        # Data: +1 for the lower cells, -1 for the upper
        data_here = np.hstack((np.ones(nc_2d), -np.ones(nc_2d)))
        cf_data_horizontal = np.hstack((cf_data_horizontal, data_here))

    # Top layer, with index offset for all other faces
    cf_rows_horizontal = np.hstack(
        (
            cf_rows_horizontal,
            num_vertical_faces + num_cell_layers * nc_2d + np.arange(nc_2d),
        )
    )
    # Similarly, the cell indices of the topmost layer
    cf_cols_horizontal = np.hstack(
        (cf_cols_horizontal, (num_cell_layers - 1) * nc_2d + np.arange(nc_2d))
    )
    # Only +1 in the data (opposite of the lowermost layer)
    cf_data_horizontal = np.hstack((cf_data_horizontal, np.ones(nc_2d)))

    # Merge horizontal and vertical layers
    cf_rows = np.hstack((cf_rows_horizontal, cf_rows_vertical))
    cf_cols = np.hstack((cf_cols_horizontal, cf_cols_vertical))
    cf_data = np.hstack((cf_data_horizontal, cf_data_vertical))

    cell_faces = sps.coo_matrix(
        (cf_data, (cf_rows, cf_cols)), shape=(nf_3d, nc_3d)
    ).tocsc()

    tags = _define_tags(g, num_cell_layers)

    name = g.name.copy()
    name.append("Extrude 2d->3d")

    g_new = pp.Grid(3, nodes, face_nodes, cell_faces, name, tags=tags)
    g_new.compute_geometry()

    # Mappings between old and new cells and faces
    cell_map, face_map = _create_mappings(g, g_new, num_cell_layers)

    return g_new, cell_map, face_map
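A hedged usage sketch, assuming PorePy is importable as pp and the helper _extrude_2d (above) is in scope; public code would normally go through PorePy's grid extrusion wrapper instead:

import numpy as np
import porepy as pp

g_2d = pp.CartGrid([2, 2])
z = np.array([0.0, 0.5, 1.0])  # Non-negative and increasing, as required

g_3d, cell_map, face_map = _extrude_2d(g_2d, z)
assert g_3d.num_cells == g_2d.num_cells * (z.size - 1)
# cell_map[ci] lists the extruded cells stemming from 2d cell ci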
Example #25
0
    def bc_values_mechanics(self, g: pp.Grid) -> np.ndarray:
        """ Mechanical stress values as ISC

        All faces are Neumann, except 3 faces fixed
        by self.faces_to_fix(g), which are Dirichlet.

        If gravity is activated, the stress becomes lithostatic. The provided stress
        then corresponds to a true depth (here set to the vertical center of the
        domain, (zmax + zmin) / 2). All stress components, including off-diagonal
        ones, are scaled with depth relative to this true depth, i.e. larger
        (compressive) stress below the true depth, and smaller above.
        """
        ss, ls = self.params.scalar_scale, self.params.length_scale
        # Retrieve the domain boundary
        all_bf, *_ = self.domain_boundary_sides(g)

        # Boundary values
        bc_values = np.zeros((g.dim, g.num_faces))

        # --- mechanical state ---
        # Get outward facing normal vectors for domain boundary, weighted for face area

        # 1. Get normal vectors on the faces. These are already weighted by face area.
        bf_normals: np.ndarray = g.face_normals
        # 2. Adjust direction so they face outwards
        flip_normal_to_outwards = np.where(g.cell_face_as_dense()[0, :] >= 0, 1, -1)
        outward_normals: np.ndarray = bf_normals * flip_normal_to_outwards
        bf_stress = np.dot(self.params.stress, outward_normals[:, all_bf])
        # Mechanical stress
        bc_values[:, all_bf] += bf_stress * (pp.PASCAL / ss)

        # --- gravitational forces ---
        # Boundary stresses are assumed to increase linearly with depth.
        # We assume all components of the tensor increase linearly, with
        # a factor relative to the pure vertical stress component.
        if self.params.gravity:
            # Set (unscaled) depth in the local coordinate system of the domain
            # where we consider the measured stress exact ..
            true_stress_depth = (
                (self.bounding_box["zmax"] + self.bounding_box["zmin"]) / 2 * ls
            )
            # .. and compute the relative (unscaled) depths in terms of this reference depth.
            relative_depths: np.ndarray = (g.face_centers[2] * ls) - true_stress_depth

            # If the vertical stress is zero, raise.
            if np.abs(self.params.stress[2, 2]) < 1e-12:
                raise ValueError("Cannot set gravity if vertical stress is zero")
            # Each stress component scales relative to the vertical stress.
            stress_scaler = self.params.stress / self.params.stress[2, 2]

            # Lithostatic pressure
            gravity: np.ndarray = self.params.rock.lithostatic_pressure(relative_depths)
            lithostatic_stress = stress_scaler.dot(
                np.multiply(outward_normals, gravity)
            )
            lithostatic_bc = lithostatic_stress[:, all_bf]

            bc_values[:, all_bf] += lithostatic_bc * (pp.PASCAL / ss)

        # DIRICHLET
        faces = self.faces_to_fix(g)
        bc_values[:, faces] = 0  # Fix the Dirichlet faces (zero displacement)

        return bc_values.ravel("F")
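The outward-orientation trick above does not depend on the model class; a minimal sketch on a plain PorePy grid (the sign convention mirrors the snippet and is an assumption here):

import numpy as np
import porepy as pp

g = pp.CartGrid([2, 2])
g.compute_geometry()

# cell_face_as_dense()[0] holds the cell on one side of each face, -1 if none;
# flipping where that entry is missing makes boundary normals point outwards
flip = np.where(g.cell_face_as_dense()[0, :] >= 0, 1, -1)
outward_normals = g.face_normals * flip  # Still weighted by face area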
Example #26
0
def _duplicate_specific_faces(gh: pp.Grid, frac_id: np.ndarray) -> np.ndarray:
    """
    Duplicate faces of gh specified by frac_id.
    """

    # Find which of the faces to split are tagged with a standard face tag,
    # that is, as fracture, tip or domain_boundary
    rem = tags.all_face_tags(gh.tags)[frac_id]

    # Set the faces to be split to fracture faces
    # Q: Why only if the face already had a tag (e.g., why [rem])?
    # Possible answer: We will not split them (see redefinition of frac_id below),
    # but want them to be tagged as fracture_faces
    gh.tags["fracture_faces"][frac_id[rem]] = True
    # Faces to be split should not be tip
    gh.tags["tip_faces"][frac_id] = False

    # Only consider previously untagged faces for splitting
    frac_id = frac_id[~rem]
    if frac_id.size == 0:
        return frac_id

    # Expand the face-node relation to include duplicated nodes
    # Do this by directly manipulating the CSC-format of the matrix
    # Nodes of the target faces
    node_start = gh.face_nodes.indptr[frac_id]
    node_end = gh.face_nodes.indptr[frac_id + 1]
    nodes = gh.face_nodes.indices[mcolon(node_start, node_end)]

    # Start positions for the new columns. They will be appended to the matrix,
    # hence the offset by the previous size of gh.face_nodes
    added_node_pos = np.cumsum(node_end - node_start) + gh.face_nodes.indptr[-1]
    # Sanity checks
    assert added_node_pos.size == frac_id.size
    assert added_node_pos[-1] - gh.face_nodes.indptr[-1] == nodes.size
    # Expand row-data by adding node indices
    gh.face_nodes.indices = np.hstack((gh.face_nodes.indices, nodes))
    # Expand column pointers
    gh.face_nodes.indptr = np.hstack((gh.face_nodes.indptr, added_node_pos))
    # Expand data array
    gh.face_nodes.data = np.hstack(
        (gh.face_nodes.data, np.ones(nodes.size, dtype=bool)))
    # Update matrix shape
    gh.face_nodes._shape = (gh.num_nodes,
                            gh.face_nodes.shape[1] + frac_id.size)
    assert gh.face_nodes.indices.size == gh.face_nodes.indptr[-1]

    # We also copy the attributes of the original faces.
    gh.num_faces += frac_id.size
    gh.face_normals = np.hstack((gh.face_normals, gh.face_normals[:, frac_id]))
    gh.face_areas = np.append(gh.face_areas, gh.face_areas[frac_id])
    gh.face_centers = np.hstack((gh.face_centers, gh.face_centers[:, frac_id]))

    # Not sure if this still does the correct thing. Might have to
    # send in a logical array instead of frac_id.
    gh.tags["fracture_faces"][frac_id] = True
    gh.tags["tip_faces"][frac_id] = False
    update_fields = gh.tags.keys()
    update_values: List[List[np.ndarray]] = [[]] * len(update_fields)
    for i, key in enumerate(update_fields):
        # face-related tags are duplicated, and the values are inherited from the original faces
        if key.endswith("_faces"):
            update_values[i] = gh.tags[key][frac_id]
    tags.append_tags(gh.tags, update_fields, update_values)

    return frac_id
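The raw CSC manipulation above is easier to follow on a toy matrix. A plain scipy sketch duplicating two columns, including the same private _shape update the function relies on:

import numpy as np
import scipy.sparse as sps

A = sps.csc_matrix(np.array([[1, 0, 1],
                             [0, 1, 1]], dtype=bool))
cols = np.array([0, 2])  # Columns to duplicate

starts, ends = A.indptr[cols], A.indptr[cols + 1]
rows = np.concatenate([A.indices[s:e] for s, e in zip(starts, ends)])

# Append the duplicated columns at the end of the matrix
new_ptr = np.cumsum(ends - starts) + A.indptr[-1]
A.indices = np.hstack((A.indices, rows))
A.indptr = np.hstack((A.indptr, new_ptr))
A.data = np.hstack((A.data, np.ones(rows.size, dtype=bool)))
A._shape = (A.shape[0], A.shape[1] + cols.size)  # Private attribute, as above
# A.toarray() -> columns 3 and 4 are copies of columns 0 and 2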
Example #27
0
def update_cell_connectivity(g: pp.Grid, face_id: np.ndarray,
                             normal: np.ndarray, x0: np.ndarray) -> int:
    """
    After the faces in a grid are duplicated, we update the cell connectivity
    list. Cells on the right side of the fracture do not change, but the cells
    on the left side are attached to the face duplicates. We assume that all
    faces that have been duplicated lie in the same plane. This plane is
    described by a normal and a point, x0. We attach the cells on the left side
    of the plane to the duplicates of the faces, while the cells on the right
    side stay attached to the original faces, face_id.

    Parameters:
    ----------
    g         - The grid for which the cell_faces mapping is updated
    face_id   - Indices of the faces that have been duplicated
    normal    - Normal of faces that have been duplicated. Note that we assume
                that all faces have the same normal
    x0        - A point in the plane where the faces lie

    Returns:
    ----------
    int: Flag that informs on what action has been taken. 0 means g.cell_faces has been
        split. -1 means the fracture was on the boundary, and no action taken.

    Raises:
    ----------
    ValueError: If the fracture is not planar

    """

    # We find the cells attached to the tagged faces.
    g.cell_faces = g.cell_faces.tocsr()
    cell_frac = g.cell_faces[face_id, :]
    cell_face_id = np.argwhere(cell_frac)

    # We divide the cells into the cells on the right side of the fracture
    # and cells on the left side of the fracture.
    left_cell = half_space_int(normal, x0, g.cell_centers[:, cell_face_id[:, 1]])

    if np.all(left_cell) or not np.any(left_cell):
        # Fracture is on boundary of domain. There is nothing to do.
        # Remove the extra faces. We have not yet updated cell_faces,
        # so we should not delete anything from this matrix.
        rem = np.arange(g.cell_faces.shape[0], g.num_faces)
        remove_faces(g, rem, rem_cell_faces=False)
        return -1

    # Assume that the fracture is either on the boundary (above case) or
    # completely inside the domain. Check that each face added two cells:
    if sum(left_cell) * 2 != left_cell.size:
        raise ValueError(
            "Fractures must either be on the boundary or completely inside the domain"
        )

    # We create a cell_faces mapping for the new faces. This will be appended
    # to the end of the existing cell_faces mapping. We have here assumed
    # that we do not add any mapping during the duplication of faces.
    col = cell_face_id[left_cell, 1]
    row = cell_face_id[left_cell, 0]
    data = np.ravel(g.cell_faces[np.ravel(face_id[row]), col])
    assert data.size == face_id.size
    cell_frac_left = sps.csr_matrix((data, (row, col)),
                                    (face_id.size, g.cell_faces.shape[1]))

    # We now update the cell_faces map of the faces on the right side of
    # the fracture. These faces should only be attached to the right cells.
    # We therefore remove their connection to the cells on the left side of
    # the fracture.
    col = cell_face_id[~left_cell, 1]
    row = cell_face_id[~left_cell, 0]
    data = np.ravel(g.cell_faces[np.ravel(face_id[row]), col])
    cell_frac_right = sps.csr_matrix((data, (row, col)),
                                     (face_id.size, g.cell_faces.shape[1]))

    assert g.cell_faces.getformat() == "csr"

    sparse_mat.merge_matrices(g.cell_faces, cell_frac_right, face_id)

    # And then we add the new left-faces to the cell_face map. We do not
    # change the sign of the matrix since we did not flip the normals.
    # This means that the normals of right and left cells point in the same
    # direction, but their cell_faces values have opposite signs.
    sparse_mat.stack_mat(g.cell_faces, cell_frac_left)
    g.cell_faces = g.cell_faces.tocsc()

    return 0
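half_space_int is a PorePy-internal helper; the left/right split it provides amounts to a sign test against the plane (normal, x0). A plain numpy sketch, with the sign convention an assumption for illustration:

import numpy as np

normal = np.array([[1.0], [0.0], [0.0]])  # Plane normal
x0 = np.array([[0.5], [0.0], [0.0]])      # A point on the plane
pts = np.array([[0.0, 1.0],
                [0.0, 0.0],
                [0.0, 0.0]])              # Two cell centers, one on each side

left = (normal.T @ (pts - x0) < 0).ravel()
# left -> array([ True, False])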
Example #28
0
def overlap(
    g: pp.Grid, cell_ind: np.ndarray, num_layers: int, criterion: str = "node"
) -> np.ndarray:
    """
    From a set of cell indices, find an extended set of cells that form an
    overlap (in the domain decomposition sense).

    The cell set is increased by including all cells that share at least one
    node with the existing set. When multiple layers are asked for, this
    process is repeated.

    The definition of neighborship is specified by criterion. Possible options
    are 'face' (each layer will add cells that share a face with the active
    face set), or 'node' (each layer will add cells sharing a vertex with the
    active set).

    Parameters:
        g (core.grids.grid): The grid; the cell-node relation will be used to
            extend the cell set.
        cell_ind (np.array): Cell indices, the initial cell set.
        num_layers (int): Number of overlap layers.
        criterion (str, optional): Which definition of neighborship to apply.
            Should be either 'face' or 'node'. Default is 'node'.

    Returns:
        np.array: Indices of the extended cell set.

    Examples:
        >>> g = pp.CartGrid([5, 5])
        >>> ci = np.array([0, 1, 5, 6])
        >>> overlap(g, ci, 1)
        array([ 0,  1,  2,  5,  6,  7, 10, 11, 12])

    """

    # Boolean storage of cells in the active set; these are the ones that will
    # be in the overlap
    active_cells = np.zeros(g.num_cells, dtype=bool)
    # Initialize by the specified cells
    active_cells[cell_ind] = 1

    if criterion.lower().strip() == "node":
        # Construct cell-node map, its transpose will be a node-cell map
        cn = g.cell_nodes()

        # Also introduce active nodes
        active_nodes = np.zeros(g.num_nodes, dtype=bool)

        # Gradually increase the size of the cell set
        for _ in range(num_layers):
            # Nodes are found via the mapping
            active_nodes[np.squeeze(np.where((cn * active_cells) > 0))] = 1
            # Map back to new cells
            ci_new = np.squeeze(np.where((cn.transpose() * active_nodes) > 0))
            # Activate new cells.
            active_cells[ci_new] = 1

    elif criterion.lower().strip() == "face":
        # Create a version of g.cell_faces with only positive values for
        # connections, i.e. discard the divergence (sign) structure
        cf = g.cell_faces
        # This avoids overwriting data in cell_faces.
        data = np.ones_like(cf.data)
        cf = sps.csc_matrix((data, cf.indices, cf.indptr))

        active_faces = np.zeros(g.num_faces, dtype=bool)

        # Gradually increase the size of the cell set
        for _ in range(num_layers):
            # All faces adjacent to an active cell
            active_faces[np.squeeze(np.where((cf * active_cells) > 0))] = 1
            # Map back to active cells, including that on the other side of the
            # newly found faces
            ci_new = np.squeeze(np.where((cf.transpose() * active_faces) > 0))
            # Activate new cells
            active_cells[ci_new] = 1

    # Sort the output; returning sorted indices should not be a disadvantage
    return np.sort(np.squeeze(np.argwhere(active_cells > 0)))
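The core of each layer update is two sparse matrix-vector products. A minimal sketch of one node-based layer on a 3x3 Cartesian grid, assuming PorePy is importable as pp:

import numpy as np
import porepy as pp

g = pp.CartGrid([3, 3])
cn = g.cell_nodes()  # num_nodes x num_cells incidence matrix

active_cells = np.zeros(g.num_cells, dtype=bool)
active_cells[4] = True  # Start from the center cell

active_nodes = (cn * active_cells) > 0       # Nodes touched by active cells
grown = (cn.transpose() * active_nodes) > 0  # Cells touching those nodes
# np.where(grown)[0] -> all nine cells: the center shares a node with each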
Example #29
0
def coarse_fine_cell_mapping(
    g: pp.Grid, g_ref: pp.Grid, point_in_poly_tol=1e-8
) -> sps.csc_matrix:
    """ Construct a mapping between cells of a grid and its refined version

    We assume a regular and a refined mesh, where the refinement is executed
    by splitting, i.e. every cell in the refined grid is completely contained
    within a cell in the coarse grid.

    Parameters
    ----------
    g : pp.Grid
        Coarse grid
    g_ref : pp.Grid
        Refined grid
    point_in_poly_tol : float, optional
        Tolerance for pp.geometry_property_checks.point_in_polyhedron()

    Returns
    -------
    coarse_fine : sps.csc_matrix
        Column major sparse matrix mapping from coarse to fine cells.


    This method creates a mapping from coarse to fine cells by creating
    a matrix 'M' where the rows represent the fine cells while the
    columns represent the coarse cells. In practice this means that,
    for an array 'p' of known values on the coarse grid, applying
    the mapping
        q = M * p
    transfers each coarse-cell value to all the fine cells contained
    within that coarse cell.

    The procedure for creating this mapping relies on two main assumptions.
        1. Each fine cell is fully contained inside exactly one coarse cell.
        2. Each cell can be characterised as a simplex,
            i.e. every face is defined by all of the cell's nodes except one.

    The first assumption implies that the problem of assessing if a fine
    cell is contained within a coarse cell is reduced to assessing if
    the center of a fine cell is contained within the coarse cell.

    The second assumption implies that a cell in any dimension (1D, 2D, 3D)
    will be a convex object. This way, we can use existing algorithms in
    PorePy to find if a point is inside a polygon (2D) or polyhedron (3D).
    (The 1D case is trivial)

    The general algorithm is as follows (refer to start of for-loop in
    the code):
    1. Make a list of (pointers to) untested cell centers called
        'test_cells_ptr'.
    2. Iterate through all coarse cells. Now, consider one of these:
    3. For all untested cell centers (defined by 'test_cells_ptr'),
        check if they are inside the coarse cell.
    4. Those that pass (i.e. are inside the coarse cell) are added to the
        mapping, and removed from the list of untested cell centers.
    5. Assemble the mapping.

    """

    assert g.num_cells < g_ref.num_cells, "Wrong order of input grids"
    assert g.dim == g_ref.dim, "Grids must be of same dimension"

    # 1. Step: Create a list of tuples pointing to the (start, end)
    # index of the nodes of each cell on the coarse grid.
    cell_nodes = g.cell_nodes()
    # start/end row pointers for each column
    nodes_of_cell_ptr = zip(cell_nodes.indptr[:-1], cell_nodes.indptr[1:])

    # 2. Step: Initialize a sps.csc_matrix mapping fine cells to
    # coarse cells.
    indptr = np.array([0])
    indices = np.empty(0, dtype=int)

    cells_ref = g_ref.cell_centers.copy()  # Cell centers in fine grid
    test_cells_ptr = np.arange(g_ref.num_cells)  # Pointer to cell centers
    nodes = g.nodes.copy()

    # 3. Step: If the grids are in 1D or 2D, we can simplify the
    # calculation by rotating the coordinate system to local
    # coordinates. For example, a 2D grid embedded in 3D would be "rotated"
    # so that each coordinate is of the form (x, y, 0).
    if g.dim == 1:
        # Rotate coarse nodes and fine cell centers to align with the x-axis
        tangent = pp.map_geometry.compute_tangent(nodes)
        reference = [1, 0, 0]
        R = pp.map_geometry.project_line_matrix(nodes, tangent, reference=reference,)
        nodes = R.dot(nodes)[0, :]
        cells_ref = R.dot(cells_ref)[0, :]

    elif g.dim == 2:  # Pre-processing for efficiency
        # Rotate coarse nodes and fine cell centers to the xy-plane.
        R = pp.map_geometry.project_plane_matrix(nodes, check_planar=False)
        nodes = np.dot(R, nodes)[:2, :]
        cells_ref = np.dot(R, cells_ref)[:2, :]

    # 4. Step: Loop through every coarse cell
    for st, nd in nodes_of_cell_ptr:

        nodes_idx = cell_nodes.indices[st:nd]
        num_nodes = nodes_idx.size

        # 5. Step: for the appropriate grid dimension, test all cell centers not already
        # found to be inside some other coarse cell.
        # 'in_poly' is a boolean array that is True for the points inside and False for
        # the points not inside.
        if g.dim == 1:
            # In 1D, we use a numpy method to check which coarse cell the fine points
            # are inside.
            assert num_nodes == 2, "We assume a 'cell' in 1D is defined by two points."
            line = np.sort(nodes[nodes_idx])
            test_points = cells_ref[test_cells_ptr]
            in_poly = np.searchsorted(line, test_points, side="left") == 1

        elif g.dim == 2:
            assert num_nodes == 3, "We assume simplexes in 2D (i.e. 3 nodes)"
            polygon = nodes[:, nodes_idx]
            test_points = cells_ref[:, test_cells_ptr]
            in_poly = pp.geometry_property_checks.point_in_polygon(
                poly=polygon, p=test_points, tol=point_in_poly_tol
            )

        elif g.dim == 3:
            # Make polyhedron from node coordinates.
            # Polyhedron defined as a list of nodes defining its (convex) faces.
            # Assumes simplexes: Every node except one defines every face.
            assert num_nodes == 4, "We assume simplexes in 3D (i.e. 4 nodes)"
            node_coords = nodes[:, nodes_idx]

            ids = np.arange(num_nodes)
            polyhedron = [node_coords[:, ids != i] for i in np.arange(num_nodes)]
            # Test only points not inside another polyhedron.
            test_points = cells_ref[:, test_cells_ptr]
            in_poly = pp.geometry_property_checks.point_in_polyhedron(
                polyhedron=polyhedron, test_points=test_points, tol=point_in_poly_tol
            )

        else:
            logger.warning(f"A grid of dimension {g.dim} encountered. Skip!")
            continue

        # 6. Step: Update pointer to which cell centers to use as test points
        in_poly_ids = test_cells_ptr[in_poly]  # id of cells inside this polyhedron
        # Keep only cells not inside this polyhedron
        test_cells_ptr = test_cells_ptr[~in_poly]

        # Update mapping
        indices = np.append(indices, in_poly_ids)
        indptr = np.append(indptr, indptr[-1] + in_poly_ids.size)

    # 7. Step: assemble the sparse matrix with the mapping.
    data = np.ones(indices.size)
    coarse_fine = sps.csc_matrix((data, indices, indptr))

    assert (
        indices.size == g_ref.num_cells
    ), "Every fine cell should be inside exactly one coarse cell"
    return coarse_fine
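A hedged usage sketch on nested 1d Cartesian grids, assuming PorePy is importable as pp and coarse_fine_cell_mapping (above) is in scope:

import numpy as np
import porepy as pp

g = pp.CartGrid(np.array([2]), physdims=np.array([2.0]))      # Coarse: 2 cells
g_ref = pp.CartGrid(np.array([4]), physdims=np.array([2.0]))  # Fine: 4 cells
g.compute_geometry()
g_ref.compute_geometry()

M = coarse_fine_cell_mapping(g, g_ref)  # Shape (g_ref.num_cells, g.num_cells)

p_coarse = np.array([1.0, 2.0])  # Known values on the coarse grid
q_fine = M * p_coarse            # Each fine cell inherits its parent's value
# q_fine -> array([1., 1., 2., 2.])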
Example #30
0
def _update_geometry(
    g_h: pp.Grid,
    g_l: pp.Grid,
    new_cells: np.ndarray,
    n_old_cells_l: int,
    n_old_faces_l: int,
) -> None:
    # Update geometry on each iteration to ensure correct tags.

    # The geometry of the higher-dimensional grid can be computed straightforwardly.
    g_h.compute_geometry()

    if g_h.dim == 2:
        # 1d geometry computation is valid also for manifolds
        g_l.compute_geometry()
    else:
        # The implementation of 2d compute_geometry() assumes that the
        # grid is planar. The simplest option is to treat one cell at
        # a time, and then merge the arrays at the end.

        # Initialize arrays for geometric quantities
        fa = np.empty(0)  # Face areas
        fc = np.empty((3, 0))  # Face centers
        fn = np.empty((3, 0))  # Face normals
        cv = np.empty(0)  # Cell volumes
        cc = np.empty((3, 0))  # Cell centers
        # Many of the faces will have their quantities computed twice,
        # once from each side. Keep track of which faces we are dealing with
        face_ind = np.array([], dtype=int)

        for ci in new_cells:
            sub_g, fi, _ = pp.partition.extract_subgrid(g_l, ci)
            sub_g.compute_geometry()

            fa = np.append(fa, sub_g.face_areas)
            fc = np.append(fc, sub_g.face_centers, axis=1)
            fn = np.append(fn, sub_g.face_normals, axis=1)
            cv = np.append(cv, sub_g.cell_volumes)
            cc = np.append(cc, sub_g.cell_centers, axis=1)

            face_ind = np.append(face_ind, fi)

        # The new cell geometry is composed of values from the previous grid, and
        # the values computed one by one for the new cells
        g_l.cell_volumes = np.hstack((g_l.cell_volumes[:n_old_cells_l], cv))
        g_l.cell_centers = np.hstack((g_l.cell_centers[:, :n_old_cells_l], cc))

        # For the faces, more work is needed
        face_areas = np.zeros(g_l.num_faces)
        face_centers = np.zeros((3, g_l.num_faces))
        face_normals = np.zeros((3, g_l.num_faces))

        # For the old faces, transfer already computed values
        face_areas[:n_old_faces_l] = g_l.face_areas[:n_old_faces_l]
        face_centers[:, :n_old_faces_l] = g_l.face_centers[:, :n_old_faces_l]
        face_normals[:, :n_old_faces_l] = g_l.face_normals[:, :n_old_faces_l]

        for fi in range(n_old_faces_l, g_l.num_faces):
            # Geometric quantities for this face
            hit = np.where(face_ind == fi)[0]
            # There should be 1 or 2 hits
            assert hit.size > 0 and hit.size < 3

            # For areas and centers, the computations based on the two neighboring
            # cells should give the same result. Check, and then use the value.
            mean_area = np.mean(fa[hit])
            mean_center = np.mean(fc[:, hit], axis=1)
            assert np.allclose(fa[hit], mean_area)
            assert np.allclose(fc[:, hit], mean_center.reshape((3, 1)))
            face_areas[fi] = mean_area
            face_centers[:, fi] = mean_center

            # The normal is more difficult, since this is not unique.
            # The direction of the normal vectors computed from subgrids should be
            # consistent with the +- convention in the main grid.

            # Normal vectors found for this global face
            normals = fn[:, hit]
            if normals.size == 3:
                normals = normals.reshape((3, 1))

            # For the moment, use the mean of the two values.
            mean_normal = np.mean(normals, axis=1)

            face_normals[:, fi] = mean_normal / np.linalg.norm(mean_normal) * mean_area

        # Sanity check
        # assert np.allclose(np.linalg.norm(face_normals, axis=0), face_areas)

        # Store computed values
        g_l.face_areas = face_areas
        g_l.face_centers = face_centers
        g_l.face_normals = face_normals
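The face-averaging step above reduces quantities computed once per neighboring cell to one value per face; with numpy this can also be done without the explicit loop. A toy sketch (arrays are hypothetical):

import numpy as np

face_ind = np.array([0, 1, 1, 2])    # Face 1 was computed from both sides
fa = np.array([1.0, 2.0, 2.0, 3.0])  # One area value per computation

counts = np.bincount(face_ind)
mean_area = np.bincount(face_ind, weights=fa) / counts
# mean_area -> array([1., 2., 3.])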