Example 1
def set_grid(self, gb: pp.GridBucket):
    """ Set a new grid
    """
    self.gb = gb
    self.Nd = gb.dim_max()
    self.n_frac = gb.get_grids(lambda _g: _g.dim == self.Nd - 1).size
    self.gb.add_node_props(keys="name")  # Add 'name' as node prop to all grids.
Example 2
def grid_error(mappings, gb: pp.GridBucket, gb_ref: pp.GridBucket, variable,
               variable_dof):
    """ Compute grid errors for a given mapping

    The mapping is the one computed by gb_coarse_fine_cell_mapping()
    """
    # TODO: Fix this method. Something is wrong when passing a mapping to it.
    errors = {}

    for g, g_ref, mapping in mappings:
        assert g.num_cells < g_ref.num_cells

        data = gb.node_props(g)
        data_ref = gb_ref.node_props(g_ref)

        errors[data['node_number']] = {}  # Initialize this dict entry

        states = data[pp.STATE]
        states_ref = data_ref[pp.STATE]

        # TODO: Add some limitation to which keys you want to check,
        #  or how you should compute errors over certain types of keys
        state_keys = set(states.keys())
        state_ref_keys = set(states_ref.keys())
        check_keys = state_keys.intersection(state_ref_keys)

        if variable not in check_keys:
            logger.info(f"{variable} not present on grid number "
                        f"{gb.node_props(g, 'node_number')} of dim {g.dim}.")
            continue

        sol = states[variable].reshape((-1, variable_dof))
        # Flatten both solutions: broadcasting an (n, 1) array against an (n,)
        # array would silently produce an (n, n) difference.
        mapped_sol = np.asarray(mapping.dot(sol)).ravel()
        sol_ref = np.asarray(states_ref[variable]).ravel()

        absolute_error = np.linalg.norm(mapped_sol - sol_ref)

        norm_ref = np.linalg.norm(sol_ref)
        if norm_ref < 1e-5:
            logger.warning(f"Relative error not reportable. "
                           f"Norm of reference solution is {norm_ref}. "
                           f"Reporting absolute error")
            relative_error = -1
        else:
            relative_error = absolute_error / norm_ref

        errors[data['node_number']] = {
            variable: {
                'absolute_error': absolute_error,
                'relative_error': relative_error
            }
        }

    return errors
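
A minimal usage sketch for grid_error above. The setup is an assumption for illustration: mappings is built with coarse_fine_cell_mapping as in Example 4, and 'pressure' is a placeholder variable name.

# Usage sketch (illustrative names, not part of the original code):
mappings = [
    (g, g_ref, coarse_fine_cell_mapping(g, g_ref))
    for g, g_ref in zip(gb.get_grids(), gb_ref.get_grids())
]
errors = grid_error(mappings, gb, gb_ref, variable="pressure", variable_dof=1)
for node_number, err in errors.items():
    if "pressure" in err:
        print(node_number, err["pressure"]["relative_error"])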
Example 3
    def set_parameters_cell_basis(self, gb: pp.GridBucket, data: Dict):
        """
        Assign parameters for the micro gb. Very simple for now, this must be improved.

        Args:
            gb (pp.GridBucket): the micro grid bucket.
            data (Dict): parameter dictionary; see the sketch below for the
                expected layout.

        Returns:
            None.

        """
        # First initialize data
        for g, d in gb:

            d["Aavatsmark_transmissibilities"] = True

            domain_boundary = np.logical_and(
                g.tags["domain_boundary_faces"],
                np.logical_not(g.tags["fracture_faces"]),
            )

            boundary_faces = np.where(domain_boundary)[0]
            # Check the number of selected faces; the boolean mask itself
            # always has size equal to the total number of faces.
            if boundary_faces.size > 0:
                bc_type = boundary_faces.size * ["dir"]
            else:
                bc_type = np.empty(0)

            bc = pp.BoundaryCondition(g, boundary_faces, bc_type)
            if hasattr(g, "face_on_macro_bound"):
                micro_ind = g.face_on_macro_bound
                macro_ind = g.macro_face_ind

                bc.is_neu[micro_ind] = data["bc_macro"]["bc"].is_neu[macro_ind]
                bc.is_dir[micro_ind] = data["bc_macro"]["bc"].is_dir[macro_ind]

            param = {"bc": bc}
            perm = data["g_data"](g)["second_order_tensor"]
            param["second_order_tensor"] = perm
            param["specific_volume"] = data["g_data"](g)["specific_volume"]

            # Use python inverter for mpfa for small problems, where it does not pay off
            # to fire up numba. The set threshold value is somewhat randomly picked.
            if g.num_cells < 100:
                param["mpfa_inverter"] = "python"

            pp.initialize_default_data(g, d, self.keyword, param)

        for e, d in gb.edges():
            mg = d["mortar_grid"]
            g1, g2 = gb.nodes_of_edge(e)
            param = {}
            if not hasattr(g1, "is_auxiliary") or not g1.is_auxiliary:
                check_P = mg.secondary_to_mortar_avg()
                param.update(data["e_data"](mg, g1, g2, check_P))

            pp.initialize_data(mg, d, self.keyword, param)
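
The data argument above is only read through a handful of keys. The following sketch of its expected layout is inferred from the function body, not from a documented contract; all values are placeholders.

# Sketch of the parameter dictionary consumed by set_parameters_cell_basis:
data = {
    # Boundary condition on the macro grid, copied onto matching micro faces
    "bc_macro": {"bc": macro_bc},  # a pp.BoundaryCondition (assumed to exist)
    # Callable returning per-grid parameters for the micro problem
    "g_data": lambda g: {
        "second_order_tensor": pp.SecondOrderTensor(np.ones(g.num_cells)),
        "specific_volume": np.ones(g.num_cells),
    },
    # Callable returning per-edge parameters; the keys depend on the discretization
    "e_data": lambda mg, g1, g2, check_P: {"normal_diffusivity": 1.0},
}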
Example 4
def gb_coarse_fine_cell_mapping(
        gb: pp.GridBucket, gb_ref: pp.GridBucket, tol=1e-8
):
    """ Wrapper for coarse_fine_cell_mapping to construct mapping for grids in GridBucket.

    Parameters
    ----------
    gb : pp.GridBucket
        Coarse grid bucket
    gb_ref : pp.GridBucket
        Refined grid bucket
    tol : float, Optional
        Tolerance for point_in_poly* -methods

    Returns
    -------
    mappings : dict
        Dictionary with the keys 'gb' and 'gb_ref' holding the two grid
        buckets, and one key (g, g_ref) per matched grid pair holding the
        node number, the node data of both grids, and the coarse-to-fine
        cell mapping.
    """

    grids = gb.get_grids()
    grids_ref = gb_ref.get_grids()

    assert len(grids) == len(grids_ref), "Weakly check that GridBuckets refer to same domains"
    assert np.allclose(np.append(*gb.bounding_box()), np.append(*gb_ref.bounding_box())), \
        "Weakly check that GridBuckets refer to same domains"

    # This method assumes a consistent node ordering between grids. At least assign one.
    gb.assign_node_ordering(overwrite_existing=False)
    gb_ref.assign_node_ordering(overwrite_existing=False)

    n_grids = len(grids)
    mappings = {'gb': gb, 'gb_ref': gb_ref}

    for i in np.arange(n_grids):
        g, g_ref = grids[i], grids_ref[i]
        node_num, node_num_ref = gb._nodes[g]['node_number'], gb_ref._nodes[g_ref]['node_number']

        assert node_num == node_num_ref, "Weakly check that grids refer to same domain."

        mapping = coarse_fine_cell_mapping(g, g_ref, tol=tol)

        mappings[(g, g_ref)] = {'node_number': node_num,
                                'mapping': mapping,
                                'data': gb.node_props(g),
                                'data_ref': gb_ref.node_props(g_ref)}

    return mappings
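
Usage sketch: iterate over the returned dictionary and inspect the stored mapping for each grid pair (the shape comment follows the convention of Example 2; the print is illustrative).

mappings = gb_coarse_fine_cell_mapping(gb, gb_ref)
for key, entry in mappings.items():
    if key in ("gb", "gb_ref"):
        continue
    g, g_ref = key
    # Rows correspond to fine cells, columns to coarse cells
    print(entry["node_number"], entry["mapping"].shape)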
Example 5
    def gb(self, gb: pp.GridBucket):
        """ Set a grid bucket on the class
        """
        self._gb = gb
        if gb is None:
            return
        pp.contact_conditions.set_projections(self.gb)
        self.gb.add_node_props(
            keys=["name"])  # Add 'name' as node prop to all grids.

        # Set the bounding box
        self.bounding_box = gb.bounding_box(as_dict=True)

        # Set Nd grid name
        self.gb.set_node_prop(self._nd_grid(),
                              key="name",
                              val=self.params.intact_name)

        # Set fracture grid names
        if self.params.n_frac > 0:
            fracture_grids = self.gb.get_grids(lambda g: g.dim == self.Nd - 1)
            assert (
                len(fracture_grids) == self.params.n_frac
            ), "The number of Nd-1 fracture grids should equal the number of shear zone names"
            # We assume that order of fractures on grid creation (self.create_grid)
            # is preserved.
            for i, sz_name in enumerate(self.params.shearzone_names):
                self.gb.set_node_prop(fracture_grids[i],
                                      key="name",
                                      val=sz_name)
Example 6
def _tag_injection_cell(
    gb: pp.GridBucket, g: pp.Grid, pts: np.ndarray, length_scale
) -> None:
    """ Helper method to tag find closest point on g to pts

    The tag is set locally to g and to node props on gb.
    length_scale is used to log the unscaled distance to
    the injection cell from pts.

    Parameters
    ----------
    gb : pp.GridBucket
    g : pp.Grid
    pts : np.ndarray, shape: (3,1)
    length_scale : float

    """
    assert pts.shape == (3, 1), "We only consider one point; array needs shape 3x1"
    tags = np.zeros(g.num_cells)
    ids, dsts = g.closest_cell(pts, return_distance=True)
    tags[ids] = 1
    g.tags["well_cells"] = tags
    d = gb.node_props(g)
    pp.set_state(d, {"well": tags})

    # Log information on the injection point
    logger.info(
        f"Closest cell found has (unscaled) distance: {dsts[0] * length_scale:4f}\n"
        f"ideal (scaled) point coordinate: {pts.T}\n"
        f"nearest (scaled) cell center coordinate: {g.cell_centers[:, ids].T}\n"
    )
Example 7
    def __init__(self, gb: pp.GridBucket) -> None:

        self.gb = gb

        # Counter for block index
        block_dof_counter = 0

        # Dictionary that maps node/edge + variable combination to an index.
        block_dof: Dict[Tuple[Union[pp.Grid, Tuple[pp.Grid, pp.Grid]], str], int] = {}

        # Storage for number of dofs per variable per node/edge, with respect
        # to the ordering specified in block_dof
        full_dof: List[int] = []

        for g, d in gb:
            if pp.PRIMARY_VARIABLES not in d:
                continue

            for local_var, local_dofs in d[pp.PRIMARY_VARIABLES].items():
                # First assign a block index.
                # Note that each key in the dictionary is a tuple of a grid
                # and a variable name (str)
                block_dof[(g, local_var)] = block_dof_counter
                block_dof_counter += 1

                # Count number of dofs for this variable on this grid and store it.
                # The number of dofs for each grid entity type defaults to zero.
                total_local_dofs = (
                    g.num_cells * local_dofs.get("cells", 0)
                    + g.num_faces * local_dofs.get("faces", 0)
                    + g.num_nodes * local_dofs.get("nodes", 0)
                )
                full_dof.append(total_local_dofs)

        for e, d in gb.edges():
            if pp.PRIMARY_VARIABLES not in d:
                continue

            mg: pp.MortarGrid = d["mortar_grid"]

            for local_var, local_dofs in d[pp.PRIMARY_VARIABLES].items():

                # First count the number of dofs per variable. Note that the
                # identifier here is a tuple of the edge and a variable str.
                block_dof[(e, local_var)] = block_dof_counter
                block_dof_counter += 1

                # We only allow for cell variables on the mortar grid.
                # This will not change in the foreseeable future
                total_local_dofs = mg.num_cells * local_dofs.get("cells", 0)
                full_dof.append(total_local_dofs)

        # Array version of the number of dofs per node/edge and variable
        self.full_dof: np.ndarray = np.array(full_dof)
        self.block_dof: Dict[
            Tuple[Union[pp.Grid, Tuple[pp.Grid, pp.Grid]], str], int
        ] = block_dof
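
Given block_dof and full_dof as constructed above, the global degrees of freedom of a (grid, variable) block can be recovered with a cumulative sum. A minimal sketch; the method name dof_ind is an assumption, not part of the original class.

def dof_ind(self, g, variable) -> np.ndarray:
    """Global dof indices of the block belonging to (g, variable)."""
    # The offset of a block is the total size of all preceding blocks.
    block = self.block_dof[(g, variable)]
    offsets = np.hstack((0, np.cumsum(self.full_dof)))
    return np.arange(offsets[block], offsets[block + 1])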
Example 8
def nd_injection_cell_center(params: FlowParameters, gb: pp.GridBucket) -> None:
    """ Tag the center cell of the nd-grid with 1 (injection)

    Parameters
    ----------
    params : FlowParameters
    gb : pp.GridBucket

    """

    # Get the center of the domain.
    box = gb.bounding_box()
    pts = (box[1] + box[0]) / 2  # center of domain
    pts = np.atleast_2d(pts).T

    # Get the Nd-grid
    nd_grid = gb.grids_of_dimension(gb.dim_max())[0]

    # Tag highest dim grid with 1 in the cell closest to the grid center
    _tag_injection_cell(gb, nd_grid, pts, params.length_scale)
Example 9
def _tag_ivar_well_cells(_, gb: pp.GridBucket) -> None:
    """
    Tag well cells with unitary values, positive for injection cells and negative
    for production cells.
    """
    box = gb.bounding_box(as_dict=True)
    nd = gb.dim_max()
    for g, d in gb:
        tags = np.zeros(g.num_cells)
        if g.dim < nd:
            point = np.array([[(box["xmin"] + box["xmax"]) / 2], [box["ymax"]], [0],])
            distances = pp.distances.point_pointset(point, g.cell_centers)
            indexes = np.argsort(distances)
            if d["node_number"] == 1:
                tags[indexes[-1]] = 1  # injection
            elif d["node_number"] == 3:
                tags[indexes[-1]] = -1  # production
                # write_well_cell_to_csv(g, indexes[-1], self)
        g.tags["well_cells"] = tags
        pp.set_state(d, {"well": tags.copy()})
Example 10
def center_of_shearzone_injection_cell(
    params: FlowParameters, gb: pp.GridBucket
) -> None:
    """ Tag the center cell of the given shear zone with 1 (injection)

    Parameters
    ----------
    params : FlowParameters
    gb : pp.GridBucket
    """

    # Shorthand
    shearzone = params.source_scalar_borehole_shearzone.get("shearzone")

    # Get the grid to inject to
    frac: pp.Grid = gb.get_grids(lambda g: gb.node_props(g, "name") == shearzone)[0]
    centers: np.ndarray = frac.cell_centers
    pts = np.atleast_2d(np.mean(centers, axis=1)).T

    # Tag injection grid with 1 in the injection cell
    _tag_injection_cell(gb, frac, pts, params.length_scale)
Example 11
def shearzone_injection_cell(params: FlowParameters, gb: pp.GridBucket) -> None:
    """ Tag the borehole - shearzone intersection cell with 1 (injection)

    Parameters
    ----------
    params : FlowParameters
    gb : pp.GridBucket
    """
    # Shorthand
    shearzone = params.source_scalar_borehole_shearzone.get("shearzone")

    # Get intersection point
    pts = shearzone_borehole_intersection(params)

    # Get the grid to inject to
    injection_grid = gb.get_grids(lambda g: gb.node_props(g, "name") == shearzone)[0]
    assert (
        injection_grid.dim == gb.dim_max() - 1
    ), "Injection grid should be a Nd-1 fracture"

    # Tag injection grid with 1 in the injection cell
    _tag_injection_cell(gb, injection_grid, pts, params.length_scale)
Example 12
    def parse(self, gb: pp.GridBucket):

        bc_val = self._bc.parse(gb)  # TODO Is this done anyhow already?
        keyword = self._bc.keyword
        g = self._g[0]
        data = gb.node_props(g)
        bc = data[pp.PARAMETERS][keyword]["bc"]
        is_dir = bc.is_dir
        is_not_dir = np.logical_not(is_dir)
        dir_bc_val = bc_val.copy()
        dir_bc_val[is_not_dir] = float("NaN")

        return dir_bc_val
Example 13
    def parse(self, gb: pp.GridBucket) -> np.ndarray:
        """Convert the Ad expression into numerical values for the boundary conditions,
        in the form of an np.ndarray concatenated for all grids.

        Parameters:
            gb (pp.GridBucket): Mixed-dimensional grid. The boundary condition will be
                taken from the data dictionaries with the relevant keyword.

        Returns:
            np.ndarray: Value of boundary conditions.

        """
        val = []
        for g in self._g:
            data = gb.node_props(g)
            val.append(data[pp.PARAMETERS][self.keyword]["bc_values"])

        return np.hstack(val)
Example 14
def nd_sides_shearzone_injection_cell(
    params: FlowParameters, gb: pp.GridBucket, reset_frac_tags: bool = True,
) -> None:
    """ Tag the Nd cells surrounding a shear zone injection point

    Parameters
    ----------
    params : FlowParameters
        parameters that contain "source_scalar_borehole_shearzone"
        (with "shearzone", and "borehole") and "length_scale".
    gb : pp.GridBucket
        grid bucket
    reset_frac_tags : bool [Default: True]
        if set to False, keep injection tag in the shear zone.
    """
    # Shorthand
    shearzone = params.source_scalar_borehole_shearzone.get("shearzone")

    # First, tag the fracture cell, and get the tag
    shearzone_injection_cell(params, gb)
    fracture = gb.get_grids(lambda g: gb.node_props(g, "name") == shearzone)[0]
    tags = fracture.tags["well_cells"]
    # Second, map the cell to the Nd grid
    nd_grid: pp.Grid = gb.grids_of_dimension(gb.dim_max())[0]
    data_edge = gb.edge_props((fracture, nd_grid))
    mg: pp.MortarGrid = data_edge["mortar_grid"]

    slave_to_master_face = mg.mortar_to_master_int() * mg.slave_to_mortar_int()
    face_to_cell = nd_grid.cell_faces.T
    slave_to_master_cell = face_to_cell * slave_to_master_face
    nd_tags = np.abs(slave_to_master_cell) * tags

    # Set tags on the nd-grid
    nd_grid.tags["well_cells"] = nd_tags
    ndd = gb.node_props(nd_grid)
    pp.set_state(ndd, {"well": nd_tags})

    if reset_frac_tags:
        # reset tags on the fracture
        zeros = np.zeros(fracture.num_cells)
        fracture.tags["well_cells"] = zeros
        d = gb.node_props(fracture)
        pp.set_state(d, {"well": zeros})
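
For orientation, the shapes involved in the projection chain above can be verified with assertions placed inside the function (a sketch; the operators are exactly those already used there).

# Shape bookkeeping for the fracture-to-matrix tag projection:
assert mg.slave_to_mortar_int().shape == (mg.num_cells, fracture.num_cells)
assert mg.mortar_to_master_int().shape == (nd_grid.num_faces, mg.num_cells)
assert face_to_cell.shape == (nd_grid.num_cells, nd_grid.num_faces)
assert nd_tags.shape == (nd_grid.num_cells,)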
Example 15
    def __init__(self, params, gb: pp.GridBucket):
        super().__init__(params)

        self.gb = gb
        self.Nd = gb.dim_max()
Example 16
def _split_fracture_extension(
    bucket: pp.GridBucket,
    g_h: pp.Grid,
    g_l: pp.Grid,
    faces_h: np.ndarray,
    nodes_h: np.ndarray,
    cells_l: np.ndarray,
    non_planar: bool = False,
):
    """
    Split the higher-dimensional grid along specified faces. Updates made to
    face_cells of the grid pair and the nodes and faces of the higher-
    dimensional grid.
    Parameters
    ----------
    bucket      - A grid bucket
    g_h         - Higher-dimensional grid to be split along specified faces.
    g_l         - Immersed lower-dimensional grid.
    faces_h     - The higher-dimensional faces to be split.
    nodes_h     - The corresponding (higher-dimensional) nodes.
    cells_l     - The corresponding lower-dimensional cells.
    non_planar  - Whether the fracture is allowed to be non-planar.

    """
    # IMPLEMENTATION NOTE: Part of the following code is likely more general than
    # necessary considering assumptions made before we reach this point - e.g.
    # assumptions in propagate_fractures() and other subfunctions. Specifically,
    # it is unlikely the code will be called with g_h.dim != bucket.dim_max().

    # We are splitting faces in g_h. This affects all the immersed fractures,
    # as face_cells has to be extended for the new faces_h.
    neigh = np.array(bucket.node_neighbors(g_h))

    # Find the neighbours that are lower dimensional
    is_low_dim_grid = np.where([w.dim < g_h.dim for w in neigh])
    low_dim_neigh = neigh[is_low_dim_grid]
    edges = [(g_h, w) for w in low_dim_neigh]
    g_l_ind = np.nonzero(low_dim_neigh == g_l)[0]
    if len(edges) == 0:
        # No lower dim grid. Nothing to do.
        warnings.warn("Unexpected neighbourless g_h in fracture propagation")
        return

    face_cell_list: List[sps.spmatrix] = [
        bucket.edge_props(e, "face_cells") for e in edges
    ]

    # We split all the faces that are connected to faces_h
    # The new faces will share the same nodes and properties (normals,
    # etc.)
    face_cell_list = pp.fracs.split_grid.split_specific_faces(
        g_h, face_cell_list, faces_h, cells_l, g_l_ind, non_planar)

    # Replace the face-cell relation on the GridBucket edge
    for e, f in zip(edges, face_cell_list):
        bucket.edge_props(e)["face_cells"] = f

    # We now find which lower-dim nodes correspond to which higher-
    # dim nodes. We split these nodes according to the topology of
    # the connected higher-dim cells. At a X-intersection we split
    # the node into four, while at the fracture boundary it is not split.
    pp.fracs.split_grid.split_nodes(g_h, [g_l], [nodes_h])

    # Remove zeros from cell_faces
    for g, _ in bucket:
        g.cell_faces.eliminate_zeros()
Example 17
def propagate_fractures(gb: pp.GridBucket, faces: Dict[pp.Grid,
                                                       np.ndarray]) -> None:
    """
    gb - grid bucket with matrix and fracture grids.
    faces - dictionary mapping each fracture grid to an array of indices of
        the faces to be split in the highest-dimensional grid for the
        extension of that fracture.
    Changes to grids done in-place.
    The call changes:
        Geometry and connectivity fields of the two grids involved.
        The face_cells mapping between them
        Their respective face tags.
    Also adds the following to node data dictionaries:
        new_cells and new_faces tags, for use in e.g. local discretization
        updates.
        partial_update, a boolean flag indicating that the grids have been
        updated.

    """

    dim_h: int = gb.dim_max()
    g_h: pp.Grid = gb.grids_of_dimension(dim_h)[0]

    n_old_faces_h: int = g_h.num_faces

    # First initialise certain tags to get rid of any existing tags from
    # previous calls
    d_h: Dict = gb.node_props(g_h)
    d_h["new_cells"] = np.empty(0, dtype=int)
    d_h["new_faces"] = np.empty(0, dtype=int)
    d_h["split_faces"] = np.empty(0, dtype=int)

    # Data structure for keeping track of faces in g_h to be split
    split_faces = np.empty(0, dtype=int)

    # By default, we will not update the higher-dimensional grid. This will be
    # changed in the below for loop if the grid gets faces split.
    # This variable can be used e.g. to check if a rediscretization is necessary on
    # the higher-dimensional grid
    d_h["partial_update"] = False

    # Initialize mapping between old and new faces for g_h. We will store the updates
    # from splitting related to each lower-dimensional grid, and then merge towards the
    # end; the split data may be handy for debugging
    face_map_h: List[sps.spmatrix] = [
        sps.dia_matrix((np.ones(g_h.num_faces), 0),
                       (g_h.num_faces, g_h.num_faces))
    ]

    # The propagation is divided into two main steps:
    # First, update the geometry of the fracture grids, and, simultaneously, the higher
    # dimensional grid (the former will be updated once, the latter may undergo several
    # update steps, depending on how many fractures propagate).
    # Second, update the mortar grids. This is done after all fractures have been
    # propagated.

    for g_l in gb.grids_of_dimension(dim_h - 1):

        # The propagation of a fracture consists of the following major steps:
        #   1. Find which faces in g_h should be split for this g_l.
        #   2. Add nodes to g_l where the fracture will propagate.
        #   3. Update face-node and cell-face relation in g_l.
        #   4. Update face geometry of g_l.
        #   5. Update cell geometry of g_l.
        #   6. Split the faces in g_h to make room for the new fracture.
        #   7. Update geometry in g_l and g_h.
        #
        # IMPLEMENTATION NOTE: While point 7 replaces information from 4 and 5, the
        # provisional fields may still be needed in point 6.

        # Initialize data on new faces and cells
        d_l = gb.node_props(g_l)
        d_l["new_cells"] = np.empty(0, dtype=int)
        d_l["new_faces"] = np.empty(0, dtype=int)

        # Step 1:
        # Uniquify the faces to be split. Among other things, this avoids trouble
        # when a face is requested split twice, e.g. from two neighboring fractures
        faces_h = np.unique(np.atleast_1d(np.array(faces[g_l])))
        split_faces = np.append(split_faces, faces_h)

        if faces_h.size == 0:
            # If there is no propagation for this fracture, we continue
            # No need to update discretization of this grid
            d_l["partial_update"] = False

            # Variable mappings are unit mappings
            d_l["face_index_map"] = sps.identity(g_l.num_faces)
            d_l["cell_index_map"] = sps.identity(g_l.num_cells)

            # Identity mapping of faces in this step
            face_map_h.append(sps.identity(g_h.num_faces))

            # Move on to the next fracture
            continue

        # Keep track of original information:
        n_old_faces_l = g_l.num_faces
        n_old_cells_l = g_l.num_cells
        n_old_nodes_l = g_l.num_nodes
        n_old_nodes_h = g_h.num_nodes

        # It is convenient to tag the nodes lying on the domain boundary. This
        # helps updating the face tags later:
        pp.utils.tags.add_node_tags_from_face_tags(gb, "domain_boundary")

        # Step 2:
        # Get the "involved nodes", i.e., the union between the new nodes in
        # the lower dimension and the boundary nodes where the fracture
        # propagates. The former are added to the nodes in g_l - specifically,
        # both node coordinates and global_point_ind of g_l are amended.
        unique_node_ind_l, unique_node_ind_h = _update_nodes_fracture_grid(
            g_h, g_l, faces_h)

        # Step 3:
        # Update the connectivity matrices (cell_faces and face_nodes) and tag
        # the lower-dimensional faces, including re-classification of (former)
        # tips to internal faces, where appropriate.
        n_new_faces, new_face_centers = _update_connectivity_fracture_grid(
            g_l,
            g_h,
            unique_node_ind_l,
            unique_node_ind_h,
            n_old_nodes_l,
            n_old_faces_l,
            n_old_cells_l,
            faces_h,
        )

        # Step 4: Update fracture grid face geometry
        # Note: This simply expands arrays with face geometry, but it does not
        # compute reasonable values for the geometry
        _append_face_geometry_fracture_grid(g_l, n_new_faces, new_face_centers)

        # Step 5: Update fracture grid cell geometry
        # Same for cells. Here the geometry quantities are copied from the
        # face values of g_h, thus values should be reasonable.
        new_cells: np.ndarray = _update_cells_fracture_grid(g_h, g_l, faces_h)

        # Step 6: Split g_h along faces_h
        _split_fracture_extension(gb,
                                  g_h,
                                  g_l,
                                  faces_h,
                                  unique_node_ind_h,
                                  new_cells,
                                  non_planar=True)

        # Store information on which faces and cells have just been added.
        # Note that we only keep track of the faces and cells from the last
        # propagation call!
        new_faces_l = np.arange(g_l.num_faces - n_new_faces, g_l.num_faces)
        new_faces_h = g_h.frac_pairs[1, np.isin(g_h.frac_pairs[0], faces_h)]

        # Sanity check on the grid; most likely something will have gone wrong
        # long before if there is a problem.
        assert np.all(new_faces_h >= n_old_faces_h)
        if not np.min(new_cells) >= n_old_cells_l:
            raise ValueError(
                "New cells are assumed to be appended to cell array")
        if not np.min(new_faces_l) >= n_old_faces_l:
            raise ValueError(
                "New faces are assumed to be appended to face array")

        # Update the geometry
        _update_geometry(g_h, g_l, new_cells, n_old_cells_l, n_old_faces_l)

        # Finally some bookkeeping that can become useful in a larger-scale simulation.

        # Mark both grids for a partial update
        d_h["partial_update"] = True
        d_l["partial_update"] = True

        # Append arrays of new faces (g_l, g_h) and cells (g_l)
        d_h["new_faces"] = np.append(d_h["new_faces"], new_faces_h)
        d_l["new_cells"] = np.append(d_l["new_cells"], new_cells)
        d_l["new_faces"] = np.append(d_l["new_faces"], new_faces_l)

        # Create mappings between the old and new faces and cells in g_l
        arr = np.arange(n_old_faces_l)
        face_map_l = sps.coo_matrix(
            (np.ones(n_old_faces_l, dtype=int), (arr, arr)),
            shape=(g_l.num_faces, n_old_faces_l),
        ).tocsr()
        arr = np.arange(n_old_cells_l)
        cell_map_l = sps.coo_matrix(
            (np.ones(n_old_cells_l, dtype=int), (arr, arr)),
            shape=(g_l.num_cells, n_old_cells_l),
        ).tocsr()

        # These can be stored directly - there should be no more changes for g_l
        d_l["face_index_map"] = face_map_l
        d_l["cell_index_map"] = cell_map_l

        # For g_h we construct the map of faces for the splitting of this g_l
        # and append it to the list of face_maps

        # The size of the next map should be compatible with the number of faces in
        # the previous map.
        nfh = face_map_h[-1].shape[0]
        arr = np.arange(nfh)
        face_map_h.append(
            sps.coo_matrix(
                (np.ones(nfh, dtype=int), (arr, arr)),
                shape=(g_h.num_faces, nfh),
            ).tocsr())

        # Append default tags for the new nodes. Both high and low-dimensional grid
        _append_node_tags(g_l, g_l.num_nodes - n_old_nodes_l)
        _append_node_tags(g_h, g_h.num_nodes - n_old_nodes_h)

    # The standard node tags are updated from the face tags, which are updated on the
    # fly in the above loop.
    node_tags = ["domain_boundary", "tip", "fracture"]
    for tag in node_tags:
        # The node tag is set to true if at least one neighboring face is tagged
        pp.utils.tags.add_node_tags_from_face_tags(gb, tag)
    # Done with all splitting.

    # Compose the mapping of faces for g_h
    fm = face_map_h[0]
    for m in face_map_h[1:]:
        fm = m * fm
    d_h["face_index_map"] = fm
    # Also make a cell-map, this is a 1-1 mapping in this case
    d_h["cell_index_map"] = sps.identity(g_h.num_cells)

    d_h["split_faces"] = np.array(split_faces, dtype=int)

    ##
    # Second main step of propagation: Update mortar grid.

    # When all faces have been split, we can update the mortar grids
    for e, d_e in gb.edges_of_node(g_h):
        _, g_l = e
        d_l = gb.node_props(g_l)
        _update_mortar_grid(g_h, g_l, d_e, d_l["new_cells"], d_h["new_faces"])

        # Mapping of cell indices on the mortar grid is composed by the corresponding
        # map for g_l.
        cell_map = sps.kron(sps.identity(2), d_l["cell_index_map"]).tocsr()
        d_e["cell_index_map"] = cell_map

        # Also update projection operators
        pp.contact_conditions.set_projections(gb, [e])
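
A minimal driver sketch for propagate_fractures; the face indices are hypothetical placeholders, in practice they come from a propagation criterion.

g_frac = gb.grids_of_dimension(gb.dim_max() - 1)[0]
faces = {g_frac: np.array([12, 13])}  # hypothetical faces of g_h to split
propagate_fractures(gb, faces)

# Inspect the bookkeeping left on the highest-dimensional grid
d_h = gb.node_props(gb.grids_of_dimension(gb.dim_max())[0])
print(d_h["new_faces"], d_h["partial_update"])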
Example 18
def set_projections(
        gb: pp.GridBucket,
        edges: Optional[List[Tuple[pp.Grid, pp.Grid]]] = None) -> None:
    """Define a local coordinate system, and projection matrices, for all
    grids of co-dimension 1.

    The function adds one item to the data dictionary of all GridBucket edges
    that neighbors a co-dimension 1 grid, defined as:
        key: tangential_normal_projection, value: pp.TangentialNormalProjection
            provides projection to the surface of the lower-dimensional grid

    Note that grids of co-dimension 2 and higher are ignored in this construction,
    as we do not plan to do contact mechanics on these objects.

    It is assumed that the surface is planar.

    """
    if edges is None:
        edges = [e for e, _ in gb.edges()]

    # Information on the vector normal to the surface is not available directly
    # from the surface grid (it could be constructed from the surface geometry,
    # which spans the tangential plane). We instead get the normal vector from
    # the adjacent higher dimensional grid.
    # We therefore access the grids via the edges of the mixed-dimensional grid.
    for e in edges:
        d_m = gb.edge_props(e)

        mg = d_m["mortar_grid"]
        # Only consider edges where the lower-dimensional neighbor is of co-dimension 1
        if not mg.dim == (gb.dim_max() - 1):
            continue

        # Neighboring grids
        g_l, g_h = gb.nodes_of_edge(e)

        # Find faces of the higher dimensional grid that coincide with the mortar
        # grid. Go via the primary to mortar projection
        # Convert matrix to csr, then the relevant face indices are found from
        # the (column) indices
        faces_on_surface = mg.primary_to_mortar_int().tocsr().indices

        # Find out whether the boundary faces have outwards pointing normal vectors
        # Negative sign implies that the normal vector points inwards.
        sgn, _ = g_h.signs_and_cells_of_boundary_faces(faces_on_surface)

        # Unit normal vector
        unit_normal = g_h.face_normals[:g_h.dim] / g_h.face_areas
        # Ensure all normal vectors on the relevant surface point outwards
        unit_normal[:, faces_on_surface] *= sgn

        # Now we need to pick out *one* normal vector of the higher-dimensional
        # grid which coincides with this mortar grid, so we kill off all entries
        # for the "other" side:
        unit_normal[:, mg._ind_face_on_other_side] = 0

        # Project to the mortar and then to the fracture
        outwards_unit_vector_mortar = mg.primary_to_mortar_int().dot(
            unit_normal.T).T
        normal_lower = mg.mortar_to_secondary_int().dot(
            outwards_unit_vector_mortar.T).T

        # NOTE: The normal vector is based on the first cell in the mortar grid,
        # and will be pointing from that cell towards the other side of the
        # mortar grid. This defines the positive direction in the normal direction.
        # Although a simpler implementation seems to be possible, going via the
        # first element in faces_on_surface, there is no guarantee that this will
        # give us a face on the positive (or negative) side, hence the more general
        # approach is preferred.
        #
        # NOTE: The basis for the tangential direction is determined by the
        # construction internally in TangentialNormalProjection.
        projection = pp.TangentialNormalProjection(normal_lower)

        d_l = gb.node_props(g_l)
        # Store the projection operator in the lower-dimensional data
        d_l["tangential_normal_projection"] = projection
Example 19
def grid_error(
    gb: pp.GridBucket,
    gb_ref: pp.GridBucket,
    variable: List[str],
    variable_dof: List[int],
) -> dict:
    """ Compute grid errors a grid bucket and refined reference grid bucket

    Assumes that the coarse grid bucket has a node property
    'coarse_fine_cell_mapping' assigned on each grid, which
    maps from coarse to fine cells according to the method
    'coarse_fine_cell_mapping(...)'.

    Parameters
    ----------
    gb, gb_ref : pp.GridBucket
        Coarse and fine grid buckets, respectively
    variable : List[str]
        which variables to compute error over
    variable_dof : List[int]
        Degrees of freedom for each variable in the list 'variable'.

    Returns
    -------
    errors : dict
        Dictionary with top level keys as node_number,
        within which for each variable, the error is
        reported.
    """

    if not isinstance(variable, list):
        variable = [variable]
    if not isinstance(variable_dof, list):
        variable_dof = [variable_dof]
    assert len(variable) == len(variable_dof), (
        "Each variable must have associated "
        "with it a number of degrees of freedom.")
    n_variables = len(variable)

    errors = {}

    grids = gb.get_grids()
    grids_ref = gb_ref.get_grids()
    n_grids = len(grids)

    for i in np.arange(n_grids):
        g, g_ref = grids[i], grids_ref[i]
        mapping = gb.node_props(g, "coarse_fine_cell_mapping")

        # Get states
        data = gb.node_props(g)
        data_ref = gb_ref.node_props(g_ref)
        states = data[pp.STATE]
        states_ref = data_ref[pp.STATE]

        node_number = data["node_number"]

        # Initialize errors
        errors[node_number] = {}

        for var_idx in range(0, n_variables):
            var = variable[var_idx]
            var_dof = variable_dof[var_idx]

            # Check if the variable exists on both
            # the grid and reference grid
            state_keys = set(states.keys())
            state_ref_keys = set(states_ref.keys())
            check_keys = state_keys.intersection(state_ref_keys)
            if var not in check_keys:
                logger.info(f"{var} not present on grid number "
                            f"{node_number} of dim {g.dim}.")
                continue

            # Compute errors relative to the reference grid
            # TODO: Should the solution be divided by
            #  g.cell_volumes or similar?
            # TODO: If scaling is used, consider that
            #  - or use the export-ready variables,
            #   'u_exp', 'p_exp', etc.
            sol = (states[var].reshape((var_dof, -1),
                                       order="F").T)  # (num_cells x var_dof)
            mapped_sol: np.ndarray = mapping.dot(
                sol)  # (num_cells x variable_dof)
            sol_ref = (states_ref[var].reshape(
                (var_dof, -1), order="F").T)  # (num_cells x var_dof)

            # axis=0 gives component-wise norm.
            absolute_error = np.linalg.norm(mapped_sol - sol_ref, axis=0)
            norm_ref = np.linalg.norm(sol_ref, axis=0)

            if np.any(norm_ref < 1e-10):
                logger.info(f"Relative error not reportable. "
                            f"Norm of reference solution is {norm_ref}. "
                            f"Reporting absolute error")
                error = absolute_error
                is_relative = False
            else:
                error = absolute_error / norm_ref
                is_relative = True

            errors[node_number][var] = {
                "error": error,
                "is_relative": is_relative,
            }

    return errors
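
Usage sketch, assuming the node prop 'coarse_fine_cell_mapping' has been assigned as in Example 25 and both buckets carry pp.STATE data; 'pressure' is a placeholder variable name.

errors = grid_error(gb, gb_ref, variable=["pressure"], variable_dof=[1])
for node_number, per_var in errors.items():
    for var, e in per_var.items():
        print(node_number, var, e["error"], e["is_relative"])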
Example 20
def set_projections(gb: pp.GridBucket) -> None:
    """ Define a local coordinate system, and projection matrices, for all
    grids of co-dimension 1.

    The function adds one item to the data dictionary of all GridBucket edges
    that neighbors a co-dimension 1 grid, defined as:
        key: tangential_normal_projection, value: pp.TangentialNormalProjection
            provides projection to the surface of the lower-dimensional grid

    Note that grids of co-dimension 2 and higher are ignored in this construction,
    as we do not plan to do contact mechanics on these objects.

    It is assumed that the surface is planar.

    """
    # Information on the vector normal to the surface is not available directly
    # from the surface grid (it could be constructed from the surface geometry,
    # which spans the tangential plane). We instead get the normal vector from
    # the adjacent higher dimensional grid.
    # We therefore access the grids via the edges of the mixed-dimensional grid.
    for e, d_m in gb.edges():

        mg = d_m["mortar_grid"]
        # Only consider edges where the lower-dimensional neighbor is of co-dimension 1
        if not mg.dim == (gb.dim_max() - 1):
            continue

        # Neighboring grids
        _, g_h = gb.nodes_of_edge(e)

        # Find faces of the higher dimensional grid that coincide with the mortar
        # grid. Go via the master to mortar projection
        # Convert matrix to csr, then the relevant face indices are found from
        # the (column) indices
        faces_on_surface = mg.master_to_mortar_int().tocsr().indices

        # Find out whether the boundary faces have outwards pointing normal vectors
        # Negative sign implies that the normal vector points inwards.
        sgn = g_h.sign_of_faces(faces_on_surface)

        # Unit normal vector
        unit_normal = g_h.face_normals[: g_h.dim] / g_h.face_areas
        # Ensure all normal vectors on the relevant surface point outwards
        unit_normal[:, faces_on_surface] *= sgn

        # Now we need to pick out *one* normal vector of the higher dimensional grid
        # which coincides with this mortar grid. This could probably have been
        # done with face tags, but we instead project the normal vectors onto the
        # mortar grid to kill off all irrelevant faces. Restriction to a single
        # normal vector is done in the construction of the projection object
        # (below).
        # NOTE: Use a single normal vector to span the tangential and normal space,
        # thus assuming the surface is planar.
        outwards_unit_vector_mortar = mg.master_to_mortar_int().dot(unit_normal.T).T

        # NOTE: The normal vector is based on the first cell in the mortar grid,
        # and will be pointing from that cell towards the other side of the
        # mortar grid. This defines the positive direction in the normal direction.
        # Although a simpler implementation seems to be possible, going via the
        # first element in faces_on_surface, there is no guarantee that this will
        # give us a face on the positive (or negative) side, hence the more general
        # approach is preferred.
        #
        # NOTE: The basis for the tangential direction is determined by the
        # construction internally in TangentialNormalProjection.
        projection = pp.TangentialNormalProjection(
            outwards_unit_vector_mortar[:, 0].reshape((-1, 1))
        )

        # Store the projection operator in the mortar data
        d_m["tangential_normal_projection"] = projection
Example 21
def extrude_grid_bucket(gb: pp.GridBucket, z: np.ndarray) -> Tuple[pp.GridBucket, Dict]:
    """ Extrude a GridBucket by extending all fixed-dimensional grids in the z-direction.

    In practice, the original grid bucket will be 2d, and the result is 3d.

    The returned GridBucket is fully functional, including mortar grids on the gb edges.
    The data dictionaries on nodes and edges are mainly empty. Data can be transferred from
    the original GridBucket via the returned map between old and new grids.

    Parameters:
        gb (pp.GridBucket): Mixed-dimensional grid to be extruded. Should be 2d.
        z (np.ndarray): z-coordinates of the nodes in the extruded grid. Should be
            either non-negative or non-positive, and be sorted in increasing or
            decreasing order, respectively.

    Returns:
        gb (pp.GridBucket): Mixed-dimensional grid, 3d. The data dictionaries on nodes and
            edges are mostly empty.
        dict: Mapping from individual grids in the old bucket to the corresponding
            extruded grids in the new one. The dictionary values are a namedtuple with
            elements grid (new grid), cell_map and face_map, where the two latter
            describe mapping between the new and old grid, see extrude_grid for details.

    """

    # New GridBucket. to be filled in
    gb_new = pp.GridBucket()

    # Data structure for mapping between old and new grids
    g_map = {}

    # Container for grid information
    Mapping = namedtuple("mapping", ["grid", "cell_map", "face_map"])

    # Loop over all grids in the old bucket, extrude the grid, save mapping information
    for g, _ in gb:
        g_new, cell_map, face_map = extrude_grid(g, z)

        if hasattr(g, "frac_num"):
            g_new.frac_num = g.frac_num

        gb_new.add_nodes([g_new])

        g_map[g] = Mapping(g_new, cell_map, face_map)

    # Loop over all edges in the old grid, create corresponding edges in the new gb.
    # Also define mortar_grids
    for e, d in gb.edges():

        # grids of the old edge, extruded version of each grid
        gl, gh = gb.nodes_of_edge(e)
        gl_new = g_map[gl].grid
        gh_new = g_map[gh].grid

        # Next, we need the cell-face mapping for the new grid.
        # The idea is to first find the old map, then replace each cell-face relation
        # with the set of cells and faces (exploiting first that the new grids are
        # matching due to the extrusion algorithm, and second that the cell-map and
        # face-map stores indices in increasing layer index, so that the first cell
        # and first face both are in the first layer, thus they match, etc.).
        face_cells_old = d["face_cells"]

        # cells (in low-dim grid) and faces in high-dim grid that define the same
        # geometric quantity
        cells, faces, _ = sps.find(face_cells_old)

        # Cell-map for the low-dimensional grid, face-map for the high-dim
        cell_map = g_map[gl].cell_map
        face_map = g_map[gh].face_map

        # Data structure for the new face-cell map
        rows = np.empty(0, dtype=int)
        cols = np.empty(0, dtype=int)

        # The standard MortarGrid __init__ assumes that when faces are split because of
        # a fracture, the faces are ordered with one side first, then the other. This
        # will not be True for this layered construction. Instead, keep track of all
        # faces that should be moved to the other side.
        face_on_other_side = np.empty(0, dtype=int)

        # Loop over cells in gl would not have been as clean, as each cell is associated
        # with faces on both sides
        # Faces are found from the high-dim grid, cells in the low-dim grid
        # Precompute the median face index once, rather than in every iteration.
        median_face = np.median(faces)
        for idx in range(faces.size):
            rows = np.hstack((rows, cell_map[cells[idx]]))
            cols = np.hstack((cols, face_map[faces[idx]]))

            # Here, we tacitly assume that the original grid had its faces split in the
            # standard way, that is, all faces on one side have index lower than any
            # face on the other side.
            if faces[idx] > median_face:
                face_on_other_side = np.hstack(
                    (face_on_other_side, face_map[faces[idx]])
                )

        data = np.ones(rows.size, dtype=bool)
        # Create new face-cell map
        face_cells_new = sps.coo_matrix(
            (data, (rows, cols)), shape=(gl_new.num_cells, gh_new.num_faces)
        ).tocsc()

        # Define the new edge
        e = (gh_new, gl_new)
        # Add to new gb, together with the new face-cell map
        gb_new.add_edge(e, face_cells_new)

        # Create a mortar grid, add to data of new edge
        side_g = {
            mortar_grid.LEFT_SIDE: gl_new.copy(),
            mortar_grid.RIGHT_SIDE: gl_new.copy(),
        }

        # Construct mortar grid, with instructions on which faces belong to which side
        mg = pp.MortarGrid(
            gl_new.dim, side_g, face_cells_new, face_duplicate_ind=face_on_other_side
        )

        d_new = gb_new.edge_props(e)

        d_new["mortar_grid"] = mg

    return gb_new, g_map
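
Usage sketch: extrude a 2d bucket into five layers between z = 0 and z = 1 (gb_2d is an assumed, pre-built two-dimensional bucket).

z = np.linspace(0, 1, 6)
gb_3d, g_map = extrude_grid_bucket(gb_2d, z)
for g_old, m in g_map.items():
    # Each extruded grid gains one dimension; cell_map and face_map relate
    # old entities to their copies in each layer.
    assert m.grid.dim == g_old.dim + 1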
Example 22
def gb_refinement(
    gb: pp.GridBucket, gb_ref: pp.GridBucket, tol: float = 1e-8, mode: str = "nested"
):
    """Wrapper for coarse_fine_cell_mapping to construct mapping for grids in
    GridBucket.

    Adds a node_prop to each grid in gb. The key is 'coarse_fine_cell_mapping',
    and is the mapping generated by 'coarse_fine_cell_mapping(...)'.

    Currently, only nested refinement is supported; more general cases are also
    possible.

    Note: No node prop is added to the reference grids in gb_ref.

    Parameters
    ----------
    gb : pp.GridBucket
        Coarse grid bucket
    gb_ref : pp.GridBucket
        Refined grid bucket
    tol : float, Optional
        Tolerance for point_in_poly* -methods
    mode : str, Optional
        Refinement mode. Defaults to 'nested', corresponds to refinement by splitting.

    Acknowledgement: The code was contributed by Haakon Ervik.

    """

    grids = gb.get_grids()
    grids_ref = gb_ref.get_grids()

    assert len(grids) == len(
        grids_ref
    ), "Weakly check that GridBuckets refer to same domains"
    assert np.allclose(
        np.append(*gb.bounding_box()), np.append(*gb_ref.bounding_box())
    ), "Weakly check that GridBuckets refer to same domains"

    # This method assumes a consistent node ordering between grids. At least assign one.
    gb.assign_node_ordering(overwrite_existing=False)
    gb_ref.assign_node_ordering(overwrite_existing=False)

    # Add node prop on the coarse grid to map from coarse to fine cells.
    gb.add_node_props(keys="coarse_fine_cell_mapping")

    for i in np.arange(len(grids)):
        g, g_ref = grids[i], grids_ref[i]

        node_num, node_num_ref = (
            gb._nodes[g]["node_number"],
            gb_ref._nodes[g_ref]["node_number"],
        )
        assert node_num == node_num_ref, "Weakly check that grids refer to same domain."

        # Compute the mapping for this grid-pair,
        # and assign the result to the node of the coarse gb
        if mode == "nested":
            mapping = structured_refinement(g, g_ref, point_in_poly_tol=tol)
        else:
            raise NotImplementedError("Unknown refinement mode")

        gb.set_node_prop(grid=g, key="coarse_fine_cell_mapping", val=mapping)
Example 23
    def to_ad(
        self,
        gb: pp.GridBucket,
        state: Optional[np.ndarray] = None,
        active_variables: Optional[list] = None,
    ):
        """Evaluate the residual and Jacobian matrix for a given state.

        Parameters:
            gb (pp.GridBucket): GridBucket used to represent the problem. Will be used
                to parse the operators that combine to form this Equation.
            state (np.ndarray, optional): State vector for which the residual and its
                derivatives should be formed. If not provided, the state will be pulled from
                the previous iterate (if this exists), or alternatively from the state
                at the previous time step.

        Returns:
            An Ad-array representation of the residual and Jacobian.

        """
        # Parsing in two stages: First make an Ad-representation of the variable state
        # (this must be done jointly for all variables of the Equation to get all
        # derivatives represented). Then parse the equation by traversing its
        # tree-representation, and parse and combine individual operators.

        # Initialize variables
        prev_vals = np.zeros(self._dof_manager.num_dofs())

        populate_state = state is None
        if populate_state:
            state = np.zeros(self._dof_manager.num_dofs())

        assert state is not None
        for (g, var) in self._dof_manager.block_dof:
            ind = self._dof_manager.dof_ind(g, var)
            if isinstance(g, tuple):
                prev_vals[ind] = gb.edge_props(g, pp.STATE)[var]
            else:
                prev_vals[ind] = gb.node_props(g, pp.STATE)[var]

            if populate_state:
                if isinstance(g, tuple):
                    try:
                        state[ind] = gb.edge_props(g, pp.STATE)[pp.ITERATE][var]
                    except KeyError:
                        state[ind] = gb.edge_props(g, pp.STATE)[var]
                else:
                    try:
                        state[ind] = gb.node_props(g, pp.STATE)[pp.ITERATE][var]
                    except KeyError:
                        state[ind] = gb.node_props(g, pp.STATE)[var]

        # Initialize Ad variables with the current iterates
        if active_variables is None:
            ad_vars = initAdArrays([state[ind] for ind in self._variable_dofs])
            self._ad = {var_id: ad for (var_id, ad) in zip(self._variable_ids, ad_vars)}
        else:
            active_variable_ids = [v.id for v in active_variables]

            ad_variable_ids = list(
                set(self._variable_ids).intersection(active_variable_ids)
            )
            assert all([i in self._variable_ids for i in active_variable_ids])
            ad_variable_local_ids = [
                self._variable_ids.index(i) for i in active_variable_ids
            ]
            ad_variable_dofs = [self._variable_dofs[i] for i in ad_variable_local_ids]
            ad_vars = initAdArrays([state[ind] for ind in ad_variable_dofs])
            self._ad = {var_id: ad for (var_id, ad) in zip(ad_variable_ids, ad_vars)}

        # Also make mappings from the previous iteration.
        if active_variables is None:
            prev_iter_vals_list = [state[ind] for ind in self._prev_iter_dofs]
            self._prev_iter_vals = {
                var_id: val
                for (var_id, val) in zip(self._prev_iter_ids, prev_iter_vals_list)
            }
        else:
            # FIXME: This needs explanations
            prev_iter_vals_list = [state[ind] for ind in self._prev_iter_dofs]
            non_ad_variable_ids = list(set(self._variable_ids) - set(ad_variable_ids))
            non_ad_variable_local_ids = [
                self._variable_ids.index(i) for i in non_ad_variable_ids
            ]
            non_ad_variable_dofs = [
                self._variable_dofs[i] for i in non_ad_variable_local_ids
            ]
            non_ad_vals_list = [state[ind] for ind in non_ad_variable_dofs]
            self._prev_iter_vals = {
                var_id: val
                for (var_id, val) in zip(
                    self._prev_iter_ids + non_ad_variable_ids,
                    prev_iter_vals_list + non_ad_vals_list,
                )
            }

        # Also make mappings from the previous time step.
        prev_vals_list = [prev_vals[ind] for ind in self._prev_time_dofs]
        self._prev_vals = {
            var_id: val for (var_id, val) in zip(self._prev_time_ids, prev_vals_list)
        }

        # Parse operators. This is left to a separate function to facilitate the
        # necessary recursion for complex operators.
        eq = self._parse_operator(self._operator, gb)

        return eq
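
A usage sketch; eq stands for an assumed Equation object built from Ad operators, and the val/jac attributes follow the Ad-array convention used by the module.

ad = eq.to_ad(gb)
residual = ad.val  # residual vector for the current state
jacobian = ad.jac  # sparse Jacobian matrix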
Example 24
    def __init__(
        self,
        gb: pp.GridBucket,
        grids: Optional[List[pp.Grid]] = None,
        edges: Optional[List[Tuple[pp.Grid, pp.Grid]]] = None,
        nd: int = 1,
    ) -> None:
        """Construct mortar projection object.

        The projections will be ordered according to the ordering in grids, or the
        order of the GridBucket iteration over grids. It is critical that the same
        ordering is used by other operators.

        Parameters:
            grids (List of pp.Grid, optional): List of grids for which the projections
                should apply. If not provided, all grids in gb will be used. The order
                 of the grids in the list sets the ordering of the subdomain projections.
            gb (pp.GridBucket): Mixed-dimensional grid.
            edges (List of edges, optional): List of edges for which the projections
                should apply. If not provided, all edges in gb will be used. The order
                of the edges in the list sets the ordering of the mortar projections.
            nd (int, optional): Dimension of the quantities to be projected.

        """
        grids = _grid_list(grids, gb)
        if edges is None:
            edges = [e for e, _ in gb.edges()]

        self._num_edges: int = len(edges)
        self._nd: int = nd

        ## Initialize projections

        cell_projection, face_projection = _subgrid_projections(
            grids, self._nd)

        # sparse blocks are slow; it should be possible to do a right multiplication
        # of local-to-global mortar indices instead of the block.

        # Data structures for constructing the projection operators
        mortar_to_primary_int, mortar_to_primary_avg = [], []
        primary_to_mortar_int, primary_to_mortar_avg = [], []

        mortar_to_secondary_int, mortar_to_secondary_avg = [], []
        secondary_to_mortar_int, secondary_to_mortar_avg = [], []

        # The goal is to construct global projections between grids and mortar grids.
        # The construction takes two stages, and is different for projections to and
        # from the mortar grid:
        # For projections from the mortar grid, a mapping is first made from local
        # mortar numbering to the global grid ordering. In the second stage, the
        # mappings from mortar are stacked to make a global mapping.
        # Projections to the mortar grid are made by first defining projections from
        # global grid numbering to local mortar grids, and then stack the latter.

        for e in edges:
            g_primary, g_secondary = e
            mg: pp.MortarGrid = gb.edge_props(e, "mortar_grid")
            if (g_primary.dim != mg.dim + 1) or g_secondary.dim != mg.dim:
                # This will correspond to DD of sorts; we could handle this
                # by using cell_projections for g_primary and/or
                # face_projection for g_secondary, depending on the exact
                # configuration
                raise NotImplementedError("Non-standard interface.")

            # Projections to primary
            mortar_to_primary_int.append(face_projection[g_primary] *
                                         mg.mortar_to_primary_int(nd))
            mortar_to_primary_avg.append(face_projection[g_primary] *
                                         mg.mortar_to_primary_avg(nd))

            # Projections from primary
            primary_to_mortar_int.append(
                mg.primary_to_mortar_int(nd) * face_projection[g_primary].T)
            primary_to_mortar_avg.append(
                mg.primary_to_mortar_avg(nd) * face_projection[g_primary].T)

            mortar_to_secondary_int.append(cell_projection[g_secondary] *
                                           mg.mortar_to_secondary_int(nd))
            mortar_to_secondary_avg.append(cell_projection[g_secondary] *
                                           mg.mortar_to_secondary_avg(nd))

            secondary_to_mortar_int.append(
                mg.secondary_to_mortar_int(nd) *
                cell_projection[g_secondary].T)
            secondary_to_mortar_avg.append(
                mg.secondary_to_mortar_avg(nd) *
                cell_projection[g_secondary].T)

        # Stack mappings from the mortar horizontally.
        # The projections are wrapped by a pp.ad.Matrix to be compatible with the
        # requirements for processing of Ad operators.
        self.mortar_to_primary_int = Matrix(
            sps.bmat([mortar_to_primary_int]).tocsr())
        self.mortar_to_primary_avg = Matrix(
            sps.bmat([mortar_to_primary_avg]).tocsr())
        self.mortar_to_secondary_int = Matrix(
            sps.bmat([mortar_to_secondary_int]).tocsr())
        self.mortar_to_secondary_avg = Matrix(
            sps.bmat([mortar_to_secondary_avg]).tocsr())

        # Vertical stacking of the projections
        self.primary_to_mortar_int = Matrix(
            sps.bmat([[m] for m in primary_to_mortar_int]).tocsr())
        self.primary_to_mortar_avg = Matrix(
            sps.bmat([[m] for m in primary_to_mortar_avg]).tocsr())
        self.secondary_to_mortar_int = Matrix(
            sps.bmat([[m] for m in secondary_to_mortar_int]).tocsr())
        self.secondary_to_mortar_avg = Matrix(
            sps.bmat([[m] for m in secondary_to_mortar_avg]).tocsr())

        # Also generate a merged version of MortarGrid.sign_of_mortar_sides:
        mats = []
        for e in edges:
            mg = gb.edge_props(e, "mortar_grid")
            mats.append(mg.sign_of_mortar_sides(nd))
        self.sign_of_mortar_sides = Matrix(sps.block_diag(mats))
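
Usage sketch; the class name MortarProjections is inferred from context, and mortar_flux stands for an assumed Ad expression living on the mortar grids.

proj = MortarProjections(gb, nd=1)
# Map a mortar quantity onto the secondary (lower-dimensional) grids
flux_on_secondary = proj.mortar_to_secondary_int * mortar_flux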
Example 25
def gb_coarse_fine_cell_mapping(
    gb: pp.GridBucket, gb_ref: pp.GridBucket, tol=1e-8
) -> None:
    """ Wrapper for coarse_fine_cell_mapping to construct mapping for grids in
    GridBucket.

    Adds a node_prop to each grid in gb. The key is 'coarse_fine_cell_mapping',
    and is the mapping generated by 'coarse_fine_cell_mapping(...)'.

    Note: No node prop is added to the reference grids in gb_ref.

    Parameters
    ----------
    gb : pp.GridBucket
        Coarse grid bucket
    gb_ref : pp.GridBucket
        Refined grid bucket
    tol : float, Optional
        Tolerance for point_in_poly* -methods
    """

    grids = gb.get_grids()
    grids_ref = gb_ref.get_grids()

    assert len(grids) == len(
        grids_ref
    ), "Weakly check that GridBuckets refer to same domains"
    assert np.allclose(
        np.append(*gb.bounding_box()), np.append(*gb_ref.bounding_box())
    ), "Weakly check that GridBuckets refer to same domains"

    # This method assumes a consistent node ordering between grids.
    # At least assign one.
    gb.assign_node_ordering(overwrite_existing=False)
    gb_ref.assign_node_ordering(overwrite_existing=False)

    # Add node prop on the coarse grid to map from coarse to fine cells.
    gb.add_node_props(keys="coarse_fine_cell_mapping")

    for i in np.arange(len(grids)):
        g, g_ref = grids[i], grids_ref[i]

        node_num = gb.node_props(g, "node_number")
        node_num_ref = gb_ref.node_props(g_ref, "node_number")
        assert node_num == node_num_ref, "Weakly check that grids refer to same domain."

        # Compute the mapping for this grid-pair,
        # and assign the result to the node of the coarse gb
        mapping = coarse_fine_cell_mapping(g, g_ref, point_in_poly_tol=tol)
        gb.set_node_prop(g, key="coarse_fine_cell_mapping", val=mapping)
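
End-to-end sketch combining this wrapper with the error computation of Example 19; 'pressure' is a placeholder variable name.

gb_coarse_fine_cell_mapping(gb, gb_ref)
errors = grid_error(gb, gb_ref, variable=["pressure"], variable_dof=[1])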
Example 26
def gb(self, gb: pp.GridBucket):
    """ Set a grid bucket on the class"""
    self._gb = gb
    if gb is not None:
        self.bounding_box = gb.bounding_box(as_dict=True)