Example #1
    def test_csc_slice(self):
        # Test slicing of csc_matrix
        A = sps.csc_matrix(np.array([[0, 0, 0], [1, 0, 0], [0, 0, 3]]))
        rows_0 = sparse_mat.slice_indices(A, np.array([0], dtype=int))
        rows_1 = sparse_mat.slice_indices(A, 1)
        rows_2 = sparse_mat.slice_indices(A, 2)
        cols_split = sparse_mat.slice_indices(A, np.array([0, 2]))
        rows0_2 = sparse_mat.slice_indices(A, np.array([0, 1, 2]))

        self.assertTrue(rows_0 == np.array([1]))
        self.assertTrue(rows_1.size == 0)
        self.assertTrue(rows_2 == np.array([2]))
        self.assertTrue(np.all(cols_split == np.array([1, 2])))
        self.assertTrue(np.all(rows0_2 == np.array([1, 2])))
Example #2
    def test_csr_slice(self):
        # Test slicing of csr_matrix
        A = sps.csr_matrix(np.array([[0, 0, 0],
                                     [1, 0, 0],
                                     [0, 0, 3]]))

        cols_0 = sparse_mat.slice_indices(A, np.array([0]))
        cols_1 = sparse_mat.slice_indices(A, 1)
        cols_2 = sparse_mat.slice_indices(A, 2)
        cols_split = sparse_mat.slice_indices(A, np.array([0, 2]))
        cols0_2 = sparse_mat.slice_indices(A, np.array([0, 1, 2]))

        assert cols_0.size == 0
        assert cols_1 == np.array([0])
        assert cols_2 == np.array([2])
        assert np.all(cols_split == np.array([2]))
        assert np.all(cols0_2 == np.array([0, 2]))
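Both tests above exercise the same contract: for a csc matrix, slice_indices returns the row indices of the stored entries in the requested columns, and for a csr matrix the column indices of the stored entries in the requested rows. The following is a minimal sketch of how such a helper can read the compressed storage directly; it is a reconstruction for illustration, not porepy's actual sparse_mat.slice_indices:

import numpy as np

def slice_indices_sketch(A, ind, return_array_ind=False):
    # A is a csr or csc matrix; ind selects entries along the major axis
    # (rows for csr, columns for csc). Scalars are promoted to 1d arrays.
    ind = np.atleast_1d(np.asarray(ind))
    # Positions in A.indices that belong to the selected major-axis entries.
    array_ind = np.concatenate(
        [np.arange(A.indptr[i], A.indptr[i + 1]) for i in ind])
    if return_array_ind:
        return A.indices[array_ind], array_ind
    return A.indices[array_ind]

With this reading, the assertions above follow directly from the indptr/indices layout: an empty column (csc) or row (csr) contributes an empty segment, and multiple indices simply concatenate their segments.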
Example #3
 def bfs(self, start, color):
     """
     Breadth-first search; assigns the given color to every node reachable from start.
     """
     visited, queue = [], [start]
     while queue:
         node = queue.pop(0)
         if node not in visited:
             visited.append(node)
             neighbours = sparse_mat.slice_indices(self.node_connections,
                                                   node)
             queue.extend(neighbours)
     self.color[visited] = color
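A note on the queue: list.pop(0) shifts the whole list and is O(n) per dequeue, and the membership test on the visited list is O(n) as well. A sketch of an equivalent variant (assuming the same node_connections and color attributes on the surrounding class) uses collections.deque and a set; since all visited nodes receive the same color, the loss of visit order is harmless here:

 from collections import deque

 def bfs(self, start, color):
     """
     Breadth-first search; color every node reachable from start.
     """
     visited, queue = set(), deque([start])
     while queue:
         node = queue.popleft()           # O(1), unlike list.pop(0)
         if node not in visited:          # O(1) set lookup
             visited.add(node)
             queue.extend(sparse_mat.slice_indices(self.node_connections,
                                                   node))
     self.color[list(visited)] = color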
Example #4
def _duplicate_nodes_with_offset(g: pp.Grid, nodes: np.ndarray,
                                 offset: float) -> int:
    """
    Duplicate nodes on a fracture, and perturb the duplicated nodes. This option
    is useful for visualization purposes.

    NOTE: This is a legacy implementation, which should not be invoked directly.
    Instead use duplicate_nodes (more efficient, but without the possibility to
    perturb nodes); that method will invoke the present function if a
    perturbation is requested.

    Parameters:
    ----------
    g         - The grid for which the nodes are duplicated
    nodes     - The nodes to be duplicated
    offset    - How far from the original node the duplications should be
                placed.
    """
    node_count = 0

    # We wish to convert the sparse csc matrix to a sparse
    # csr matrix to easily add rows. However, the conversion sorts the
    # indices, which will change the node order when we convert back. We
    # therefore find the inverse sorting of the nodes of each face.
    # After we have performed the row operations we will map the nodes
    # back to their original position.
    _, iv = _sort_sub_list(g.face_nodes.indices, g.face_nodes.indptr)

    g.face_nodes = g.face_nodes.tocsr()
    # Iterate over each internal node and split it according to the graph.
    # For each cell attached to the node, we check which color the cell has.
    # All cells with the same color are then attached to a new copy of the
    # node.
    cell_nodes = g.cell_nodes().tocsr()

    for node in nodes:
        # t_node takes into account the added nodes.
        t_node = node + node_count
        # Find cells connected to node.
        cells = np.unique(sparse_mat.slice_indices(cell_nodes, node))
        # Find the color of each cell. A group of cells is given the same color
        # if they are connected by faces. This means that all cells on one side
        # of a fracture will have the same color, but a different color than
        # the cells on the other side of the fracture. Equivalently, the cells
        # at an X-intersection will be given four different colors.
        colors = _find_cell_color(g, cells)
        # Find which cells share the same color
        colors, ix = np.unique(colors, return_inverse=True)

        # copy coordinate of old node
        new_nodes = np.repeat(g.nodes[:, t_node, None], colors.size, axis=1)
        faces = np.array([], dtype=int)
        face_pos = np.array([g.face_nodes.indptr[t_node]])
        assert g.cell_faces.getformat() == "csc"
        assert g.face_nodes.getformat() == "csr"
        faces_of_node_t = sparse_mat.slice_indices(g.face_nodes, t_node)

        for j in range(colors.size):
            # For each color we wish to add one node. First we find all faces that
            # are connected to the fracture node, and have the correct cell
            # color
            colored_faces = np.unique(
                sparse_mat.slice_indices(g.cell_faces, cells[ix == j]))

            is_colored = np.in1d(faces_of_node_t,
                                 colored_faces,
                                 assume_unique=True)

            faces = np.append(faces, faces_of_node_t[is_colored])

            # These faces are then attached to new node number j.
            face_pos = np.append(face_pos, face_pos[-1] + np.sum(is_colored))

            # If an offset is given, we will change the position of the nodes.
            # We move the nodes a length of offset away from the fracture(s).
            if offset > 0 and colors.size > 1:
                new_nodes[:, j] -= _avg_normal(
                    g, faces_of_node_t[is_colored]) * offset

        # The total number of faces should not have changed, only their
        # connection to nodes. We can therefore just update the indices and
        # indptr map.
        g.face_nodes.indices[face_pos[0]:face_pos[-1]] = faces
        node_count += colors.size - 1
        g.face_nodes.indptr = np.insert(g.face_nodes.indptr, t_node + 1,
                                        face_pos[1:-1])
        g.face_nodes._shape = (
            g.face_nodes.shape[0] + colors.size - 1,
            g.face_nodes._shape[1],
        )
        # We delete the old node because of the offset. If we do not
        # have an offset we could keep it and add one less node.
        g.nodes = np.delete(g.nodes, t_node, axis=1)
        g.nodes = np.insert(g.nodes, [t_node] * new_nodes.shape[1],
                            new_nodes,
                            axis=1)

        new_point_ind = np.array([g.global_point_ind[t_node]] *
                                 new_nodes.shape[1])
        g.global_point_ind = np.delete(g.global_point_ind, t_node)
        g.global_point_ind = np.insert(g.global_point_ind,
                                       [t_node] * new_point_ind.shape[0],
                                       new_point_ind,
                                       axis=0)

    # Transform back to csc format and fix node ordering.
    g.face_nodes = g.face_nodes.tocsc()
    g.face_nodes.indices = g.face_nodes.indices[iv]  # For fast row operation

    return node_count
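The indptr manipulation above is the heart of the method: a csr row can be split into several rows purely by inserting new row boundaries into indptr and enlarging the row count, without moving the stored data. A tiny self-contained illustration of this trick (it also pokes the private _shape attribute, exactly as the code above does):

import numpy as np
import scipy.sparse as sps

A = sps.csr_matrix(np.array([[1, 0, 2, 3],
                             [0, 4, 0, 0]]))
# Split row 0 after its first two stored entries: insert a new row
# boundary into indptr and grow the row count by one.
A.indptr = np.insert(A.indptr, 1, A.indptr[0] + 2)
A._shape = (A.shape[0] + 1, A.shape[1])
print(A.toarray())
# [[1 0 2 0]
#  [0 0 0 3]
#  [0 4 0 0]]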
Example #5
def duplicate_nodes(g, nodes, offset):
    """
    Duplicate nodes on a fracture. The number of duplications will depend on
    the cell topology around the node. If the node is not on a fracture, 1
    duplicate will be added. If the node is on a single fracture, 2 duplicates
    will be added. If the node is on a T-intersection, 3 duplicates will be
    added. If the node is on an X-intersection, 4 duplicates will be added.
    Similarly for other types of intersections.

    Parameters:
    ----------
    g         - The grid for which the nodes are duplicated
    nodes     - The nodes to be duplicated
    offset    - How far from the original node the duplications should be
                placed.
    """
    # In the case of a non-zero offset (presumably intended for visualization), use a
    # (somewhat slow) legacy implementation which can handle this.
    if offset != 0:
        return _duplicate_nodes_with_offset(g, nodes, offset)

    # Nodes must be duplicated in the array of node coordinates. Moreover, the face-node
    # relation must be updated so that when a node is split in two or more, all faces on
    # each of the splitting lines / planes are assigned the same version / index of the
    # split node. The modification of node numbering further means that the face-node relation
    # must be updated also for faces not directly involved in the splitting.
    #
    # The below implementation consists of the following major steps:
    # 1. Isolate clusters of cells surrounding each node to be split, and make connection maps
    #    that include only cells within each cluster.
    # 2. Use the connection map to further subdivide the clusters into parts that lie on
    #    different sides of dividing lines / planes.
    # 3. Modify the face-node relation by splitting nodes. Also update node numbering in
    #    unsplit nodes.
    # 4. Duplicate split nodes in the coordinate array.

    # Bookkeeping etc.
    cell_node = g.cell_nodes().tocsr()
    face_node = g.face_nodes.tocsc()
    cell_face = g.cell_faces

    num_nodes_to_duplicate = nodes.size

    ## Step 1
    # Create a list where each item holds the cells associated with a node to be expanded.
    cell_clusters = [
        np.unique(sparse_mat.slice_indices(cell_node, n)) for n in nodes
    ]

    # Number of cells in each cluster.
    sz_cell_clusters = [c.size for c in cell_clusters]
    tot_sz = np.sum(sz_cell_clusters)

    # Create a mapping of cells from linear ordering to the clusters.
    # Separate variable for the rows - these will be used to map back from the cluster
    # cell numbering to the standard numbering
    rows_cell_map = np.hstack(cell_clusters)
    cell_map = sps.coo_matrix(
        (np.ones(tot_sz), (rows_cell_map, np.arange(tot_sz))),
        shape=(g.num_cells, tot_sz),
    ).tocsc()

    # Connection map between cells, limited to the cells included in the clusters.
    # Cells may occur more than once in the map (if several of the cell's nodes are to be
    # split) and there may be connections between cells associated with different nodes.
    cf_loc = cell_face * cell_map
    c2c = cf_loc.T * cf_loc
    # All non-zero data signifies connections; simplify the representation
    c2c.data = np.clip(np.abs(c2c.data), 0, 1)

    # The connection matrix is known to be symmetric, and we only need to handle the upper
    # triangular part
    c2c = sps.triu(c2c)

    # Remove matrix elements outside the blocks to decouple connections between cells
    # associated with different nodes. Do this by identifying elements in the sparse
    # storage format outside the blocks, and set their matrix values to zero.
    # This will leave a block diagonal connection matrix, one block per node.

    # All non-zero elements in c2c.
    row_c2c, col_c2c, dat_c2c = sps.find(c2c)

    # Get sorted (increasing columns) version of the matrix. This allows for iteration through
    # the columns of the matrix.
    sort_ind = np.argsort(col_c2c)
    sorted_rows = row_c2c[sort_ind]
    sorted_cols = col_c2c[sort_ind]
    sorted_data = dat_c2c[sort_ind]

    # Array to keep indices to remove
    remove_ind = np.zeros(sorted_rows.size, dtype=bool)

    # Array with the start of the blocks corresponding to each cluster.
    block_start = np.hstack((0, np.cumsum(sz_cell_clusters)))

    # Iteration index for the start of the column group in the matrix fields 'indices' and
    # 'data' (referring to the sparse storage).
    col_group_start: int = 0

    # Loop over all groups of columns (one group per node). Find the matrix elements
    # of this block, take note of elements outside the column indices (these will be
    # couplings to other nodes).
    for bi in range(num_nodes_to_duplicate):
        # Data for this block ends with the first column that belongs to the next block.
        # Note that we only search from the start index of this block, and use this as
        # an offset (saves time).
        col_group_end: int = col_group_start + np.argmax(
            sorted_cols[col_group_start:] == block_start[bi + 1])
        # Special case for the last iteration: the last element in block_start has value
        # one higher than the number of rows, thus the equality above is never met, and
        # argmax returns the first element in the comparison. Correct this to let the
        # slice run to the end of the arrays.
        if bi == num_nodes_to_duplicate - 1:
            col_group_end = sorted_cols.size

        # Indices of elements in these rows.
        block_inds = slice(col_group_start, col_group_end)

        # Rows that are outside this block
        outside = np.logical_or(
            sorted_rows[block_inds] < block_start[bi],
            sorted_rows[block_inds] >= block_start[bi + 1],
        )
        # Mark matrix elements belonging to outside rows for removal
        remove_ind[block_inds][outside] = 1

        # The end of this column group becomes the start of the next one.
        col_group_start = col_group_end

    # Remove all data outside the main blocks.
    sorted_data[remove_ind] = 0

    # Make a new, block-diagonal connection matrix.
    # IMPLEMENTATION NOTE: Going to a csc matrix should be straightforward,
    # since the columns already are sorted. It is however not clear networkx will be faster
    # with a non-coo matrix.
    c2c_loc = sps.coo_matrix((sorted_data, (sorted_rows, sorted_cols)),
                             shape=c2c.shape)
    # Drop all zero elements
    c2c_loc.eliminate_zeros()

    ## Step 2
    # Now the connection matrix only contains connection between cells that share a node
    # to be duplicated. These can again be split into subclusters, that have lost their
    # connections due to the previous splitting of faces.
    # Identify these subclusters by the use of networkx
    graph = nx.Graph(c2c_loc)
    subclusters = [sorted(list(c)) for c in nx.connected_components(graph)]

    # For each subcluster, find its associated node (to be split)
    node_of_subcluster = []
    search_start = 0
    for comp in subclusters:
        # Find the first block whose start index exceeds the first cell in this
        # component, then subtract one to get the block that contains it.
        # See the above loop (col_group_end) for further comments.
        # Also note we could have used any element in comp.
        ind = search_start + np.argmax(
            block_start[search_start:] > comp[0]) - 1
        # Store this node index
        node_of_subcluster.append(ind)
        # Start of next search interval.
        search_start = ind

    node_of_component = np.array(node_of_subcluster)

    ## Step 3
    # Modify the face-node relation by adjusting the node indices (field indices in the
    # sparse storage of the matrix). The duplicated nodes are added right after the
    # original node in the node ordering. Two adjustments are thus needed: First the
    # insertion of extra nodes, second this insertion increases the index of all nodes
    # with higher index.

    # Copy node-indices in the face-node relation. The first copy will preserve the old
    # node ordering. The second will carry the local adjustments due to the node splitting.
    old_node_ind = face_node.indices.copy()
    new_node_ind = face_node.indices.copy()

    # Loop over all the subclusters of cells. The faces of the cells that have the
    # associated node to be split have the node index increased, depending on how many
    # times the node has been encountered before.

    # Count the number of encounters for a node.
    node_occ = np.zeros(num_nodes_to_duplicate, dtype=int)

    # Loop over combination of nodes and subclusters
    for ni, comp in zip(node_of_component, subclusters):

        # If the increase in node index is zero, there is no need to do anything.
        if node_occ[ni] == 0:
            node_occ[ni] += 1
            continue

        # Map cell indexes from the ordering in the clusters back to global ordering
        loc_cells = rows_cell_map[comp]
        # Faces of these cells
        loc_faces = np.unique(sparse_mat.slice_indices(g.cell_faces,
                                                       loc_cells))
        # Nodes of the faces, and indices in the sparse storage format where the nodes
        # are located.
        loc_nodes, data_ind = sparse_mat.slice_indices(face_node,
                                                       loc_faces,
                                                       return_array_ind=True)
        # Indices in the sparse storage that should be increased
        incr_ind = data_ind[loc_nodes == nodes[ni]]
        # Increase the node index according to previous encounters.
        new_node_ind[incr_ind] += node_occ[ni]
        # Take note of this iteration
        node_occ[ni] += 1

    # Count the number of repetitions in the nodes: The unsplit nodes have 1, the split
    # ones depend on the number of identified subclusters.
    repetitions = np.ones(g.num_nodes, dtype=np.int32)
    repetitions[nodes] = np.bincount(node_of_component)
    # The number of added nodes
    added = repetitions - 1
    num_added = added.sum()

    # Array of cumulative increments due to the splitting of nodes with lower index.
    # Put a zero up front to make the adjustment for the nodes with higher index
    increment = np.cumsum(np.hstack((0, added)))

    # The new node indices are formed by combining the two sources of adjustment.
    # Both split and unsplit nodes are impacted by the increments.
    # The increments must be taken with respect to the old indices
    face_node.indices = (new_node_ind + increment[old_node_ind]).astype(
        np.int32)
    # Ensure the right format of the sparse storage. Somehow this got messed up somewhere.
    face_node.indptr = face_node.indptr.astype(np.int32)

    # Adjust the shape of face-nodes to account for the added nodes
    face_node._shape = (g.num_nodes + num_added, g.num_faces)

    # From the number of repetitions of the node (1 for untouched nodes),
    # get mapping from new to old indices.
    # To see how this works, read the documentation of rldecode, including the examples.
    new_2_old_nodes = pp.utils.matrix_compression.rldecode(
        np.arange(repetitions.size), repetitions)
    g.nodes = g.nodes[:, new_2_old_nodes]
    # The global point ind is shared by all split nodes
    g.global_point_ind = g.global_point_ind[new_2_old_nodes]

    # Also map the tags for nodes that are on fracture tips if this is relevant
    # (that is, if the grid is of the highest dimension)
    keys = ["node_is_fracture_tip", "node_is_tip_of_some_fracture"]
    for key in keys:
        if hasattr(g, "tags") and key in g.tags:
            g.tags[key] = g.tags[key][new_2_old_nodes].astype(bool)

    return num_added
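For the pattern used in the last step, rldecode applied to np.arange amounts to np.repeat: each node index is repeated once per copy of the node. A small sketch for intuition (a paraphrase of the behavior relied on above, not porepy's implementation):

import numpy as np

# Node 1 was split into three copies; nodes 0 and 2 are untouched.
repetitions = np.array([1, 3, 1])
new_2_old = np.repeat(np.arange(repetitions.size), repetitions)
print(new_2_old)  # [0 1 1 1 2]
# g.nodes[:, new_2_old] then duplicates the coordinate column of the
# split node while keeping every other column exactly once.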
Example #6
def duplicate_nodes(g, nodes, offset):
    """
    Duplicate nodes on a fracture. The number of duplications will depend on
    the cell topology around the node. If the node is not on a fracture, 1
    duplicate will be added. If the node is on a single fracture, 2 duplicates
    will be added. If the node is on a T-intersection, 3 duplicates will be
    added. If the node is on an X-intersection, 4 duplicates will be added.
    Similarly for other types of intersections.

    Parameters:
    ----------
    g         - The grid for which the nodes are duplicated
    nodes     - The nodes to be duplicated
    offset    - How far from the original node the duplications should be
                placed.
    """
    node_count = 0

    # We wish to convert the sparse csc matrix to a sparse
    # csr matrix to easily add rows. However, the conversion sorts the
    # indices, which will change the node order when we convert back. We
    # therefore find the inverse sorting of the nodes of each face.
    # After we have performed the row operations we will map the nodes
    # back to their original position.

    _, iv = sort_sub_list(g.face_nodes.indices, g.face_nodes.indptr)
    g.face_nodes = g.face_nodes.tocsr()
    # Iterate over each internal node and split it according to the graph.
    # For each cell attached to the node, we check which color the cell has.
    # All cells with the same color are then attached to a new copy of the
    # node.
    cell_nodes = g.cell_nodes().tocsr()
    for node in nodes:
        # t_node takes into account the added nodes.
        t_node = node + node_count
        # Find cells connected to node.
        cells = sparse_mat.slice_indices(cell_nodes, node)
        cells = np.unique(cells)
        # Find the color of each cell. A group of cells is given the same color
        # if they are connected by faces. This means that all cells on one side
        # of a fracture will have the same color, but a different color than
        # the cells on the other side of the fracture. Equivalently, the cells
        # at an X-intersection will be given four different colors.
        colors = find_cell_color(g, cells)
        # Find which cells share the same color
        colors, ix = np.unique(colors, return_inverse=True)
        # copy coordinate of old node
        new_nodes = np.repeat(g.nodes[:, t_node, None], colors.size, axis=1)
        faces = np.array([], dtype=int)
        face_pos = np.array([g.face_nodes.indptr[t_node]])
        for j in range(colors.size):
            # For each color we wish to add one node. First we find all faces that
            # are connected to the fracture node, and have the correct cell
            # color
            local_faces = (g.cell_faces[:, cells[ix == j]]).nonzero()[0]
            local_faces = np.unique(local_faces)
            con_to_node = np.ravel(g.face_nodes[t_node, local_faces].todense())
            faces = np.append(faces, local_faces[con_to_node])
            # These faces are then attached to new node number j.
            face_pos = np.append(face_pos, face_pos[-1] + np.sum(con_to_node))
            # If an offset is given, we will change the position of the nodes.
            # We move the nodes a length of offset away from the fracture(s).
            if offset > 0 and colors.size > 1:
                new_nodes[:, j] -= avg_normal(
                    g, local_faces[con_to_node]) * offset
        # The total number of faces should not have changed, only their
        # connection to nodes. We can therefore just update the indices and
        # indptr map.
        g.face_nodes.indices[face_pos[0]:face_pos[-1]] = faces
        node_count += colors.size - 1
        g.face_nodes.indptr = np.insert(g.face_nodes.indptr, t_node + 1,
                                        face_pos[1:-1])
        g.face_nodes._shape = (g.face_nodes.shape[0] + colors.size - 1,
                               g.face_nodes._shape[1])
        # We delete the old node because of the offset. If we do not
        # have an offset we could keep it and add one less node.
        g.nodes = np.delete(g.nodes, t_node, axis=1)
        g.nodes = np.insert(g.nodes, [t_node] * new_nodes.shape[1],
                            new_nodes,
                            axis=1)

    # Transform back to csc format and fix node ordering.
    g.face_nodes = g.face_nodes.tocsc()
    g.face_nodes.indices = g.face_nodes.indices[iv]  # For fast row operation

    return node_count
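Both offset-based implementations lean on sort_sub_list to undo the index sorting caused by the csc -> csr -> csc round trip. A sketch of the idea (a reconstruction for illustration; the actual helper may differ in detail): for every csc column segment, store the inverse of the sorting permutation, so that indexing the sorted indices with iv restores the original within-column order.

import numpy as np

def sort_sub_list_sketch(indices, indptr):
    # For each column segment [lo, hi) of a csc matrix, sort the indices
    # and record the inverse permutation iv, so that sorted_ind[iv] == indices.
    iv = np.arange(indices.size)
    sorted_ind = indices.copy()
    for lo, hi in zip(indptr[:-1], indptr[1:]):
        perm = np.argsort(indices[lo:hi], kind="stable")
        sorted_ind[lo:hi] = indices[lo:hi][perm]
        iv[lo:hi] = lo + np.argsort(perm, kind="stable")
    return sorted_ind, iv

idx = np.array([2, 0, 1, 3])   # one column with unsorted row indices
ptr = np.array([0, 4])
s, iv = sort_sub_list_sketch(idx, ptr)
assert np.all(s[iv] == idx)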