Example #1
    def test_no_common_points(self):
        p = np.array([[0, 1, 2], [0, 0, 0]])
        p_unique, new_2_old, old_2_new = setmembership.unique_columns_tol(p)

        assert np.allclose(p, p_unique)
        assert np.all(old_2_new == np.arange(3))
        assert np.all(old_2_new == new_2_old)
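
A minimal sketch of what the two index maps mean (assuming the import path porepy.utils.setmembership used throughout these examples): new_2_old holds, for each unique column, the index of an original column, while old_2_new maps every original column to its representative in the unique array.

import numpy as np
from porepy.utils import setmembership

# Three points; the first and the last coincide.
p = np.array([[0.0, 1.0, 0.0],
              [0.0, 0.0, 0.0]])
p_unique, new_2_old, old_2_new = setmembership.unique_columns_tol(p, tol=1e-8)

assert p_unique.shape[1] == 2          # two unique columns remain
assert old_2_new[0] == old_2_new[2]    # duplicates share a representative
assert np.allclose(p, p_unique[:, old_2_new])  # reconstruct the original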
Example #2
    def test_remove_one_point(self):
        p = np.ones((2, 2))
        p_unique, new_2_old, old_2_new = setmembership.unique_columns_tol(p)

        assert np.allclose(p_unique, np.ones((2, 1)))
        assert np.all(old_2_new == np.zeros(2))
        assert np.all(new_2_old == np.zeros(1))
Example #3
 def create_1d_from_nodes(nodes):
     # From a set of nodes, create a 1d grid. Duplicate nodes are removed,
     # and we verify that the nodes are indeed collinear.
     assert cg.is_collinear(nodes, tol=tol)
     sort_ind = cg.argsort_point_on_line(nodes, tol=tol)
     n = nodes[:, sort_ind]
     unique_nodes, _, _ = unique_columns_tol(n, tol=tol)
     g = TensorGrid(np.arange(unique_nodes.shape[1]))
     g.nodes = unique_nodes
     g.compute_geometry()
     return g, sort_ind
Example #4
 def create_1d_from_nodes(nodes):
     # From a set of nodes, create a 1d grid. Duplicate nodes are removed,
     # and we verify that the nodes are indeed collinear.
     if not pp.geometry_property_checks.points_are_collinear(nodes, tol=tol):
         raise ValueError("Nodes are not colinear")
     sort_ind = pp.map_geometry.sort_points_on_line(nodes, tol=tol)
     n = nodes[:, sort_ind]
     unique_nodes, _, _ = unique_columns_tol(n, tol=tol)
     g = TensorGrid(np.arange(unique_nodes.shape[1]))
     g.nodes = unique_nodes
     g.compute_geometry()
     return g, sort_ind
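
A hypothetical driver for the snippet above. This is a sketch: it assumes the function and a module-level tol are in scope, together with the names TensorGrid and unique_columns_tol (e.g. via from porepy import TensorGrid and from porepy.utils.setmembership import unique_columns_tol).

import numpy as np
import porepy as pp
from porepy import TensorGrid
from porepy.utils.setmembership import unique_columns_tol

tol = 1e-8  # the snippet reads this from module scope

# Unsorted nodes on the x-axis; one duplicate at x = 0.5.
nodes = np.array([[0.5, 0.0, 1.0, 0.5],
                  [0.0, 0.0, 0.0, 0.0],
                  [0.0, 0.0, 0.0, 0.0]])
g, sort_ind = create_1d_from_nodes(nodes)
assert g.num_nodes == 3 and g.num_cells == 2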
Example #5
def _segment_2d_split(pts_all, lines, tol):

    # Ensure unique description of points
    pts_all, _, old_2_new = unique_columns_tol(pts_all, tol=tol)
    lines[:2] = old_2_new[lines[:2]]
    to_remove = np.where(lines[0, :] == lines[1, :])[0]
    lines = np.delete(lines, to_remove, axis=1)

    # In some cases the fractures and boundaries impose the same constraint
    # twice, although it is not clear why. Avoid this by uniquifying the lines.
    # This may disturb the line tags in lines[2], but we should not be
    # dependent on those.
    li = np.sort(lines[:2], axis=0)
    _, new_2_old, old_2_new = unique_columns_tol(li, tol=tol)
    lines = lines[:, new_2_old]

    assert np.all(np.diff(lines[:2], axis=0) != 0)

    # We split all fracture intersections so that the new lines do not
    # intersect, except possibly at the end points
    logger.info("Remove edge crossings")
    tm = time.time()

    pts_split, lines_split = pp.intersections.split_intersecting_segments_2d(
        pts_all, lines, tol=tol)
    logger.info("Done. Elapsed time " + str(time.time() - tm))

    # Ensure unique description of points
    pts_split, _, old_2_new = unique_columns_tol(pts_split, tol=tol)
    lines_split[:2] = old_2_new[lines_split[:2]]

    # Remove lines with the same start and end-point.
    # This can be caused by L-intersections, or possibly also if the two
    # endpoints are considered equal under tolerance tol.
    remove_line_ind = np.where(np.diff(lines_split[:2], axis=0)[0] == 0)[0]
    lines_split = np.delete(lines_split, remove_line_ind, axis=1)

    return pts_split, lines_split
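
The relabel-and-prune idiom used above (and repeated in later examples) can be isolated with plain numpy; the index map below is made up for illustration.

import numpy as np

# Endpoint indices of three segments; merging points 1 and 2 makes the
# middle segment degenerate.
lines = np.array([[0, 1, 0],
                  [1, 2, 2]])
old_2_new = np.array([0, 1, 1])  # hypothetical map: points 1 and 2 merged

lines = old_2_new[lines]  # relabel endpoints to the merged numbering
degenerate = np.where(lines[0] == lines[1])[0]
lines = np.delete(lines, degenerate, axis=1)
assert lines.shape[1] == 2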
Example #6
    def test_remove_one_of_three(self):
        p = np.array([[1, 1, 0], [1, 1, 0]])
        p_unique, new_2_old, old_2_new = setmembership.unique_columns_tol(p)

        # The sorting of the output depends on how the unique array is computed
        # (see unique_columns_tol for the various options that may be applied).
        # Do a simple sort to ensure we're safe.
        if p_unique[0, 0] == 0:
            assert np.all(np.sort(old_2_new) == np.array([0, 1, 1]))
            assert np.all(np.sort(new_2_old) == np.array([0, 2]))
        else:
            assert np.all(np.sort(old_2_new) == np.array([0, 0, 1]))
            assert np.all(np.sort(new_2_old) == np.array([0, 2]))

        p_known = np.array([[0, 1], [0, 1]])

        for i in range(p_unique.shape[1]):
            assert np.min(np.sum(np.abs(p_known - p_unique[:, i]),
                                 axis=0)) == 0
        for i in range(p_known.shape[1]):
            assert np.min(np.sum(np.abs(p_known[:, i] - p_unique),
                                 axis=0)) == 0
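
The two loops implement an order-insensitive comparison of point sets: every column of one array must coincide with some column of the other. The same check as a standalone sketch (the helper name columns_match is made up):

import numpy as np

def columns_match(a, b):
    # Each column of a must equal some column of b exactly.
    return all(np.min(np.sum(np.abs(b - a[:, i:i + 1]), axis=0)) == 0
               for i in range(a.shape[1]))

a = np.array([[0, 1], [0, 1]])
b = np.array([[1, 0], [1, 0]])  # same columns, different order
assert columns_match(a, b) and columns_match(b, a)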
Example #7
    def _find_and_split_intersections(self, constraints):
        # Unified description of points and lines for domain, and fractures

        points = self.pts
        edges = self.edges

        if not np.all(np.diff(edges[:2], axis=0) != 0):
            raise ValueError("Found a point edge in splitting of edges")

        const = constants.GmshConstants()

        tags = np.zeros((2, edges.shape[1]), dtype=int)
        tags[0][np.logical_not(self.tags["boundary"])] = const.FRACTURE_TAG
        tags[0][self.tags["boundary"]] = const.DOMAIN_BOUNDARY_TAG
        tags[0][constraints] = const.AUXILIARY_TAG
        tags[1] = np.arange(edges.shape[1])

        edges = np.vstack((edges, tags))

        # Ensure unique description of points
        pts_all, _, old_2_new = unique_columns_tol(points, tol=self.tol)
        edges[:2] = old_2_new[edges[:2]]
        to_remove = np.where(edges[0, :] == edges[1, :])[0]
        lines = np.delete(edges, to_remove, axis=1)

        self.decomposition["domain_boundary_points"] = old_2_new[
            self.decomposition["domain_boundary_points"]]

        # In some cases the fractures and boundaries impose the same constraint
        # twice, although it is not clear why. Avoid this by uniquifying the lines.
        # This may disturb the line tags in lines[2], but we should not be
        # dependent on those.
        li = np.sort(lines[:2], axis=0)
        _, new_2_old, old_2_new = unique_columns_tol(li, tol=self.tol)
        lines = lines[:, new_2_old]

        if not np.all(np.diff(lines[:2], axis=0) != 0):
            raise ValueError(
                "Found a point edge in splitting of edges after merging points"
            )

        # We split all fracture intersections so that the new lines do not
        # intersect, except possibly at the end points
        logger.info("Remove edge crossings")
        tm = time.time()

        pts_split, lines_split = pp.intersections.split_intersecting_segments_2d(
            pts_all, lines, tol=self.tol)
        logger.info("Done. Elapsed time " + str(time.time() - tm))

        # Ensure unique description of points
        pts_split, _, old_2_new = unique_columns_tol(pts_split, tol=self.tol)
        lines_split[:2] = old_2_new[lines_split[:2]]

        self.decomposition["domain_boundary_points"] = old_2_new[
            self.decomposition["domain_boundary_points"]]

        # Remove lines with the same start and end-point.
        # This can be caused by L-intersections, or possibly also if the two
        # endpoints are considered equal under tolerance tol.
        remove_line_ind = np.where(np.diff(lines_split[:2], axis=0)[0] == 0)[0]
        lines_split = np.delete(lines_split, remove_line_ind, axis=1)

        # TODO: This operation may leave points that are not referenced by any
        # lines. We should probably delete these.

        # We find the end points that are shared by more than one intersection
        intersections = self._find_intersection_points(lines_split)

        self.decomposition.update({
            "points": pts_split,
            "edges": lines_split,
            "intersections": intersections,
            "domain": self.domain,
        })
Example #8
def merge_1d_grids(g, h, global_ind_offset=0, tol=1e-4):
    """Merge two 1d grids with non-matching nodes to a single grid.

    The grids should have common start and end points. They can be embedded in
    3d space in a general way.

    The function is primarily intended for merging non-conforming DFN grids.

    Parameters:
        g: 1d tensor grid.
        h: 1d tensor grid
        global_ind_offset (int, defaults to 0): Offset for the global point
            index of the new grid.
        tol (double, defaults to 1e-4): Tolerance for when two nodes are merged
            into one.

    Returns:
        TensorGrid: New tensor grid, containing the combined node definition.
        int: New global ind offset, increased by the number of cells in the
            combined grid.
        np.array (int): Indices of common nodes (after sorting) of g and the
            new grid.
        np.array (int): Indices of common nodes (after sorting) of h and the
            new grid.
        np.array (int): Permutation indices that sort the node coordinates of
            g. The common indices between g and the new grid are found as
            new_grid.nodes[:, g_in_combined] = g.nodes[:, sorted]
        np.array (int): Permutation indices that sort the node coordinates of
            h. The common indices between h and the new grid are found as
            new_grid.nodes[:, h_in_combined] = h.nodes[:, sorted]

    """

    # Nodes of the two 1d grids, combine them
    gp = g.nodes
    hp = h.nodes
    combined = np.hstack((gp, hp))

    num_g = gp.shape[1]
    num_h = hp.shape[1]

    # Keep track of where we put the indices of the original grids
    g_in_full = np.arange(num_g)
    h_in_full = num_g + np.arange(num_h)

    # The tolerance should not be larger than the smallest distance between
    # two points on any of the grids.
    diff_gp = np.min(pp.distances.pointset(gp, True))
    diff_hp = np.min(pp.distances.pointset(hp, True))
    min_diff = np.minimum(tol, 0.5 * np.minimum(diff_gp, diff_hp))

    # Uniquify points
    combined_unique, _, old_2_new = unique_columns_tol(combined, tol=min_diff)
    # Follow locations of the original grid points
    g_in_unique = old_2_new[g_in_full]
    h_in_unique = old_2_new[h_in_full]

    # The combined nodes must be sorted along their natural line.
    # Find the dimension with the largest spatial extension, and sort those
    # coordinates
    max_coord = combined_unique.max(axis=1)
    min_coord = combined_unique.min(axis=1)
    dx = max_coord - min_coord
    sort_dim = np.argmax(dx)

    sort_ind = np.argsort(combined_unique[sort_dim])
    combined_sorted = combined_unique[:, sort_ind]

    # Follow the position of the original nodes through sorting
    _, g_sorted = ismember_rows(g_in_unique, sort_ind)
    _, h_sorted = ismember_rows(h_in_unique, sort_ind)

    num_new_grid = combined_sorted.shape[1]

    # Create a new 1d grid.
    # First use a 1d coordinate to initialize topology
    new_grid = pp.TensorGrid(np.arange(num_new_grid))
    # Then set the right, 3d coordinates
    new_grid.nodes = pp.map_geometry.force_point_collinearity(combined_sorted)

    # Set global point indices
    new_grid.global_point_ind = global_ind_offset + np.arange(num_new_grid)
    global_ind_offset += num_new_grid

    return (
        new_grid,
        global_ind_offset,
        g_sorted,
        h_sorted,
        np.arange(num_g),
        np.arange(num_h),
    )
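
A sketch of driving merge_1d_grids, assuming the function above and its imports (pp, ismember_rows, unique_columns_tol) are in scope. Two grids on the unit interval with non-matching interior nodes are merged into one grid holding the union of the node sets.

import numpy as np
import porepy as pp

g = pp.TensorGrid(np.array([0.0, 0.4, 1.0]))
h = pp.TensorGrid(np.array([0.0, 0.7, 1.0]))

merged, offset, g_in_new, h_in_new, g_sort, h_sort = merge_1d_grids(g, h)
# The merged grid holds the union of the node sets: 0, 0.4, 0.7 and 1.
assert merged.num_nodes == 4
assert offset == merged.num_nodes  # global index offset has advanced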
Example #9
    def __init__(self, p, tet=None, name=None):
        """
        Create a tetrahedral grid from a set of points and cells.

        If the cells are not provided, a Delaunay tessellation will be
        constructed.

        Parameters:
            p (np.array, 3xn_pt): Coordinates of vertices
            tet (np.array, 4xn_tet, optional): Cell vertices. If none is
                provided, a Delaunay triangulation will be performed.

        """
        # The method below is to a large degree translated from MRST.

        self.dim = 3

        # Transform points to column vector if necessary (scipy.Delaunay
        # requires this format)
        if tet is None:
            tet = scipy.spatial.Delaunay(p.transpose())
            tet = tet.simplices
            tet = tet.transpose()

        if name is None:
            name = "TetrahedralGrid"

        num_nodes = p.shape[1]

        nodes = p
        assert num_nodes > 3  # Guard against a transposed point array

        num_cells = tet.shape[1]
        tet = self.__permute_nodes(p, tet)

        # Define face-nodes so that the first column contains fn of cell 0,
        # etc.
        face_nodes = np.vstack(
            (tet[[1, 0, 2]], tet[[0, 1, 3]], tet[[2, 0, 3]], tet[[1, 2, 3]]))
        # Reshape face-nodes into a 3 x 4*num_cells matrix, with the first
        # four columns belonging to cell 0.
        face_nodes = face_nodes.reshape((3, 4 * num_cells), order="F")
        sort_ind = np.squeeze(np.argsort(face_nodes, axis=0))
        face_nodes_sorted = np.sort(face_nodes, axis=0)
        face_nodes, _, cell_faces = setmembership.unique_columns_tol(
            face_nodes_sorted)

        num_faces = face_nodes.shape[1]

        num_nodes_per_face = 3
        face_nodes = face_nodes.ravel(order="F")
        indptr = np.hstack((
            np.arange(0, num_nodes_per_face * num_faces, num_nodes_per_face),
            num_nodes_per_face * num_faces,
        ))
        data = np.ones(face_nodes.shape, dtype=bool)
        face_nodes = sps.csc_matrix((data, face_nodes, indptr),
                                    shape=(num_nodes, num_faces))

        # Cell face relation
        num_faces_per_cell = 4
        indptr = np.hstack((
            np.arange(0, num_faces_per_cell * num_cells, num_faces_per_cell),
            num_faces_per_cell * num_cells,
        ))
        data = np.ones(cell_faces.shape)
        sgn_change = np.where(np.any(np.diff(sort_ind, axis=0) == 1,
                                     axis=0))[0]
        data[sgn_change] = -1
        cell_faces = sps.csc_matrix((data, cell_faces, indptr),
                                    shape=(num_faces, num_cells))

        super(TetrahedralGrid, self).__init__(3, nodes, face_nodes, cell_faces,
                                              "TetrahedralGrid")
Example #10
def triangle_grid(fracs, domain, do_snap_to_grid=False, **kwargs):
    """
    Generate a gmsh grid in a 2D domain with fractures.

    The function uses modified versions of pygmsh and mesh_io,
    both downloaded from github.

    To be added:
    Functionality for tuning gmsh, including grid size, refinements, etc.

    Parameters
    ----------
    fracs: (dictionary) Two fields: points (2 x num_points) np.ndarray,
        edges (2 x num_lines) connections between points, defines fractures.
    box: (dictionary) keys xmin, xmax, ymin, ymax, [together bounding box
        for the domain]
    do_snap_to_grid (boolean, optional): If true, points are snapped to an
        underlying Cartesian grid with resolution tol before geometry
        computations are carried out. This used to be the standard, but
        indications are it is better not to do this. This keyword construct is
        a stop-gap measure to invoke the old functionality if desired. This
        option will most likely disappear in the future.
    **kwargs: To be explored.

    Returns
    -------
    list (length 3): For each dimension (2 -> 0), a list of all grids in
        that dimension.

    Examples
    p = np.array([[-1, 1, 0, 0], [0, 0, -1, 1]])
    lines = np.array([[0, 2], [1, 3]])
    char_h = 0.5 * np.ones(p.shape[1])
    tags = np.array([1, 3])
    fracs = {'points': p, 'edges': lines}
    box = {'xmin': -2, 'xmax': 2, 'ymin': -2, 'ymax': 2}
    g = triangle_grid(fracs, box)

    """
    logger.info("Create 2d mesh")

    # Verbosity level
    verbose = kwargs.get("verbose", 1)

    # File name for communication with gmsh
    file_name = kwargs.get("file_name", "gmsh_frac_file")
    kwargs.pop("file_name", str())

    tol = kwargs.get("tol", 1e-4)

    in_file = file_name + ".geo"
    out_file = file_name + ".msh"

    # Pick out fracture points, and their connections
    frac_pts = fracs["points"]
    frac_con = fracs["edges"]

    # Unified description of points and lines for domain, and fractures
    pts_all, lines, domain_pts = __merge_domain_fracs_2d(
        domain, frac_pts, frac_con)

    # Snap to underlying grid before comparing points
    if do_snap_to_grid:
        pts_all = cg.snap_to_grid(pts_all, tol)

    assert np.all(np.diff(lines[:2], axis=0) != 0)

    # Ensure unique description of points
    pts_all, _, old_2_new = unique_columns_tol(pts_all, tol=tol)
    lines[:2] = old_2_new[lines[:2]]
    to_remove = np.where(lines[0, :] == lines[1, :])[0]
    lines = np.delete(lines, to_remove, axis=1)

    # In some cases the fractures and boundaries impose the same constraint
    # twice, although it is not clear why. Avoid this by uniquifying the lines.
    # This may disturb the line tags in lines[2], but we should not be
    # dependent on those.
    li = np.sort(lines[:2], axis=0)
    _, new_2_old, old_2_new = unique_columns_tol(li, tol=tol)
    lines = lines[:, new_2_old]

    assert np.all(np.diff(lines[:2], axis=0) != 0)

    # We split all fracture intersections so that the new lines do not
    # intersect, except possibly at the end points
    logger.info("Remove edge crossings")
    tm = time.time()
    pts_split, lines_split = cg.remove_edge_crossings(pts_all,
                                                      lines,
                                                      tol=tol,
                                                      snap=do_snap_to_grid)
    logger.info("Done. Elapsed time " + str(time.time() - tm))

    # Ensure unique description of points
    if do_snap_to_grid:
        pts_split = cg.snap_to_grid(pts_split, tol)

    pts_split, _, old_2_new = unique_columns_tol(pts_split, tol=tol)
    lines_split[:2] = old_2_new[lines_split[:2]]

    # Remove lines with the same start and end-point.
    # This can be caused by L-intersections, or possibly also if the two
    # endpoints are considered equal under tolerance tol.
    remove_line_ind = np.where(np.diff(lines_split[:2], axis=0)[0] == 0)[0]
    lines_split = np.delete(lines_split, remove_line_ind, axis=1)

    # TODO: This operation may leave points that are not referenced by any
    # lines. We should probably delete these.

    # We find the end points that are shared by more than one intersection
    intersections = __find_intersection_points(lines_split)

    # Gridding size
    if "mesh_size_frac" in kwargs.keys():
        # Tag points at the domain corners
        logger.info("Determine mesh size")
        tm = time.time()
        boundary_pt_ind = ismember_rows(pts_split, domain_pts, sort=False)[0]
        mesh_size, pts_split, lines_split = tools.determine_mesh_size(
            pts_split, boundary_pt_ind, lines_split, **kwargs)

        logger.info("Done. Elapsed time " + str(time.time() - tm))
    else:
        mesh_size = None

    # gmsh options

    meshing_algorithm = kwargs.get("meshing_algorithm")

    # Create a writer of gmsh .geo-files
    gw = gmsh_interface.GmshWriter(
        pts_split,
        lines_split,
        domain=domain,
        mesh_size=mesh_size,
        intersection_points=intersections,
        meshing_algorithm=meshing_algorithm,
    )
    gw.write_geo(in_file)

    triangle_grid_run_gmsh(file_name, **kwargs)
    return triangle_grid_from_gmsh(file_name, **kwargs)
Example #11
def triangle_grid(fracs, domain, **kwargs):
    """
    Generate a gmsh grid in a 2D domain with fractures.

    The function uses modified versions of pygmsh and mesh_io,
    both downloaded from github.

    To be added:
    Functionality for tuning gmsh, including grid size, refinements, etc.

    Parameters
    ----------
    fracs: (dictionary) Two fields: points (2 x num_points) np.ndarray,
        edges (2 x num_lines) connections between points, defines fractures.
    box: (dictionary) keys xmin, xmax, ymin, ymax, [together bounding box
        for the domain]
    **kwargs: To be explored.

    Returns
    -------
    list (length 3): For each dimension (2 -> 0), a list of all grids in
        that dimension.

    Examples
    p = np.array([[-1, 1, 0, 0], [0, 0, -1, 1]])
    lines = np.array([[0, 2], [1, 3]])
    char_h = 0.5 * np.ones(p.shape[1])
    tags = np.array([1, 3])
    fracs = {'points': p, 'edges': lines}
    box = {'xmin': -2, 'xmax': 2, 'ymin': -2, 'ymax': 2}
    g = triangle_grid(fracs, box)

    """
    # Verbosity level
    verbose = kwargs.get('verbose', 1)

    # File name for communication with gmsh
    file_name = kwargs.get('file_name', 'gmsh_frac_file')
    kwargs.pop('file_name', str())

    tol = kwargs.get('tol', 1e-4)

    in_file = file_name + '.geo'
    out_file = file_name + '.msh'

    # Pick out fracture points, and their connections
    frac_pts = fracs['points']
    frac_con = fracs['edges']

    # Unified description of points and lines for domain, and fractures
    pts_all, lines = __merge_domain_fracs_2d(domain, frac_pts, frac_con)

    # Snap to underlying grid before comparing points
    pts_all = cg.snap_to_grid(pts_all, tol)

    assert np.all(np.diff(lines[:2], axis=0) != 0)

    # Ensure unique description of points
    pts_all, _, old_2_new = unique_columns_tol(pts_all, tol=tol)
    lines[:2] = old_2_new[lines[:2]]
    to_remove = np.where(lines[0, :] == lines[1, :])[0]
    lines = np.delete(lines, to_remove, axis=1)

    # In some cases the fractures and boundaries impose the same constraint
    # twice, although it is not clear why. Avoid this by uniquifying the lines.
    # This may disturb the line tags in lines[2], but we should not be
    # dependent on those.
    li = np.sort(lines[:2], axis=0)
    li_unique, new_2_old, old_2_new = unique_columns_tol(li)
    lines = lines[:, new_2_old]

    assert np.all(np.diff(lines[:2], axis=0) != 0)

    # We split all fracture intersections so that the new lines do not
    # intersect, except possibly at the end points
    pts_split, lines_split = cg.remove_edge_crossings(pts_all, lines, tol=tol)

    # Ensure unique description of points
    pts_split = cg.snap_to_grid(pts_split, tol)
    pts_split, _, old_2_new = unique_columns_tol(pts_split, tol=tol)
    lines_split[:2] = old_2_new[lines_split[:2]]

    # Remove lines with the same start and end-point.
    # This can be caused by L-intersections, or possibly also if the two
    # endpoints are considered equal under tolerance tol.
    remove_line_ind = np.where(np.diff(lines_split[:2], axis=0)[0] == 0)[0]
    lines_split = np.delete(lines_split, remove_line_ind, axis=1)

    # TODO: This operation may leave points that are not referenced by any
    # lines. We should probably delete these.

    # We find the end points that are shared by more than one intersection
    intersections = __find_intersection_points(lines_split)

    # Gridding size
    if 'mesh_size' in kwargs.keys():
        mesh_size, mesh_size_bound, pts_split, lines_split = \
            utils.determine_mesh_size(pts_split, lines_split,
                                      **kwargs['mesh_size'])
    else:
        mesh_size = None
        mesh_size_bound = None

    # gmsh options

    meshing_algorithm = kwargs.get('meshing_algorithm')

    # Create a writer of gmsh .geo-files
    gw = gmsh_interface.GmshWriter(pts_split,
                                   lines_split,
                                   domain=domain,
                                   mesh_size=mesh_size,
                                   mesh_size_bound=mesh_size_bound,
                                   intersection_points=intersections,
                                   meshing_algorithm=meshing_algorithm)
    gw.write_geo(in_file)

    triangle_grid_run_gmsh(file_name, **kwargs)
    return triangle_grid_from_gmsh(file_name, **kwargs)
Example #12
def lines_from_csv(f_name,
                   tagcols=None,
                   tol=1e-8,
                   max_num_fracs=None,
                   **kwargs):
    """ Read csv file with fractures to obtain fracture description.

    Create the grid bucket from a set of fractures stored in a csv file and a
    domain. In the csv file, we assume the following structure:
    FID, START_X, START_Y, END_X, END_Y

    Where FID is the fracture id, START_X and START_Y are the abscissa and
    ordinate of the starting point, and END_X and END_Y are the abscissa and
    ordinate of the ending point.

    To change the delimiter from the default comma, use kwargs passed to
    np.genfromtxt.

    The csv file is assumed to have a header of 1 line. To change this number,
    use the kwarg skip_header.

    Parameters:
        f_name (str): Path to csv file
        tagcols (array-like, int. Optional): Column index where fracture tags
            are stored. 0-offset. Defaults to no columns.
        tol (double, optional): Tolerance for merging points with almost equal
            coordinates.
        max_num_fracs (int, optional): Maximum number of fractures included,
            counting from the start of the file. Defaults to inclusion of all
            fractures.
        **kwargs: keyword arguments passed on to np.genfromtxt.

    Returns:
        np.ndarray (2 x num_pts): Point coordinates used in the fracture
            description.
        np.ndarray (2+numtags x num_fracs): Fractures, described by their start
            and endpoints (first and second row). If tags are assigned to the
            fractures, these are stored in rows 2,...

    """
    npargs = {}
    # EK: Should these really be explicit keyword arguments?
    npargs['delimiter'] = kwargs.get('delimiter', ',')
    npargs['skip_header'] = kwargs.get('skip_header', 1)

    # Extract the data from the csv file
    data = np.genfromtxt(f_name, **npargs)
    if data.size == 0:
        return np.empty((2, 0)), np.empty((2, 0), dtype=int)
    data = np.atleast_2d(data)
    if max_num_fracs is not None:
        data = data[:max_num_fracs]

    num_fracs = data.shape[0] if data.size > 0 else 0
    num_data = data.shape[1] if data.size > 0 else 0

    pt_cols = np.arange(1, num_data)
    if tagcols is not None:
        pt_cols = np.setdiff1d(pt_cols, tagcols)

    pts = data[:, pt_cols].reshape((-1, 2)).T

    # Let the edges correspond to the ordering of the fractures
    edges = np.vstack((np.arange(0, 2 * num_fracs, 2),
                       np.arange(1, 2 * num_fracs, 2)))
    if tagcols is not None:
        edges = np.vstack((edges, data[:, tagcols].T))

    pts, _, old_2_new = unique_columns_tol(pts, tol=tol)
    edges[:2] = old_2_new[edges[:2]]

    to_remove = np.where(edges[0, :] == edges[1, :])[0]
    edges = np.delete(edges, to_remove, axis=1)

    assert np.all(np.diff(edges[:2], axis=0) != 0)

    return pts, edges.astype(int)
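
A hypothetical round trip for lines_from_csv (the file name and contents are made up for illustration; assumes the function above is in scope):

# Two fractures in the FID, START_X, START_Y, END_X, END_Y format.
csv_text = "FID,START_X,START_Y,END_X,END_Y\n0,-1,0,1,0\n1,0,-1,0,1\n"
with open("fracs_demo.csv", "w") as f:
    f.write(csv_text)

pts, edges = lines_from_csv("fracs_demo.csv")
assert pts.shape == (2, 4)    # four distinct endpoints
assert edges.shape == (2, 2)  # two fractures, start/end indices per column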
Example #13
def network_2d_from_csv(f_name,
                        tagcols=None,
                        tol=1e-8,
                        max_num_fracs=None,
                        polyline=False,
                        return_frac_id=False,
                        domain=None,
                        **kwargs):
    """ Read csv file with fractures to obtain fracture description.

    Create the grid bucket from a set of fractures stored in a csv file and a
    domain. In the csv file, we assume one of the two following structures:

        a) FID, START_X, START_Y, END_X, END_Y
        b) FID, PT_X, PT_Y

    Format a) is used to describe fractures consisting of a straight line.
    FID is the fracture id, START_X and START_Y are the abscissa and
    ordinate of the starting point, and END_X and END_Y are the abscissa and
    ordinate of the ending point.

    Format b) can be used to describe polyline fractures: each row in the file
    represents a separate point; points with the same FID will be assigned to
    the same fracture *in the order specified in the file*.

    To change the delimiter from the default comma, use kwargs passed to
    np.genfromtxt.

    The csv file is assumed to have a header of 1 line. To change this number,
    use the kwarg skip_header.

    Parameters:
        f_name (str): Path to csv file
        tagcols (array-like, int. Optional): Column index where fracture tags
            are stored. 0-offset. Defaults to no columns.
        tol (double, optional): Tolerance for merging points with almost equal
            coordinates.
        max_num_fracs (int, optional): Maximum number of fractures included,
            counting from the start of the file. Defaults to inclusion of all
            fractures.
        **kwargs: keyword arguments passed on to np.genfromtxt.

    Returns:
        FractureNetwork2d: Network representation of the fractures

    Raises:
        ValueError: If a fracture consisting of a single point is specified.

    """
    npargs = {}
    # EK: Should these really be explicit keyword arguments?
    npargs["delimiter"] = kwargs.get("delimiter", ",")
    npargs["skip_header"] = kwargs.get("skip_header", 1)

    # Extract the data from the csv file
    data = np.genfromtxt(f_name, **npargs)
    # Shortcut if no data is loaded
    if data.size == 0:
        # we still consider the possibility that a domain is given
        return pp.FractureNetwork2d(domain=domain, tol=tol)
    data = np.atleast_2d(data)

    # Consider subset of fractures if asked for
    if max_num_fracs is not None:
        if max_num_fracs == 0:
            return pp.FractureNetwork2d(tol=tol)
        else:
            data = data[:max_num_fracs]

    num_fracs = data.shape[0] if data.size > 0 else 0
    num_data = data.shape[1] if data.size > 0 else 0

    pt_cols = np.arange(1, num_data)
    if tagcols is not None:
        pt_cols = np.setdiff1d(pt_cols, tagcols)

    pts = data[:, pt_cols].reshape((-1, 2)).T

    if polyline:
        frac_id = data[:, 0]
        fracs = np.unique(frac_id)

        edges = np.empty((2, 0))
        edges_frac_id = np.empty(0)
        pt_ind = np.arange(frac_id.size)

        for fi in fracs:
            ind = np.argwhere(frac_id == fi).ravel()
            if ind.size < 2:
                raise ValueError(
                    "A fracture should consist of at least two points")
            if ind.size == 2:
                edges_loc = np.array([[pt_ind[ind[0]]], [pt_ind[ind[1]]]])
            else:
                start = pt_ind[ind[0]:ind[-1]]
                end = pt_ind[ind[1]:ind[-1] + 1]
                edges_loc = np.vstack((start, end))

            edges = np.hstack((edges, edges_loc))
            edges_frac_id = np.hstack(
                (edges_frac_id, [fi] * edges_loc.shape[1]))

        edges = edges.astype(int)
        edges_frac_id = edges_frac_id.astype(int)

    else:
        # Let the edges correspond to the ordering of the fractures
        edges = np.vstack((np.arange(0, 2 * num_fracs, 2),
                           np.arange(1, 2 * num_fracs, 2)))
        # Fracture id is the first column of data
        edges_frac_id = data[:, 0]
        if tagcols is not None:
            edges = np.vstack((edges, data[:, tagcols].T))

    if domain is None:
        overlap = kwargs.get("domain_overlap", 0)
        domain = pp.bounding_box.from_points(pts, overlap)

    pts, _, old_2_new = unique_columns_tol(pts, tol=tol)

    edges[:2] = old_2_new[edges[:2].astype(int)]

    to_remove = np.where(edges[0, :] == edges[1, :])[0]
    edges = np.delete(edges, to_remove, axis=1).astype(int)

    if not np.all(np.diff(edges[:2], axis=0) != 0):
        raise ValueError

    network = pp.FractureNetwork2d(pts, edges, domain, tol=tol)

    if return_frac_id:
        edges_frac_id = np.delete(edges_frac_id, to_remove)
        return network, edges_frac_id.astype(int)
    else:
        return network
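
A hypothetical usage sketch for the polyline format b), assuming the function above is in scope (the file name and contents are made up for illustration):

import numpy as np

# One polyline fracture given by three points.
csv_text = "FID,PT_X,PT_Y\n0,0,0\n0,1,0\n0,2,1\n"
with open("network_demo.csv", "w") as f:
    f.write(csv_text)

network, frac_id = network_2d_from_csv(
    "network_demo.csv", polyline=True, return_frac_id=True)
# Three points on one polyline yield two edges, both tagged with FID 0.
assert frac_id.size == 2 and np.all(frac_id == 0)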