Example #1
def get_triangles(points):
    from scipy.spatial import Delaunay

    return Delaunay(points).simplices
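
A minimal usage sketch for reference (toy data; scipy's Delaunay accepts any (npoints, ndim) array):

import numpy as np

pts = np.random.rand(20, 2)   # 20 random points in the plane
tris = get_triangles(pts)     # (ntri, 3) array of vertex indices
print(tris.shape)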
Example #2
def thiessen(directions_list, field_ra_deg, field_dec_deg, faceting_radius_deg,
    s=None, check_edges=False, target_ra=None, target_dec=None,
    target_radius_arcmin=None, beam_ratio=None):
    """
    Generates and adds Thiessen polygons or patches to the input directions

    Parameters
    ----------
    directions_list : list of Direction objects
        List of input directions
    field_ra_deg : float
        RA in degrees of field center
    field_dec_deg : float
        Dec in degrees of field center
    faceting_radius_deg : float
        Maximum radius within which faceting will be done. Direction objects
        with positions outside this radius will get small rectangular patches
        instead of thiessen polygons
    s : LSMTool SkyModel object, optional
        Sky model to use to check for sources near facet edges
    check_edges : bool, optional
        If True, check whether any known source falls on a facet edge. If sources
        are found that do, the facet is adjusted
    target_ra : str, optional
        RA of target source. E.g., '14h41m01.884'
    target_dec : str, optional
        Dec of target source. E.g., '+35d30m31.52'
    target_radius_arcmin : float, optional
        Radius in arcmin of target source
    beam_ratio : float, optional
        Ratio of semi-major (N-S) axis to semi-minor (E-W) axis for the primary
        beam

    """
    import lsmtool
    import shapely.geometry
    from shapely.ops import cascaded_union
    from itertools import combinations
    from astropy.coordinates import Angle

    # Select directions inside FOV (here defined as ellipse given by
    # faceting_radius_deg and the mean elevation)
    faceting_radius_pix = faceting_radius_deg / 0.066667 # radius in pixels
    field_x, field_y = radec2xy([field_ra_deg], [field_dec_deg],
        refRA=field_ra_deg, refDec=field_dec_deg)

    fx = []
    fy = []
    for th in range(0, 360, 1):
        fx.append(faceting_radius_pix * np.cos(th * np.pi / 180.0) + field_x[0])
        fy.append(faceting_radius_pix * beam_ratio * np.sin(th * np.pi / 180.0) + field_y[0])
    fov_poly_tuple = tuple([(xp, yp) for xp, yp in zip(fx, fy)])
    fov_poly = Polygon(fx, fy)

    points, _, _ = getxy(directions_list, field_ra_deg, field_dec_deg)
    for x, y, d in zip(points[0], points[1], directions_list):
        dist = fov_poly.is_inside(x, y)
        if dist < 0.0:
            # Source is outside of FOV, so use simple rectangular patches
            d.is_patch = True

    # Now do the faceting (excluding the patches)
    directions_list_thiessen = [d for d in directions_list if not d.is_patch]
    points, _, _ = getxy(directions_list_thiessen, field_ra_deg, field_dec_deg)
    points = points.T

    # Generate array of outer points used to constrain the facets
    nouter = 64
    means = np.ones((nouter, 2)) * points.mean(axis=0)
    offsets = []
    angles = [np.pi/(nouter/2.0)*i for i in range(0, nouter)]
    for ang in angles:
        offsets.append([np.cos(ang), np.sin(ang)])

    # Generate initial facets
    radius = 5.0 * faceting_radius_deg / 0.066667 # radius in pixels
    scale_offsets = radius * np.array(offsets)
    outer_box = means + scale_offsets
    points_all = np.vstack([points, outer_box])
    tri = Delaunay(points_all)
    circumcenters = np.array([_circumcenter(tri.points[t])
                              for t in tri.vertices])
    thiessen_polys = [_thiessen_poly(tri, circumcenters, n)
                      for n in range(len(points_all) - nouter)]

    # Check for vertices that are very close to each other, as this causes
    # problems for the edge adjustment below
    for thiessen_poly in thiessen_polys:
        dup_ind = 0
        for i, (v1, v2) in enumerate(zip(thiessen_poly[:-1], thiessen_poly[1:])):
            if (approx_equal(v1[0], v2[0], rel=1e-6) and
                approx_equal(v1[1], v2[1], rel=1e-6)):
                thiessen_poly.pop(dup_ind)
                dup_ind -= 1
            dup_ind += 1

    # Clip the facets at FOV
    for i, thiessen_poly in enumerate(thiessen_polys):
        polyv = np.vstack(thiessen_poly)
        poly_tuple = tuple([(xp, yp) for xp, yp in zip(polyv[:, 0], polyv[:, 1])])
        p1 = shapely.geometry.Polygon(poly_tuple)
        p2 = shapely.geometry.Polygon(fov_poly_tuple)
        if p1.intersects(p2):
            p1 = p1.intersection(p2)
            xyverts = [np.array([xp, yp]) for xp, yp in
                zip(p1.exterior.coords.xy[0].tolist(),
                p1.exterior.coords.xy[1].tolist())]
            thiessen_polys[i] = xyverts

    # Check for sources near / on facet edges and adjust regions accordingly
    if check_edges:
        log.info('Adjusting facets to avoid sources...')
        RA, Dec = s.getPatchPositions(asArray=True)
        sx, sy = radec2xy(RA, Dec, refRA=field_ra_deg, refDec=field_dec_deg)
        sizes = s.getPatchSizes(units='degree').tolist()
        fluxes_jy = s.getColValues('I', units='Jy', aggregate='sum').tolist()

        if target_ra is not None and target_dec is not None and target_radius_arcmin is not None:
            log.info('Including target ({0}, {1}) in facet adjustment'.format(
                target_ra, target_dec))
            tra = Angle(target_ra).to('deg').value
            tdec = Angle(target_dec).to('deg').value
            tx, ty = radec2xy([tra], [tdec], refRA=field_ra_deg, refDec=field_dec_deg)
            sx.extend(tx)
            sy.extend(ty)
            sizes.append(target_radius_arcmin*2.0/1.2/60.0)
            fluxes_jy.append(0.0)

        # Set minimum size to 2 - 10 * FWHM of resolution of high-res image, scaled by
        # sqrt(flux) to include strong artifacts in the avoidance region
        fwhm = 25.0 / 3600.0 # degrees
        min_sizes = [fwhm*min(10.0, max(2.0, np.sqrt(flux_jy/0.01))) for flux_jy in fluxes_jy]
        sizes = [max(size, min_size) for size, min_size in zip(sizes, min_sizes)]

        # Filter sources to get only those close to a boundary. We need to iterate
        # until no sources are found
        niter = 0
        while niter < 3:
            niter += 1
            ind_near_edge = []
            for i, thiessen_poly in enumerate(thiessen_polys):
                polyv = np.vstack(thiessen_poly)
                poly_tuple = tuple([(x, y) for x, y in zip(polyv[:, 0], polyv[:, 1])])
                poly = Polygon(polyv[:, 0], polyv[:, 1])
                dists = poly.is_inside(sx, sy)
                for j, dist in enumerate(dists):
                    pix_radius = sizes[j] * 1.2 / 2.0 / 0.066667 # radius of source in pixels
                    if abs(dist) < pix_radius and j not in ind_near_edge:
                        ind_near_edge.append(j)
            if len(ind_near_edge) == 0:
                break
            sx_filt = np.array(sx)[ind_near_edge]
            sy_filt = np.array(sy)[ind_near_edge]
            sizes_filt = np.array(sizes)[ind_near_edge]

            # Adjust all facets for each source near a boundary
            for x, y, size in zip(sx_filt, sy_filt, sizes_filt):
                for i, thiessen_poly in enumerate(thiessen_polys):
                    polyv = np.vstack(thiessen_poly)
                    poly_tuple = tuple([(xp, yp) for xp, yp in zip(polyv[:, 0], polyv[:, 1])])
                    poly = Polygon(polyv[:, 0], polyv[:, 1])
                    dist = poly.is_inside(x, y)
                    p1 = shapely.geometry.Polygon(poly_tuple)

                    pix_radius = size * 1.2 / 2.0 / 0.066667 # size of source in pixels
                    if abs(dist) < pix_radius:
                        p2 = shapely.geometry.Point((x, y))
                        p2buf = p2.buffer(pix_radius)
                        if dist < 0.0:
                            # If point is outside, difference the polys
                            p1 = p1.difference(p2buf)
                        else:
                            # If point is inside, union the polys
                            p1 = p1.union(p2buf)
                        try:
                            xyverts = [np.array([xp, yp]) for xp, yp in
                                zip(p1.exterior.coords.xy[0].tolist(),
                                p1.exterior.coords.xy[1].tolist())]
                        except AttributeError:
                            log.error('Source avoidance has caused a facet to be '
                                'divided into multiple parts. Please adjust the '
                                'parameters (e.g., if a target source is specified, '
                                'reduce its radius if possible)')
                            sys.exit(1)
                        thiessen_polys[i] = xyverts

    # Add the final facet and patch info to the directions
    patch_polys = []
    for d in directions_list:
        # Make calibrator patch
        sx, sy = radec2xy([d.ra], [d.dec], refRA=field_ra_deg, refDec=field_dec_deg)

        # Compute size of patch in pixels, with a factor of 0.8 so that
        # sources are not added along the edges (the areas outside of this 80%
        # region are masked during imaging in the make_clean_mask() call with
        # trim_by = 0.2
        patch_width = d.cal_imsize * 0.8 * d.cellsize_selfcal_deg / 0.066667
        x0 = sx[0] - patch_width / 2.0
        y0 = sy[0] - patch_width / 2.0
        selfcal_poly = [np.array([x0, y0]),
                np.array([x0, y0+patch_width]),
                np.array([x0+patch_width, y0+patch_width]),
                np.array([x0+patch_width, y0])]

        if d.is_patch:
            # For sources beyond max radius, set facet poly to calibrator poly
            # and clip to facet polys and previous patch polys
            #
            # First, make a copy of the calibrator poly so that it is not
            # altered by the clipping
            patch_poly = [np.copy(vert) for vert in selfcal_poly]

            # Now loop over the facets and patches and clip
            for facet_poly in thiessen_polys + patch_polys:
                polyv = np.vstack(facet_poly)
                poly_tuple = tuple([(xp, yp) for xp, yp in zip(polyv[:, 0], polyv[:, 1])])
                p1 = shapely.geometry.Polygon(poly_tuple)
                p2 = shapely.geometry.Polygon(patch_poly)
                if p2.intersects(p1):
                    p2 = p2.difference(p1)
                    try:
                        xyverts = [np.array([xp, yp]) for xp, yp in
                            zip(p2.exterior.coords.xy[0].tolist(),
                            p2.exterior.coords.xy[1].tolist())]
                        patch_poly = xyverts
                    except AttributeError:
                        pass

            add_facet_info(d, selfcal_poly, patch_poly, field_ra_deg, field_dec_deg)
            patch_polys.append(patch_poly)
        else:
            facet_poly = thiessen_polys[directions_list_thiessen.index(d)]
            add_facet_info(d, selfcal_poly, facet_poly, field_ra_deg, field_dec_deg)
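
The _circumcenter and _thiessen_poly helpers belong to the surrounding module and are not shown here. The underlying idea is that the vertices of the Thiessen (Voronoi) cells are the circumcenters of the Delaunay triangles; a minimal sketch using one standard circumcenter formula (the helper name below is illustrative, not the module's):

import numpy as np
from scipy.spatial import Delaunay

def circumcenter(tri_pts):
    # circumcenter of a 2-D triangle given as a (3, 2) array
    (ax, ay), (bx, by), (cx, cy) = tri_pts
    d = 2.0 * (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))
    ux = ((ax**2 + ay**2) * (by - cy) + (bx**2 + by**2) * (cy - ay)
          + (cx**2 + cy**2) * (ay - by)) / d
    uy = ((ax**2 + ay**2) * (cx - bx) + (bx**2 + by**2) * (ax - cx)
          + (cx**2 + cy**2) * (bx - ax)) / d
    return np.array([ux, uy])

pts = np.random.rand(10, 2)
tri = Delaunay(pts)
centers = np.array([circumcenter(tri.points[s]) for s in tri.simplices])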
Example #3
def linear_tetrahedron_integration(cell,
                                   eigs,
                                   energies,
                                   weights=None,
                                   comm=world):
    """DOS from linear tetrahedron interpolation.

    cell: 3x3 ndarray-like
        Unit cell.
    eigs: (n1, n2, n3, nbands)-shaped ndarray
        Eigenvalues on a Monkhorst-Pack grid (not reduced).
    energies: 1-d array-like
        Energies where the DOS is calculated (must be a uniform grid).
    weights: ndarray of shape (n1, n2, n3, nbands) or (n1, n2, n3, nbands, nw)
        Weights.  Defaults to a (n1, n2, n3, nbands)-shaped ndarray
        filled with ones.  Can also have an extra dimension if there are
        nw weights.
    comm: communicator object
        MPI communicator for lti_dos

    Returns:

        DOS as an ndarray of same length as energies or as an
        ndarray of shape (nw, len(energies)).

    See:

        Extensions of the tetrahedron method for evaluating
        spectral properties of solids,
        A. H. MacDonald, S. H. Vosko and P. T. Coleridge,
        1979 J. Phys. C: Solid State Phys. 12 2991,
        https://doi.org/10.1088/0022-3719/12/15/008
    """

    from scipy.spatial import Delaunay

    # Find the 6 tetrahedra:
    size = eigs.shape[:3]
    B = (np.linalg.inv(cell) / size).T
    indices = np.array([[i, j, k] for i in [0, 1] for j in [0, 1]
                        for k in [0, 1]])
    dt = Delaunay(np.dot(indices, B))

    if weights is None:
        weights = np.ones_like(eigs)

    if weights.ndim == 4:
        extra_dimension_added = True
        weights = weights[:, :, :, :, np.newaxis]
    else:
        extra_dimension_added = False

    nweights = weights.shape[4]
    dos = np.empty((nweights, len(energies)))

    lti_dos(indices[dt.simplices], eigs, weights, energies, dos, comm)

    dos /= np.prod(size)

    if extra_dimension_added:
        return dos[0]
    return dos
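
The tetrahedral decomposition comes straight from the Delaunay call above: the eight corners of a reciprocal-space grid cell are triangulated into tetrahedra. A standalone sketch with an assumed toy cell (slightly sheared so the corners are not cospherical):

import numpy as np
from scipy.spatial import Delaunay

cell = np.array([[1.0, 0.0, 0.0],
                 [0.1, 1.0, 0.0],
                 [0.0, 0.2, 1.0]])  # toy cell; the shear avoids a degenerate cube
size = (4, 4, 4)
B = (np.linalg.inv(cell) / size).T
indices = np.array([[i, j, k] for i in [0, 1] for j in [0, 1] for k in [0, 1]])
dt = Delaunay(np.dot(indices, B))
print(len(dt.simplices))  # typically 6 tetrahedra (degenerate cells may differ)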
Example #4
def cell_connectivity_exact(disc):
    """

    Calculates contour events of the cells and their neighbors.

    :param disc: An object containing the discretization information.
    :type disc: :class:`bet.sample.discretization`

    :rtype: list
    :returns: list of lists of neighboring cells

    """
    from scipy.spatial import Delaunay
    from collections import defaultdict
    import itertools
    import numpy.linalg as nlinalg

    # Check inputs
    if not isinstance(disc, samp.discretization):
        msg = "The argument must be of type bet.sample.discretization."
        raise wrong_argument_type(msg)

    if not isinstance(disc._input_sample_set, samp.voronoi_sample_set):
        msg = "disc._input_sample_set must be of type bet.sample.voronoi"
        msg += "_sample_set defined with the 2-norm"
        raise wrong_argument_type(msg)
    elif disc._input_sample_set._p_norm != 2.0:
        msg = "disc._input_sample_set must be of type bet.sample.voronoi"
        msg += "_sample_set defined with the 2-norm"
        raise wrong_argument_type(msg)

    num = disc.check_nums()
    # Set up necessary pointers
    if disc.get_io_ptr() is None:
        disc.set_io_ptr()

    if disc._input_sample_set._dim == 1:
        # Adding contours on the left
        s_sort = disc._input_sample_set._values.flat[:].argsort()
        neiList = defaultdict(set)
        for p in range(num):
            order = s_sort[p]
            if order > 0:
                val = np.equal(s_sort, order - 1)
                args = np.argwhere(val)
                neiList[p].add(disc._io_ptr[args[0][0]])
                neiList[args[0][0]].add(disc._io_ptr[p])
    else:
        # Form Delaunay triangulation
        tri = Delaunay(disc._input_sample_set._values)

        # Find neighbors
        neiList = defaultdict(set)
        for p in tri.vertices:
            for i, j in itertools.combinations(p, 2):
                neiList[i].add(disc._io_ptr[j])
                neiList[j].add(disc._io_ptr[i])
    # Get rid of redundant entries
    for i in range(num):
        neiList[i] = list(set(neiList[i]))

    return neiList
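
(tri.vertices above is the older scipy alias for tri.simplices.) The neighbor-finding core of the else-branch, stripped of the BET-specific pointers, is just this pattern:

import itertools
from collections import defaultdict

import numpy as np
from scipy.spatial import Delaunay

pts = np.random.rand(30, 2)              # toy samples
tri = Delaunay(pts)
nbrs = defaultdict(set)
for simplex in tri.simplices:
    for i, j in itertools.combinations(simplex, 2):
        nbrs[i].add(j)                   # points sharing a Delaunay edge
        nbrs[j].add(i)
print(sorted(nbrs[0]))                   # neighbors of sample 0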
Example #5
    def update(self, samples):

        # init - add all samples if no data yet
        if len(self.__data) == 0:
            self.__data = samples
            return

        # =======================================
        # 1.  Reduce data/sample data

        if self.dim_reduction > 0:
            basis, mean, var = ExtractMaxVarComponents(self.__data,
                                                       self.dim_reduction)
            self.log_expl_var.append(var)
        else:
            basis, mean = ExtractSubspace(self.__data, 0.8)

        cluster_reduced = ProjectOntoSubspace(self.__data, mean, basis)
        samples_reduced = ProjectOntoSubspace(samples, mean, basis)
        dims = np.shape(cluster_reduced)
        # select minimum data to build convex hull
        # min_nr_elems = dims[1] + 4
        if self.__verbose:
            print "Reducing dimension: {}->{}".format(
                np.shape(self.__data)[1], dims[1])

        # =======================================
        # 2.  Calculate Convex Hull in subspace

        data_hull = cluster_reduced  # take all samples of data
        hull = Delaunay(data_hull)
        if self.__verbose:
            print "Calculating data hull using {}/{} points".format(
                len(data_hull), len(self.__data))

        # =======================================
        # 3.  Select new samples from outside convex hull

        if not self.__inverted:
            inclusion_mask = np.array([
                False if hull.find_simplex(sample) >= 0 else True
                for sample in samples_reduced
            ])
            if self.__verbose:
                # Todo: use outside mask counting
                nr_elems_outside_hull = np.sum([
                    0 if hull.find_simplex(sample) >= 0 else 1
                    for sample in samples_reduced
                ])
                print "Elements OUTSIDE hull (to include): {}/{}".format(
                    nr_elems_outside_hull, len(samples))
        else:
            inclusion_mask = np.array([
                True if hull.find_simplex(sample) >= 0 else False
                for sample in samples_reduced
            ])

        # add samples (samples need to be np.array)
        self.__data = np.concatenate((self.__data, samples[inclusion_mask]))

        # =======================================
        # 4.  Recalculate hull with newly added points

        # If memory exceeded: Perform unrefinement process -
        # discharge sampling directions with lowest variance contribution
        if self.dim_reduction > 0:
            nr_comps = self.dim_reduction if len(
                self.__data) <= self.max_size else self.dim_removal
            if len(self.__data) > 150:
                nr_comps = self.dim_removal - 1
            basis, mean, var = ExtractMaxVarComponents(self.__data, nr_comps)
        else:
            # automatic dimension selection (based on containing certain variance)
            basis, mean = ExtractSubspace(self.__data, 0.75)

        cluster_reduced = ProjectOntoSubspace(self.__data, mean, basis)
        print "Recuding dimension: {}->{}".format(
            np.shape(self.__data)[1],
            np.shape(cluster_reduced)[1])
        hull = Delaunay(cluster_reduced)

        # =======================================
        # 5.  Discharge samples inside hull

        if not self.__inverted:
            # select samples inside hull
            cl_to_delete = np.array(
                list(
                    set(range(0, len(cluster_reduced))) -
                    set(np.unique(hull.convex_hull))))
            # set(range(len(data_hull))).difference(hull.convex_hull)
        else:
            cl_to_delete = np.array([])
            # select samples on hull
            if len(cluster_reduced) > self.max_size:
                hull_indices = list(np.unique(hull.convex_hull))
                if len(hull_indices) > 0:
                    nr_to_del = 5 if len(hull_indices) > 5 else 0
                    cl_to_delete = np.array(hull_indices[0:nr_to_del])

        # print "Points building convex hull: {}".format(set(np.unique(hull.convex_hull)))
        # print "To delete: {}".format(cl_to_delete)

        if len(cl_to_delete[cl_to_delete < 0]) > 0:
            print(set(np.unique(hull.convex_hull)))
            log.warning("Index elements smaller than 0: {}".format(
                cl_to_delete[cl_to_delete < 0]))

        if self.__log:
            self.log_intra_deleted.append(len(cl_to_delete))
            self.log_cl_size_orig.append(len(self.__data))

        print "Cleaning {} points from inside data".format(len(cl_to_delete))

        # Remove points from inside hull
        self.__data = np.delete(self.__data, cl_to_delete, axis=0)

        # =======================================
        # 6.  KNN point removal: remove similar points

        if self.knn_removal_thresh > 0:
            max_removal = 10 if len(
                self.__data) > self.knn_removal_thresh else 0
            if max_removal > 0:
                filter = KNFilter(self.__data, k=3, threshold=0.25)
                tmp = filter.filter_x_samples(max_removal)
                print "--- Removing {} knn points".format(
                    len(self.__data) - len(tmp))
                self.__data = tmp

            if self.__log:
                self.log_cl_size_reduced.append(len(self.__data))

        print "Cluster size: {}".format(len(self.__data))
Example #6
def get_intersect_block(filename_low, filename_height, output_dir, zoom, end_zoom):
    '''
    In the low-resolution tif, find the terrain tiles affected by the
    high-resolution tif, modify the affected values, and generate terrain files.
    :param filename_low: path of the low-resolution tif file
    :param filename_height: path of the high-resolution tif file
    :param output_dir: output directory for the generated tiles
    :param zoom: starting zoom level
    :param end_zoom: ending zoom level
    '''
    if (not os.path.exists(filename_low)) or (not os.path.exists(filename_height)):
        return None
    data_set_low = gdal.Open(filename_low)
    if data_set_low is None:
        print('Read %s failed' %filename_low)
        return None
    data_set_height = gdal.Open(filename_height)
    if data_set_height is None:
        print('Read %s failed' % filename_height)
        return None

    band_low = data_set_low.GetRasterBand(1)
    if band_low is None:
        print('Read %s band failed' % filename_low)
        return None
    x_size_low, y_size_low = data_set_low.RasterXSize, data_set_low.RasterYSize
    x0_low, dx_low, _, y0_low, _, dy_low = data_set_low.GetGeoTransform()

    band_height = data_set_height.GetRasterBand(1)
    if band_height is None:
        print('Read %s band failed' % filename_height)
        return None
    x_size_height, y_size_height = data_set_height.RasterXSize, data_set_height.RasterYSize
    x0_height, dx_height, _, y0_height, _, dy_height = data_set_height.GetGeoTransform()

    for _zoom in range(zoom, end_zoom + 1):
        # Get the terrain tiles covered by the high-resolution tif
        bounds = get_tif_tile_bounds(filename_height, output_dir, _zoom)
        # [[min_lng, min_lat, max_lng, max_lat], ...]
        if bounds == {}:
            continue
        _, value = next(iter(bounds.items()))
        zoom_size_low = round((value[2]-value[0]) / dx_low * (2 ** (_zoom - zoom)))
        for fname in bounds:
            bound = bounds[fname]
            x_min_low = round((bound[0] - x0_low) / dx_low)
            y_min_low = round((bound[3] - y0_low) / dy_low)
            x_max_low = round((bound[2] - x0_low) / dx_low)
            y_max_low = round((bound[1] - y0_low) / dy_low)
            # Read the low-resolution terrain; it must fully contain the tile. A buffer
            # is added when reading to guard against edge misalignment during smoothing
            buf_size = 8
            buf_size_low = round(buf_size / zoom_size_low * (x_max_low - x_min_low))
            x_begine_low = x_min_low - buf_size_low
            if x_begine_low > x_size_low or x_begine_low < 0:
                print('Low-precision terrain does not completely cover %s' % fname)
                continue
            y_begine_low = y_min_low - buf_size_low
            if y_begine_low > y_size_low or y_begine_low < 0:
                print('Low-precision terrain does not completely cover %s' % fname)
                continue
            x_end_low = x_max_low + buf_size_low
            if x_end_low < 0 or x_end_low > x_size_low:
                print('Low-precision terrain does not completely cover %s' % fname)
                continue
            y_end_low = y_max_low + buf_size_low
            if y_end_low < 0 or y_end_low > y_size_low:
                print('Low-precision terrain does not completely cover %s' % fname)
                continue
            z_low = band_low.ReadAsArray(x_begine_low,
                                         y_begine_low,
                                         x_end_low - x_begine_low,
                                         y_end_low - y_begine_low).astype('f4')
            z_low = cv2.resize(z_low, (zoom_size_low + buf_size * 2, zoom_size_low + buf_size * 2), interpolation=cv2.INTER_LINEAR)

            # # Read the high-resolution terrain
            # x_min_height = round((bound[0] - x0_height) / dx_height)
            # y_min_height = round((bound[3] - y0_height) / dy_height)
            # x_max_height = round((bound[2] - x0_height) / dx_height)
            # y_max_height = round((bound[1] - y0_height) / dy_height)
            #
            # buf_size_height_left = 0
            # if x_min_height > x_size_height:
            #     print('Height-precision terrain does not completely cover %s' % fname)
            #     continue
            # if x_min_height < 0:
            #     buf_size_height_left = round(max((x_min_height - 0) / (x_max_height - x_min_height) * zoom_size_low, 0))
            #     x_min_height = 0
            #
            # buf_size_height_botton = 0
            # if y_min_height > y_size_height:
            #     print('Height-precision terrain does not completely cover %s' % fname)
            #     continue
            # if y_min_height < 0:
            #     buf_size_height_botton = round(
            #         max((y_min_height - 0) / (x_max_height - x_min_height) * zoom_size_low, 0))
            #     y_min_height = 0
            #
            # buf_size_height_right = 0
            # if x_max_height < 0:
            #     print('Height-precision terrain does not completely cover %s' % fname)
            #     continue
            # if x_max_height > x_size_height:
            #     buf_size_height_right = round(
            #         max((x_size_height - x_max_height) / (x_max_height - x_min_height) * zoom_size_low, 0))
            #     x_max_height = x_size_height
            #
            # buf_size_height_top = 0
            # if y_max_height < 0:
            #     print('Height-precision terrain does not completely cover %s' % fname)
            #     continue
            # if y_max_height > y_size_height:
            #     buf_size_height_top = round(
            #         max((y_size_height - y_max_height) / (x_max_height - x_min_height) * zoom_size_low, 0))
            #     y_max_height = y_size_height
            #
            # zoom_size_x = round(zoom_size_low * (x_max_height - x_min_height) / (x_max_height - x_min_height))
            # zoom_size_y = round(zoom_size_low * (y_max_height - y_min_height) / (y_max_height - y_min_height))
            # _z = band_low.ReadAsArray(x_min_height, y_min_height, x_max_height - x_min_height, y_max_height - y_min_height).astype('f4')
            # z_height = cv2.resize(_z, (zoom_size_x, zoom_size_y), interpolation=cv2.INTER_CUBIC)
            #
            # # Offset of the high-resolution terrain relative to the low-resolution terrain
            # lng_height = x0_height + x_min_height * dx_height
            # lat_height = y0_height + y_min_height * dy_height
            # lng_low = x0_low + x_min_low * dx_low
            # lat_low = y0_low + y_min_low * dy_low
            # x_offset = round((lng_height - lng_low) / dx_low)
            # y_offset = round((lat_height - lat_low) / dy_low)

            # for y in range(zoom_size_y):
            #     if y + y_offset >= len(z_low):
            #         continue
            #     for x in range(zoom_size_x):
            #         if x + x_offset >= len(z_low[y + y_offset]):
            #             continue
            #         z_height_v = z_height[y][x]
            #         z_low_v = z_low[y + y_offset][x + x_offset]
            #         if z_height_v != 0 and z_height_v != z_low_v:
            #             z_low[y + y_offset][x + x_offset] = z_height_v

            z_low = z_low[buf_size:-buf_size, buf_size:-buf_size]

            # Generate the terrain file
            points_xyz = []
            z_low = np.array(z_low)
            for x in range(z_low.shape[1]):
                for y in range(z_low.shape[0]):
                    point_xyz = [x, y, z_low[y, x]]
                    points_xyz.append(point_xyz)
            points_xyz = np.array(points_xyz)
            points_xy = points_xyz[:, 0:2]
            tri = Delaunay(points_xy)
            index = tri.simplices
            points_xyz = (points_xyz + (x_min_low, y_min_low, 0)) * (dx_low, dy_low, 1) + (x0_low, y0_low, 0)
            write_terrain(fname, points_xyz, index)
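
The per-pixel double loop that builds points_xyz can also be expressed with a mesh grid; a sketch on toy data:

import numpy as np
from scipy.spatial import Delaunay

z = np.random.rand(4, 5).astype('f4')                  # stand-in for z_low
ys, xs = np.mgrid[0:z.shape[0], 0:z.shape[1]]
points_xyz = np.column_stack([xs.ravel(), ys.ravel(), z.ravel()])
tri = Delaunay(points_xyz[:, 0:2])
print(points_xyz.shape, tri.simplices.shape)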
Example #7
for i in range(len(orgdata)):
    print(orgdata[i])
print(" ...", len(orgdata), "data records loaded")

newdata = []
zdata = []

for i in range(len(orgdata)):
    zdata.append(orgdata[i][2])
    for j in range(2):
        newdata.append(orgdata[i][j])

pts = np.array(newdata).reshape(-1, 2)
ztmp = np.array(zdata)

tri = Delaunay(pts)

newdata = pts.tolist()

for i in range(len(newdata)):
    newdata[i].append(zdata[i])

pts = np.array(newdata)

fig = plt.figure()

print("ドロネー完了")
print(len(pts))
print(len(pts[tri.simplices]))

# Output

Example #8
def flatten(img):
    points = []
    h, w = np.shape(img)

    # Add all valid points to "points"
    for i in range(h):
        for j in range(w):
            if not img[i, j] == 0 and img[i, j] <= 40:
                points.append([i, j, img[i, j]])

    points = np.asarray(points)
    point_list = list(points)
    N = len(point_list)

    # Use RANSAC to get ground plane as "gnd_list" and the rest as "obj_list"
    max_iterations = 100
    goal_inliers = len(point_list) * 0.3
    m, _ = run_ransac(point_list, estimate,
                      lambda x, y: is_inlier(x, y, 0.0005), 3, goal_inliers,
                      max_iterations)
    list_1, list_2 = get_others(point_list, m,
                                lambda x, y: is_inlier(x, y, 0.03))

    a, b, c, d = m
    _, _, zz = plot_plane(a, b, c, d, h, w)
    h1 = int(round(np.average(zz)))

    goal_inliers = len(list_2) * 0.3
    m, _ = run_ransac(list_2, estimate, lambda x, y: is_inlier(x, y, 0.0005),
                      3, goal_inliers, max_iterations)
    list_3, list_4 = get_others(list_2, m, lambda x, y: is_inlier(x, y, 0.03))

    a, b, c, d = m
    _, _, zz = plot_plane(a, b, c, d, h, w)
    h2 = int(round(np.average(zz)))

    if h1 < h2:
        gnd_list = list_1
        obj_list = list_2
        gnd_height = h1
        step_height = h2
    else:
        gnd_list = list_3
        obj_list = list_1 + list_4
        gnd_height = h2
        step_height = h1

    if len(obj_list):
        obj = np.asarray(obj_list)
        filt_obj_list = []
        obj_2d = obj[:, :2]

        if len(obj_list) > N * 0.1:
            topview = np.zeros((h, w, 2))
            obj_img = np.zeros((h, w))
            for o in obj_list:
                obj_img[o[0], o[1]] = o[2]
            obj_img = obj_img.astype(np.uint8)
            obj_img = cv2.bilateralFilter(obj_img, 5, 75, 75)
            for o in obj_list:
                if obj_img[o[0], o[1]] > gnd_height + 1:
                    filt_obj_list.append(o)
                else:
                    gnd_list.append([o[0], o[1], gnd_height])

        if len(filt_obj_list):
            filt_obj = np.asarray(filt_obj_list)
            filt_obj_2d = filt_obj[:, :2]

            # CLUSTER
            estimator = AgglomerativeClustering(linkage='single')
            estimator.fit(obj_2d)
            labels = estimator.labels_
            cluster_list = [[] for c in range(estimator.n_clusters)]
            for idx, val in enumerate(obj_list):
                cluster_list[labels[idx]].append(val)

            for idx, cluster in enumerate(cluster_list):
                N_total = len(cluster)
                N_now = N_total
                cluster = np.asarray(cluster)
                h_cnt = {}
                for p in cluster:
                    if not p[2] in h_cnt:
                        h_cnt[p[2]] = 0
                    h_cnt[p[2]] += 1

                while h_cnt:
                    height = max(h_cnt)
                    height_count = h_cnt[height]
                    del h_cnt[height]
                    if height_count / float(N_total) < 0.1 or height_count < 10:
                        continue
                    plane = cluster[cluster[:, 2] == height]
                    cluster = cluster[cluster[:, 2] != height]

                    plane_2d = plane[:, :2]
                    N_now = len(cluster)

                    try:
                        hull = ConvexHull(plane_2d)
                    except Exception as e:
                        continue
                    edge_2d = np.array(plane_2d[hull.vertices])
                    c_x = sum([p[0] for p in plane_2d]) / len(plane_2d)
                    c_y = sum([p[1] for p in plane_2d]) / len(plane_2d)

                    tri = Delaunay(edge_2d)

                    fill = []
                    for i in range(min(edge_2d[:, 0]), max(edge_2d[:, 0]) + 1):
                        for j in range(min(edge_2d[:, 1]),
                                       max(edge_2d[:, 1]) + 1):
                            fill.append([i, j])
                    fill = np.array(fill)
                    mask = in_hull(fill, tri)
                    fill = fill[mask]
                    if topview[c_x, c_y, 0] == 0 or abs(topview[c_x, c_y, 1] -
                                                        height) > 2:
                        for f in fill:
                            if topview[f[0], f[1], 0] == 0:
                                img[f[0], f[1]] = height
                            topview[f[0], f[1], 0] = height
                            topview[f[0], f[1], 1] = height
                    else:
                        new_height = topview[c_x, c_y, 0]
                        for f in fill:
                            if topview[f[0], f[1], 0] == 0:
                                img[f[0], f[1]] = new_height
                            topview[f[0], f[1], 0] = new_height
                            topview[f[0], f[1], 1] = height

    if len(gnd_list):
        gnd = np.asarray(gnd_list)
        for g in gnd:
            img[g[0], g[1]] = 0

    img[img > step_height] = 0
    # print("Out")
    # print(h,w,img[0,0],img[0,w-1],img[h-1,0],img[h-1,w-1])
    return img
Example #9
def _make_pixel_cache(subject,
                      xfmname,
                      height=1024,
                      thick=32,
                      depth=0.5,
                      sampler='nearest'):
    from scipy import sparse
    from scipy.spatial import Delaunay
    flat, polys = db.get_surf(subject, "flat", merge=True, nudge=True)
    valid = np.unique(polys)
    fmax, fmin = flat.max(0), flat.min(0)
    size = fmax - fmin
    aspect = size[0] / size[1]
    width = int(aspect * height)
    grid = np.mgrid[fmin[0]:fmax[0]:width * 1j,
                    fmin[1]:fmax[1]:height * 1j].reshape(2, -1)

    mask, extents = get_flatmask(subject, height=height)
    assert mask.shape[0] == width and mask.shape[1] == height

    # Get barycentric coordinates
    dl = Delaunay(flat[valid, :2])
    simps = dl.find_simplex(grid.T[mask.ravel()])
    missing = simps == -1
    tfms = dl.transform[simps]
    l1, l2 = (tfms[:, :2].transpose(1, 2, 0) *
              (grid.T[mask.ravel()] - tfms[:, 2]).T).sum(1)
    l3 = 1 - l1 - l2

    ll = np.vstack([l1, l2, l3])
    ll[:, missing] = 0

    from ..mapper import samplers
    xfm = db.get_xfm(subject, xfmname, xfmtype='coord')
    sampclass = getattr(samplers, sampler)

    # Transform fiducial vertex locations to pixel locations using barycentric xfm
    try:
        pia, polys = db.get_surf(subject, "pia", merge=True, nudge=False)
        wm, polys = db.get_surf(subject, "wm", merge=True, nudge=False)
        piacoords = xfm(
            (pia[valid][dl.vertices][simps] * ll[np.newaxis].T).sum(1))
        wmcoords = xfm(
            (wm[valid][dl.vertices][simps] * ll[np.newaxis].T).sum(1))

        valid_p = np.array([
            np.all((0 <= piacoords), axis=1), piacoords[:, 0] < xfm.shape[2],
            piacoords[:, 1] < xfm.shape[1], piacoords[:, 2] < xfm.shape[0]
        ])
        valid_p = np.all(valid_p, axis=0)

        valid_w = np.array([
            np.all((0 <= wmcoords), axis=1), wmcoords[:, 0] < xfm.shape[2],
            wmcoords[:, 1] < xfm.shape[1], wmcoords[:, 2] < xfm.shape[0]
        ])
        valid_w = np.all(valid_w, axis=0)

        valid = np.logical_and(valid_p, valid_w)
        vidx = np.nonzero(valid)[0]
        mapper = sparse.csr_matrix((mask.sum(), np.prod(xfm.shape)))
        if thick == 1:
            i, j, data = sampclass(
                piacoords[valid] * depth + wmcoords[valid] * (1 - depth),
                xfm.shape)
            mapper = mapper + sparse.csr_matrix(
                (data / float(thick), (vidx[i], j)), shape=mapper.shape)
            return mapper

        for t in np.linspace(0, 1, thick + 2)[1:-1]:
            i, j, data = sampclass(
                piacoords[valid] * t + wmcoords[valid] * (1 - t), xfm.shape)
            mapper = mapper + sparse.csr_matrix(
                (data / float(thick), (vidx[i], j)), shape=mapper.shape)
        return mapper

    except IOError:
        fid, polys = db.get_surf(subject, "fiducial", merge=True)
        fidcoords = xfm(
            (fid[valid][dl.vertices][simps] * ll[np.newaxis].T).sum(1))

        valid = reduce(np.logical_and, [
            reduce(np.logical_and,
                   (0 <= fidcoords).T), fidcoords[:, 0] < xfm.shape[2],
            fidcoords[:, 1] < xfm.shape[1], fidcoords[:, 2] < xfm.shape[0]
        ])

        vidx = np.nonzero(valid)[0]

        i, j, data = sampclass(fidcoords[valid], xfm.shape)
        csrshape = mask.sum(), np.prod(xfm.shape)
        return sparse.csr_matrix((data, (vidx[i], j)), shape=csrshape)
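
The l1, l2, l3 blocks above are barycentric coordinates recovered from Delaunay.transform; the same recipe in isolation (toy data):

import numpy as np
from scipy.spatial import Delaunay

pts = np.random.rand(20, 2)
dl = Delaunay(pts)
q = np.random.rand(5, 2)                        # query points
s = dl.find_simplex(q)                          # -1 where q is outside the hull
T = dl.transform[s]                             # affine maps of the containing simplices
b = np.einsum('ijk,ik->ij', T[:, :2], q - T[:, 2])
bary = np.c_[b, 1 - b.sum(axis=1)]              # (l1, l2, l3); only valid where s >= 0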
Example #10
print('Enter the number of points along the rho axis')
n_rho = int(input())
print('Enter the number of points along the z axis')  # assumed prompt; n_z is needed below
n_z = int(input())

z_0, z_f = 0, 200
rho_0, rho_f = 300, 320

rho = np.linspace(rho_0, rho_f, n_rho)
z = np.linspace(z_0, z_f, n_z)
points = []
for r_i in rho:
    for z_i in z:
        points.append([z_i, r_i])
points = np.array(points)
Th = Delaunay(points)  # build the Delaunay triangulation
'''
Everywhere below:
K - coordinates of a simplex finite element in R^2
K = [[z1,r1],[z2,r2],[z3,r3]]
'''


# Compute the Jacobian determinant
def detB(K):
    z1 = K[0][0]
    z2 = K[1][0]
    z3 = K[2][0]
    r1 = K[0][1]
    r2 = K[1][1]
    r3 = K[2][1]
    # the snippet was truncated here; an assumed completion with the standard
    # Jacobian determinant of the affine simplex map
    return (z2 - z1) * (r3 - r1) - (z3 - z1) * (r2 - r1)

Example #11
def in_hull(p, hull):
    if not isinstance(hull, Delaunay):
        hull = Delaunay(hull)

    return hull.find_simplex(p) >= 0
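
A usage sketch for in_hull (assuming numpy and scipy.spatial.Delaunay are in scope, as above):

cloud = np.random.rand(20, 2)                   # hull defined by a 2-D point cloud
queries = np.array([[0.5, 0.5], [2.0, 2.0]])
print(in_hull(queries, cloud))                  # e.g. [ True False]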
Example #12
def triangulate_poly(verts):
    from scipy.spatial import Delaunay
    return Delaunay(verts).simplices
Example #13
cv.imwrite("output2.jpg", output2)
"""
if quad2.toMatrix():
	print(quad2.Matrix)
"""

cv.imwrite('mask.jpg', quad2.Matrix)
cv.imwrite('mask2.jpg', quad2.Opening)

cv.imwrite('canny.jpg', cv.Canny(image, 100, 250))

points = quad2.getPoints()
print('check point a')

tri = Delaunay(points)
plt.triplot(points[:, 0], points[:, 1], tri.simplices.copy())
plt.plot(points[:, 0], points[:, 1], 'o')
plt.show()

points2 = np.array(quad2.Edges)
print(tri.simplices.copy())

tri2 = Delaunay(points2)
plt.triplot(points2[:, 0], points2[:, 1], tri2.simplices.copy())
plt.plot(points2[:, 0], points2[:, 1], 'o')
plt.show()

pointsA = quad2.getPoints()
print('check point a')
Example #14
    def find_adsorption_sites(self,
                              distance=2.0,
                              put_inside=True,
                              symm_reduce=1e-2,
                              near_reduce=1e-2,
                              positions=['ontop', 'bridge', 'hollow'],
                              no_obtuse_hollow=True):
        """
        Finds surface sites according to the above algorithm.  Returns
        a list of corresponding cartesian coordinates.

        Args:
            distance (float): distance from the coordinating ensemble
                of atoms along the miller index for the site (i.e.
                the distance from the slab itself)
            put_inside (bool): whether to put the site inside the cell
            symm_reduce (float): symm reduction threshold
            near_reduce (float): near reduction threshold
            positions (list): which positions to include in the site finding
                "ontop": sites on top of surface sites
                "bridge": sites at edges between surface sites in Delaunay
                    triangulation of surface sites in the miller plane
                "hollow": sites at centers of Delaunay triangulation faces
                "subsurface": subsurface positions projected into miller plane
            no_obtuse_hollow (bool): flag to indicate whether to include
                obtuse triangular ensembles in hollow sites
        """
        ads_sites = {k: [] for k in positions}
        if 'ontop' in positions:
            ads_sites['ontop'] = [s.coords for s in self.surface_sites]
        if 'subsurface' in positions:
            # Get highest site
            ref = self.slab.sites[np.argmax(self.slab.cart_coords[:, 2])]
            # Project diff between highest site and subs site into miller
            ss_sites = [
                self.mvec * np.dot(ref.coords - s.coords, self.mvec) + s.coords
                for s in self.subsurface_sites()
            ]
            ads_sites['subsurface'] = ss_sites
        if 'bridge' in positions or 'hollow' in positions:
            mesh = self.get_extended_surface_mesh()
            sop = get_rot(self.slab)
            dt = Delaunay([sop.operate(m.coords)[:2] for m in mesh])
            # TODO: refactor below to properly account for >3-fold
            for v in dt.simplices:
                if -1 not in v:
                    dots = []
                    for i_corner, i_opp in zip(range(3),
                                               ((1, 2), (0, 2), (0, 1))):
                        corner, opp = v[i_corner], [v[o] for o in i_opp]
                        vecs = [
                            mesh[d].coords - mesh[corner].coords for d in opp
                        ]
                        vecs = [vec / np.linalg.norm(vec) for vec in vecs]
                        dots.append(np.dot(*vecs))
                        # Add bridge sites at midpoints of edges of D. Tri
                        if 'bridge' in positions:
                            ads_sites["bridge"].append(
                                self.ensemble_center(mesh, opp))
                    # Prevent addition of hollow sites in obtuse triangles
                    obtuse = no_obtuse_hollow and (np.array(dots) < 1e-5).any()
                    # Add hollow sites at centers of D. Tri faces
                    if 'hollow' in positions and not obtuse:
                        ads_sites['hollow'].append(
                            self.ensemble_center(mesh, v))
        ads_sites['all'] = sum(ads_sites.values(), [])
        for key, sites in ads_sites.items():
            # Pare off outer sites for bridge/hollow
            if key in ['bridge', 'hollow']:
                frac_coords = [
                    self.slab.lattice.get_fractional_coords(ads_site)
                    for ads_site in sites
                ]
                frac_coords = [
                    frac_coord for frac_coord in frac_coords
                    if (frac_coord[0] > 1 and frac_coord[0] < 4
                        and frac_coord[1] > 1 and frac_coord[1] < 4)
                ]
                sites = [
                    self.slab.lattice.get_cartesian_coords(frac_coord)
                    for frac_coord in frac_coords
                ]
            if near_reduce:
                sites = self.near_reduce(sites, threshold=near_reduce)
            if put_inside:
                sites = [
                    put_coord_inside(self.slab.lattice, coord)
                    for coord in sites
                ]
            if symm_reduce:
                sites = self.symm_reduce(sites, threshold=symm_reduce)
            sites = [site + distance * self.mvec for site in sites]

            ads_sites[key] = sites
        return ads_sites
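
The bridge/hollow construction reduces to taking edge midpoints and face centers of the Delaunay triangles; a standalone sketch on toy 2-D site coordinates (ensemble_center in the method presumably averages the member coordinates):

import numpy as np
from scipy.spatial import Delaunay

surf = np.random.rand(12, 2)            # toy surface sites projected into the miller plane
dt = Delaunay(surf)
bridge, hollow = [], []
for v in dt.simplices:
    for i, j in [(0, 1), (0, 2), (1, 2)]:
        bridge.append(surf[[v[i], v[j]]].mean(axis=0))  # midpoints of triangle edges
    hollow.append(surf[v].mean(axis=0))                 # centers of triangle faces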
Example #15

import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay, delaunay_plot_2d

# The Delaunay triangulation of a set of random points:

points = np.random.rand(30, 2)
tri = Delaunay(points)

# Plot it:

_ = delaunay_plot_2d(tri)
plt.show()
Example #16
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay

import pylab
import numpy

# 30 random points (x, y) in the plane
x, y = numpy.array(numpy.random.standard_normal((2, 30)))
print(x)
print(y)
# Delaunay expects an (npoints, ndim) array, so stack the coordinates
tri = Delaunay(numpy.vstack((x, y)).T)

for t in tri.simplices:  # t[0], t[1], t[2] are the point indexes of the triangle
    t_i = [t[0], t[1], t[2], t[0]]
    print(t_i)
    pylab.plot(x[t_i], y[t_i])

pylab.plot(x, y, 'o')
pylab.show()
Example #17
def generate_quadric(K, nstep=50, ax=1, ay=1, random_sampling=True,
                     ratio=0.2, random_distribution_type='gaussian'):
    """
    generate a quadric mesh
    ratio and random_distribution_type parameters are unused if
    random_sampling is set to False
    :param K:
    :param nstep:
    :param ax:
    :param ay:
    :param random_sampling:
    :param ratio:
    :param random_distribution_type:
    :return:
    """

    # Parameters
    xmin, xmax = [-ax, ax]
    ymin, ymax = [-ay, ay]

    # Coordinates
    stepx = (xmax - xmin) / nstep
    x = np.arange(xmin, xmax, stepx)
    # x, stepx = np.linspace(xmin, xmax, nstep, retstep=True)
    stepy = stepx * np.sqrt(3) / 2  # to ensure equilateral faces
    y = np.arange(ymin, ymax, stepy)
    # y = np.linspace(ymin, ymax, nstep)
    X, Y = np.meshgrid(x, y)
    X[::2] += stepx / 2
    # Y += np.sqrt(3) / 2
    X = X.flatten()
    Y = Y.flatten()

    if random_sampling:
        sigma = stepx * ratio  # characteristic size of the mesh * ratio
        nb_vert = len(x) * len(y)
        if random_distribution_type == 'gamma':
            theta = np.random.rand(nb_vert, ) * np.pi * 2
            mean = sigma
            variance = sigma ** 2
            radius = \
                np.random.gamma(mean ** 2 / variance, variance / mean, nb_vert)
            X = X + radius * np.cos(theta)
            Y = Y + radius * np.sin(theta)
        elif random_distribution_type == 'uniform':
            # the perturbation must have one entry per vertex to broadcast with X and Y
            X = X + np.random.uniform(-1, 1, nb_vert)
            Y = Y + np.random.uniform(-1, 1, nb_vert)
        else:
            X = X + sigma * np.random.randn(nb_vert, )
            Y = Y + sigma * np.random.randn(nb_vert, )

    # Delaunay triangulation, based on scipy binding of Qhull.
    # See https://scipy.github.io/devdocs/generated/scipy.spatial.Delaunay.html#scipy.spatial.Delaunay
    # and http://www.qhull.org/html/qdelaun.htm for more information
    faces_tri = Delaunay(np.vstack((X, Y)).T, qhull_options='QJ Qt Qbb')# Qbb Qc Qz Qj')

    Z = quadric(K[0], K[1])(X, Y)
    coords = np.array([X, Y, Z]).transpose()

    return trimesh.Trimesh(faces=faces_tri.simplices,
                           vertices=coords,
                           process=False)
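
The 'QJ' option matters here because a near-regular grid is full of cocircular quadruples, where the choice of diagonal is otherwise arbitrary; joggling perturbs the input to break such ties. A small illustration:

import numpy as np
from scipy.spatial import Delaunay

gx, gy = np.meshgrid(np.arange(4.0), np.arange(4.0))
grid = np.c_[gx.ravel(), gy.ravel()]
plain = Delaunay(grid)                          # cocircular squares: diagonals chosen arbitrarily
joggled = Delaunay(grid, qhull_options='QJ')    # joggled input breaks the ties
print(len(plain.simplices), len(joggled.simplices))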
Example #18
def juego():
    global p,V,i,l,Area1,Area2,area,maximo
    # Get the position where the click happened
    raton = pygame.mouse.get_pos()
    # If the return button is clicked, go back to the menu
    if return_rect.collidepoint(raton):
        exec(open('Juego.py').read())

    # If the mouse is off the board, nothing should happen
    if raton[1]>700:
        return
    # With fewer than 3 points the triangulation cannot be computed
    if len(p)<3:
        V=[]
        Area1=0
        Area2=0

        # If the selected position is already occupied by another point, apply a tiny imperceptible offset and add the point
        if raton in p:
            p.append((raton[0]+l,raton[1]+l))
            l=l*(-1)
        # Add the point
        else:
            p.append(raton)

        # Compute the region associated with each point as an intersection of half-planes
        for k in range(len(p)):
                # Using the voronoiRegion function
                V.append(voronoiRegion(p,k))
                # Even-index points belong to the first player
                if k%2:
                    pygame.draw.polygon(ventana,(0,0,250),V[k])
                    pygame.draw.polygon(ventana,(0,0,0),V[k],2)
                    # Draw the point
                    pygame.draw.circle(ventana,(0,0,0),p[k],5)
                    # Compute the area associated with the point and add it to that player's total
                    Area1=Area(V[k])+Area1
                # Odd-index points belong to the second player
                else :
                    pygame.draw.polygon(ventana,(250,0,0),V[k])
                    pygame.draw.polygon(ventana,(0,0,0),V[k],2)
                    #Se pinta el punto
                    pygame.draw.circle(ventana,(0,0,0),p[k],5)
                    # Compute the area
                    Area2=Area(V[k])+Area2
        # Show the scoreboard
        Poner_marcador()
        return
    # Case with more points already on the board: the regions can now be obtained from the triangulation
    # The maximum number of points each player can place is limited
    if len(p)<2*maximo:
        # If the click lands on an existing point, apply a slight imperceptible offset
        if raton in p:
                p.append((raton[0]+l,raton[1]-l))
                l=l*(-1)
        # Add the point
        else:
            p.append(raton)
        # Compute the Delaunay triangulation of the point set
        D=Delaunay(p)
        # Obtain the Voronoi diagram from the triangulation
        V=Voronoi(D)
        Area1=0
        Area2=0
        # Draw the points and regions, and compute the area associated with each region
        for v in range (len(p)):
            # Even-index regions belong to the first player's points
            if v%2:
                pygame.draw.polygon(ventana,(0,0,250),V[v])
                pygame.draw.polygon(ventana,(0,0,0),V[v],2)
                # Draw the points
                pygame.draw.circle(ventana,(0,0,0),p[v],5)
                Area1=Area(V[v])+Area1
            # Odd-index regions belong to the second player's points
            else:
                pygame.draw.polygon(ventana,(250,0,0),V[v])
                pygame.draw.polygon(ventana,(0,0,0),V[v],2)
                # Draw the points
                pygame.draw.circle(ventana,(0,0,0),p[v],5)
                Area2=Area(V[v])+Area2
        # Show the scoreboard
        Poner_marcador()
    return
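
Voronoi and voronoiRegion here are the game's own helpers. For reference, scipy.spatial can also produce the diagram directly (a sketch on stand-in points):

import numpy as np
from scipy.spatial import Voronoi

pts = np.random.rand(10, 2) * 700               # stand-in for the clicked points p
vor = Voronoi(pts)
for region_index in vor.point_region[:3]:
    print(vor.regions[region_index])            # vertex indices of the cell; -1 marks an unbounded edge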
Example #19
def merge_and_cut(filename_low, filename_height, output_dir, start_zoom, end_zoom):
    if not os.path.exists(filename_low) or not os.path.exists(filename_height):
        return None
    data_set_low = gdal.Open(filename_low)
    if data_set_low is None:
        print('Read %s failed' % filename_low)
        return None
    data_set_height = gdal.Open(filename_height)
    if data_set_height is None:
        print('Read %s failed' % filename_height)
        return None
    band_low = data_set_low.GetRasterBand(1)
    if band_low is None:
        print('Read %s band failed' % filename_low)
        return None
    x_size_low, y_size_low = data_set_low.RasterXSize, data_set_low.RasterYSize
    x0_low, dx_low, _, y0_low, _, dy_low = data_set_low.GetGeoTransform()

    band_height = data_set_height.GetRasterBand(1)
    if band_height is None:
        print('Read %s band failed' % filename_height)
        return None
    x_size_height, y_size_height = data_set_height.RasterXSize, data_set_height.RasterYSize
    x0_height, dx_height, _, y0_height, _, dy_height = data_set_height.GetGeoTransform()

    min_lng_height = x0_height
    min_lat_height = y0_height
    max_lng_height = x0_height + x_size_height * dx_height
    max_lat_height = y0_height + y_size_height * dy_height
    min_lng_low = x0_low
    min_lat_low = y0_low
    max_lng_low = x0_low + x_size_low * dx_low
    max_lat_low = y0_low + y_size_low * dy_low
    x_offset = round((min_lng_height - min_lng_low) / dx_low)
    y_offset = round((min_lat_height - min_lat_low) / dy_low)
    x_size = round((max_lng_height - min_lng_height) / dx_low)
    y_size = round((max_lat_height - min_lat_height) / dy_low)

    bounds = get_tif_tile_bounds(filename_height, output_dir, start_zoom)
    if bounds == {}:
        return
    _, value = next(iter(bounds.items()))
    _zoom_size_low = round((value[2] - value[0]) / dx_low)
    buf_size = _zoom_size_low + 128     # make the buffer larger than one terrain tile so per-tile reads always get complete tiles

    z_low_with_buf = band_low.ReadAsArray(x_offset - buf_size, y_offset - buf_size, x_size + buf_size * 2, y_size +
                                          buf_size * 2).astype('f4')
    # z_height = band_height.ReadAsArray(0, 0, x_size_height, y_size_height).astype('f4')

    for zoom in range(start_zoom, end_zoom + 1):
        ratio = 2 ** (zoom - start_zoom)
        dsize_low = ((x_size + buf_size * 2) * ratio, (y_size + buf_size * 2) * ratio)
        _z_low_with_buf = cv2.resize(src=z_low_with_buf, dsize=dsize_low, interpolation=cv2.INTER_LINEAR)
        dsize_height = (x_size * ratio, y_size * ratio)
        # read the high-resolution data, letting GDAL resample it to the target size
        z_height = band_height.ReadAsArray(0, 0, x_size_height, y_size_height,
                                           dsize_height[0], dsize_height[1]).astype('f4')
        _z_height = z_height
        bound_yx = get_data_bound(_z_height, 0.8)
        # If no boundary is found, skip blending and cut directly from the low-resolution data
        if bound_yx.shape[0] != 0:
            bound_yx = bound_yx.astype('i4')
            _x = np.zeros(shape=(0, 1))     # x, y, z: collect boundary values
            _y = np.zeros(shape=(0, 1))
            _z = np.zeros(shape=(0, 1))
            for _bound_yx in bound_yx:
                _bound_y_height = _bound_yx[0]
                _bound_x_height = _bound_yx[1]
                _bound_z_height = _z_height[_bound_y_height, _bound_x_height]
                _bound_y_low = _bound_yx[0] + buf_size * ratio
                _bound_x_low = _bound_yx[1] + buf_size * ratio
                _bound_z_low = _z_low_with_buf[_bound_y_low, _bound_x_low]
                _z_d = _bound_z_height - _bound_z_low
                _x = np.vstack((_x, _bound_x_low))
                _y = np.vstack((_y, _bound_y_low))
                _z = np.vstack((_z, _z_d))
            # add zero values along a buffered ring around the boundary
            _xy = np.hstack((_x, _y))
            _m_point = MultiPoint(_xy)
            _zero_bound = _m_point.buffer(50)
            _zero_bound_xy = _zero_bound.exterior.coords.xy
            _zero_bound_x = np.around(np.array(_zero_bound_xy[0])).reshape(len(_zero_bound_xy[0]), -1)
            _zero_bound_y = np.around(np.array(_zero_bound_xy[1])).reshape(len(_zero_bound_xy[1]), -1)
            _zero_bound_z = np.zeros(shape=(_zero_bound_x.shape))
            _x = np.vstack((_x, _zero_bound_x))
            _y = np.vstack((_y, _zero_bound_y))
            _z = np.vstack((_z, _zero_bound_z))
            # add zero values along the data edges
            for _x_edge in range(0, (x_size + buf_size * 2) * ratio):
                _x = np.vstack((_x, [_x_edge]))
                _y = np.vstack((_y, [0]))
                _x = np.vstack((_x, [_x_edge]))
                _y = np.vstack((_y, [(y_size + buf_size * 2) * ratio]))
                _z = np.vstack((_z, [[0], [0]]))
            for _y_edge in range(0, (y_size + buf_size * 2) * ratio):
                _y = np.vstack((_y, [_y_edge]))
                _x = np.vstack((_x, [0]))
                _y = np.vstack((_y, [_y_edge]))
                _x = np.vstack((_x, [(x_size + buf_size * 2) * ratio]))
                _z = np.vstack((_z, [[0], [0]]))

            # _xy = np.hstack((_x, _y))
            # grid = np.mgrid[0:(x_size + buf_size * 2) * ratio, 0:(y_size + buf_size * 2) * ratio]
            # _z_d_new = interpolate.griddata(_xy, _z, grid, method='linear')
            func = interpolate.interp2d(_x, _y, _z,  kind='linear')
            _x_new = np.arange(0, (x_size + buf_size * 2) * ratio)
            _y_new = np.arange(0, (y_size + buf_size * 2) * ratio)
            _z_d_new = func(_x_new, _y_new)

            _z_low_with_buf = _z_low_with_buf + _z_d_new

            for y in range(y_size * ratio):
                for x in range(x_size * ratio):
                    _z_height_v = _z_height[y][x]
                    y_low = y + buf_size * ratio
                    x_low = x + buf_size * ratio
                    _z_low_v = _z_low_with_buf[y_low][x_low]
                    if abs(_z_height_v - 0) > 0.001 and abs(_z_height_v - _z_low_v) > 0.001:
                        _z_low_with_buf[y_low][x_low] = _z_height_v
        # cut
        z_merged = _z_low_with_buf
        blc_bounds = get_tif_tile_bounds(filename_height, output_dir, zoom)
        for fname in blc_bounds:
            blc_bound = blc_bounds[fname]
            blc_x_min_low = round((blc_bound[0] - min_lng_low) / dx_low)
            blc_y_min_low = round((blc_bound[3] - min_lat_low) / dy_low)
            blc_x_max_low = round((blc_bound[2] - min_lng_low) / dx_low)
            blc_y_max_low = round((blc_bound[1] - min_lat_low) / dy_low)

            blc_x_offset_low = (blc_x_min_low - x_offset + buf_size) * ratio
            blc_y_offset_low = (blc_y_min_low - y_offset + buf_size) * ratio
            blc_x_size_low = (blc_x_max_low - blc_x_min_low) * ratio
            blc_y_size_low = (blc_y_max_low - blc_y_min_low) * ratio
            blc_z_low = z_merged[blc_y_offset_low:blc_y_offset_low + blc_y_size_low, blc_x_offset_low:blc_x_offset_low + blc_x_size_low]
            points_xyz = []
            z_low = np.array(blc_z_low)
            for x in range(blc_z_low.shape[1]):
                for y in range(blc_z_low.shape[0]):
                    point_xyz = [x, y, blc_z_low[y, x]]
                    points_xyz.append(point_xyz)
            points_xyz = np.array(points_xyz)
            points_xy = points_xyz[:, 0:2]
            tri = Delaunay(points_xy)
            index = tri.simplices
            points_xyz = (points_xyz + (blc_x_min_low, blc_y_min_low, 0)) * (dx_low, dy_low, 1) + (x0_low, y0_low, 0)
            write_terrain(fname, points_xyz, index)
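
A note on the interpolation step above: scipy.interpolate.interp2d is deprecated (and removed in SciPy >= 1.14), and the commented-out griddata lines hint at the replacement. A minimal standalone sketch of the equivalent linear interpolation, using made-up sample coordinates in place of _x, _y, _z:

import numpy as np
from scipy import interpolate

# Hypothetical scattered samples standing in for the _x, _y, _z arrays above
xs = np.array([0.0, 0.0, 4.0, 4.0, 2.0])
ys = np.array([0.0, 4.0, 0.0, 4.0, 2.0])
zs = np.array([0.0, 1.0, 1.0, 2.0, 1.5])

# Evaluate on the full grid; method='linear' matches kind='linear' in interp2d
gy, gx = np.mgrid[0:5, 0:5]
gz = interpolate.griddata((xs, ys), zs, (gx, gy), method='linear')
print(gz.shape)  # (5, 5); points outside the samples' convex hull become NaN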
Example #20
def get_segdata(json_data, datap, labels=None):
    Z = len(datap["segmentation"])
    X = len(datap["segmentation"][0])
    Y = len(datap["segmentation"][0][0])

    for slice in range(0, Z):
        nbr_drawings = json_data['drawings'][slice][0]['length']
        for draw in range(0, nbr_drawings):
            # the label being processed
            dict_key = json_data['drawingsDetails'][slice][0][draw]['textExpr']
            if labels is not None and dict_key not in labels:  # process only selected objects
                continue

            # cut the JSON string around the coordinates of the outline points
            draw_info = json_data['drawings'][slice][0][str(draw)]
            start = draw_info.find("points")
            end = draw_info.find("stroke")
            points_data = draw_info[start + 9:end - 3].split(',')

            # copy the coordinates into an array
            len_array = int(len(points_data) / 2)
            points = np.empty((len_array, 2))
            i = 0
            for j in range(0, len_array):
                points[j] = [int(points_data[i + 1]), int(points_data[i])]
                i += 2

            # fill in the drawn shape
            # this can crash if the curve was drawn badly
            hull = Delaunay(points)
            x, y = np.mgrid[0:X, 0:Y]
            grid = np.vstack([x.ravel(), y.ravel()]).T
            simplex = hull.find_simplex(grid)
            fill = grid[simplex >= 0, :]

            # insert the marker into the data
            # if the label is already in the dictionary, its stored value is used, never overwritten!
            # if the label is not in the dictionary, it is assigned a random value in the
            # range 100-254, unless its description carries a value of its own
            # if only a value is given but no label, a label of the form "lbl_" + value is created
            # if neither a label nor a value is given, nothing is done
            dict_value = 0
            dict_description = json_data['drawingsDetails'][slice][0][draw][
                'longText'].replace("'", '"')
            if dict_description == '':
                if dict_key == '':
                    print("Drawing is not defined at slice", slice)
                    continue
                else:
                    if dict_key in datap["slab"].keys():
                        dict_value = datap["slab"][dict_key]
                    else:
                        dict_value = random.randint(100, 254)
                        datap["slab"][dict_key] = dict_value
            else:
                dict_description = json.loads(dict_description)
                if dict_key != '' and dict_key in datap["slab"].keys():
                    dict_value = datap["slab"][dict_key]
                elif "value" in dict_description.keys():
                    dict_value = dict_description["value"]
                    if dict_key != '':
                        datap["slab"][dict_key] = dict_value
                    else:
                        dict_key = "lbl_" + str(dict_value)
                        datap["slab"][dict_key] = dict_value
                elif dict_key != '':
                    dict_value = random.randint(100, 254)
                    datap["slab"][dict_key] = dict_value
                else:
                    print("Drawing is not defined at slice", slice)
                    continue

            for i, j in fill:
                datap["segmentation"][Z - 1 - slice][i][j] = dict_value

            # extract the rest of the description
            if dict_key not in description.keys():
                description[dict_key] = {}
                i_color = draw_info.find('#') + 1
                description[dict_key]["r"] = int(
                    draw_info[i_color:(i_color + 2)], 16)
                description[dict_key]["g"] = int(
                    draw_info[(i_color + 2):(i_color + 4)], 16)
                description[dict_key]["b"] = int(
                    draw_info[(i_color + 4):(i_color + 6)], 16)
                description[dict_key]["value"] = dict_value

            if dict_description != '':
                if "threshold" in dict_description.keys():
                    description[dict_key]["threshold"] = dict_description[
                        "threshold"]  # nastavit kolem 100 - 120
                if "two" in dict_description.keys():
                    description[dict_key]["two"] = dict_description["two"]
                if "three" in dict_description.keys():
                    description[dict_key]["three"] = dict_description["three"]
Example #21
def place(env, idx):
    # if there is no box in env, add the current box at the origin with a random rotation about the z axis
    if 0 == len(env['box']):
        env['idx'].append(idx)
        theta = np.random.uniform(0, 0.25 * np.pi)
        r = R.from_quat([0, 0, np.sin(theta),
                         np.cos(theta)])
        env['box'].append(r.apply(box_vert[idx, ...]))
        env['top'].append(1)
        env['base'].append(box_base[idx])
        env['R'].append(np.array([0, 0, np.sin(theta),
                                  np.cos(theta)]))
        env['t'].append(np.zeros([1, 3], np.float32))
    else:
        # if there is an unoccupied box larger than the current one, place on top of it
        # count the available tops:
        cnt = 0
        for i in range(len(env['box'])):
            if (env['top'][i] == 1) and (env['base'][i] > box_base[idx]):
                cnt += 1
        if cnt > 0:
            top = int(np.random.uniform(0, cnt))
            for i in range(len(env['box'])):
                if (env['top'][i] == 1) and (env['base'][i] > box_base[idx]):
                    top -= 1
                    if top > 0:
                        continue
                    # occupy this top
                    env['top'][i] = 0
                    theta = np.random.uniform(0, 0.25 * np.pi)
                    r = R.from_quat([0, 0, np.sin(theta),
                                     np.cos(theta)])
                    top_vert = env['box'][i][4:8, :]
                    w = np.random.uniform(0.5, 1.0, [4, 1])
                    w = w / np.sum(w, keepdims=True)
                    t = np.sum(w * top_vert, axis=0, keepdims=True)
                    # append the current box
                    env['idx'].append(idx)
                    env['box'].append(r.apply(box_vert[idx, ...]) + t)
                    env['top'].append(1)
                    env['base'].append(box_base[idx])
                    env['R'].append(
                        np.array([0, 0, np.sin(theta),
                                  np.cos(theta)]))
                    env['t'].append(t)
                    break  # box placed; without this it would be placed again on every remaining top
        else:  # otherwise place on the ground
            # randomly rotate the current box
            theta = np.random.uniform(0, 0.25 * np.pi)
            r = R.from_quat([0, 0, np.sin(theta),
                             np.cos(theta)])
            cbox = r.apply(box_vert[idx, ...])
            hull_pts = np.zeros([1, 2])
            for i in range(len(env['box'])):
                base_pts = env['box'][i][0:4, :]
                if (base_pts[:, 2] == 0.0).all():
                    hull_pts = np.concatenate([hull_pts, base_pts[:, 0:2]],
                                              axis=0)
            hull = Delaunay(hull_pts)
            pts = np.sum(box_border_w * cbox[0:4, 0:2].reshape(1, 4, 2),
                         axis=1)
            tell = (hull.find_simplex(pts) >= 0).any()
            dt = np.zeros([1, 3])
            dxy = np.random.uniform(0, 1.0, [1, 2])
            dxy = 0.01 * dxy / np.linalg.norm(dxy, 2)
            dt[0, 0:2] = dxy
            t = np.zeros([1, 3])
            while tell:
                t += dt
                cbox = cbox + dt
                pts = np.sum(box_border_w * cbox[0:4, 0:2].reshape(1, 4, 2),
                             axis=1)
                tell = (hull.find_simplex(pts) >= 0).any()
            env['idx'].append(idx)
            env['box'].append(cbox)
            env['top'].append(1)
            env['base'].append(box_base[idx])
            env['R'].append(np.array([0, 0, np.sin(theta),
                                      np.cos(theta)]))
            env['t'].append(t)
            center_env(env)
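
A detail worth flagging in place(): scipy's Rotation.from_quat takes quaternions in (x, y, z, w) order, so [0, 0, sin(theta), cos(theta)] is a rotation of 2*theta about the z axis (a quaternion stores half the rotation angle), meaning the uniform(0, 0.25 * np.pi) draw above yields rotations of up to 90 degrees. A quick standalone check:

import numpy as np
from scipy.spatial.transform import Rotation as R

theta = np.pi / 8
r = R.from_quat([0, 0, np.sin(theta), np.cos(theta)])  # scipy order: (x, y, z, w)
print(r.as_rotvec())  # ~[0, 0, 2*theta]: twice the half-angle theta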
Example #22
N_part = len(particle)
N_sol = len(solution)
N_oxy = N_sol // 3

outPoints = []
points = particle.positions
init = particle.positions
iteration = 0
while iteration < 3:
    iteration += 1
    boundaryVertices = []
    boundaryFacets = []
    boundaryEdges = []
    boundaryCell = []
    triCells = Delaunay(points, furthest_site=False)
    allCells = triCells.simplices
    allCellsl = allCells.tolist()
    ncells = triCells.nsimplex
    #for vert in tri.triangles:
    print(allCells, ncells)

    neighborCells = triCells.neighbors

    #Searching for facets at the boundary of the triangulation
    l = -1
    for i in range(ncells):
        atBoundary = False
        for k in range(DIM + 1):
            if neighborCells[i, k] == -1:
                l += 1
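
The truncated loop above is scanning for boundary facets: in a scipy Delaunay triangulation, neighbors[i, k] == -1 means simplex i has no neighbor opposite vertex k, i.e. that face lies on the boundary. A minimal 2-D sketch of the same idea (the original appears to work in 3-D, with DIM + 1 vertices per cell):

import numpy as np
from scipy.spatial import Delaunay

pts = np.random.rand(20, 2)
tri = Delaunay(pts)
boundary_facets = []
for i in range(tri.nsimplex):
    for k in range(3):                                # DIM + 1 vertices per simplex in 2-D
        if tri.neighbors[i, k] == -1:                 # no cell across this face
            boundary_facets.append(np.delete(tri.simplices[i], k))
print(len(boundary_facets))                           # number of hull edges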
Example #23
 Trans = np.eye(4)
 Trans[0, 3] = -Centroid.x
 Trans[1, 3] = -Centroid.y
 Trans[2, 3] = -Centroid.z
 Rot = np.eye(4)
 Rot[0:3, 0:3] = axes.transpose()
 T = Rot.dot(Trans)
 VH2D = transformPoints(T, VH)
 VH2D = VH2D[:, 0:2]
 [minx, miny] = VH2D.min(0)
 [maxx, maxy] = VH2D.max(0)
 ux, uy = np.mgrid[minx:maxx:6j, miny:maxy:6j]
 ux = ux.flatten()
 uy = uy.flatten()
 uindices = []
 tri = Delaunay(VH2D)
 Vs = tri.simplices  # .vertices is a deprecated alias of .simplices
 for i in range(len(ux)):
     # check the CCW orientation of this point against every triangle of the hole
     isInside = False
     for j in range(Vs.shape[0]):
         [i1, i2, i3] = Vs[j]
         D1 = np.array([[1, ux[i], uy[i]], [1, VH2D[i1, 0], VH2D[i1, 1]],
                        [1, VH2D[i2, 0], VH2D[i2, 1]]])
         D2 = np.array([[1, ux[i], uy[i]], [1, VH2D[i2, 0], VH2D[i2, 1]],
                        [1, VH2D[i3, 0], VH2D[i3, 1]]])
         D3 = np.array([[1, ux[i], uy[i]], [1, VH2D[i3, 0], VH2D[i3, 1]],
                        [1, VH2D[i1, 0], VH2D[i1, 1]]])
         det1 = np.sign(linalg.det(D1))
         det2 = np.sign(linalg.det(D2))
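
The snippet is cut off mid-test, but the pattern is the classic orientation check: a point lies inside a triangle exactly when the three signed-area determinants D1, D2, D3 share the same sign. A standalone sketch of the complete test:

import numpy as np

def point_in_triangle(u, a, b, c):
    # sign of twice the signed area of triangle (p, q, r)
    def orient(p, q, r):
        return np.sign(np.linalg.det(np.array([[1.0, p[0], p[1]],
                                               [1.0, q[0], q[1]],
                                               [1.0, r[0], r[1]]])))
    return orient(u, a, b) == orient(u, b, c) == orient(u, c, a)

print(point_in_triangle((0.2, 0.2), (0, 0), (1, 0), (0, 1)))  # True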
Example #24
 def constructIntermediateImageBlending(self):
     myadd = lambda xs, ys: tuple(int(x + y) for x, y in zip(xs, ys))
     self.inter_image = self.list2
     print(self.inter_image)
     self.tri = Delaunay(self.inter_image)
     # print(tri.simplices)
     # tri.simplices is a (k, 3) numpy array: one row of point indices per simplex
     m, n, _ = self.i2Shape
     print((m, n))
     final_image_coordinates = list(itertools.product(range(m), range(n)))
     p = self.tri.find_simplex(final_image_coordinates)
     self.inside = p
     # Blending
     """
     if p[k] is >= 0 then k belongs to Omega
     |N_p| = 4
     q \in N_p AND Omega 
     """
     interior_points = {}
     count = 0
     for i in range(m):
         for j in range(n):
             if p[i * n + j] >= 0:
                 interior_points[(i, j)] = count
                 count += 1
     bary_coordinates = self.barycentric_coordinates(
         final_image_coordinates, p)
     simplices = self.tri.simplices
     int_image1 = np.zeros((m, n, 3))
     top_pixel = np.zeros((m, n, 3))
     right_pixel = np.zeros((m, n, 3))
     down_pixel = np.zeros((m, n, 3))
     left_pixel = np.zeros((m, n, 3))
     for i in range(m):
         for j in range(n):
             triangle_number = p[i * n + j]
             if triangle_number < 0:
                 int_image1[i, j] = self.image2_unmod[i, j]
                 continue
             corner_points = simplices[triangle_number]
             barycentric = bary_coordinates[i * n + j]
             p1 = myadd(
                 myadd(
                     tuple(x * barycentric[0]
                           for x in self.list1[corner_points[0]]),
                     tuple(x * barycentric[1]
                           for x in self.list1[corner_points[1]])),
                 tuple(x * barycentric[2]
                       for x in self.list1[corner_points[2]]))
             int_image1[i, j] = self.image1_unmod[p1]
             top_pixel[i, j] = self.image1_unmod[(p1[0] - 1, p1[1])]
             right_pixel[i, j] = self.image1_unmod[(p1[0], p1[1] + 1)]
             down_pixel[i, j] = self.image1_unmod[(p1[0] + 1, p1[1])]
             left_pixel[i, j] = self.image1_unmod[(p1[0], p1[1] - 1)]
     # Now p has the triangle number for each of the pixels
     grad_x = cv2.Sobel(int_image1, cv2.CV_8U, 1, 0)
     grad_y = cv2.Sobel(int_image1, cv2.CV_8U, 0, 1)
     row_indices = []
     column_indices = []
     data = []
     right_r = []
     right_g = []
     right_b = []
     for i in range(m):
         for j in range(n):
             triangle_number = p[i * n + j]
             if triangle_number < 0:
                 continue
             row_indices.append(interior_points[(i, j)])
             column_indices.append(interior_points[(i, j)])
             data.append(4)
             right = np.zeros(3)
             count = 0
             for neighbor in [(i + 1, j), (i - 1, j), (i, j + 1),
                              (i, j - 1)]:
                 count += 1
                 if self.inside[neighbor[0] * n + neighbor[1]] >= 0:
                     row_indices.append(interior_points[(i, j)])
                     column_indices.append(interior_points[neighbor])
                     data.append(-1)
                 elif self.in_delta_omega(neighbor):
                     right += self.image2_unmod[neighbor]
                     if count == 1:
                         right += int_image1[i, j] - down_pixel[i, j]
                     if count == 2:
                         right += int_image1[i, j] - top_pixel[i, j]
                     if count == 3:
                         right += int_image1[i, j] - right_pixel[i, j]
                     if count == 4:
                         right += int_image1[i, j] - left_pixel[i, j]
                     continue
                 right += int_image1[i, j] - int_image1[neighbor]
             right_r.append(right[0])
             right_g.append(right[1])
             right_b.append(right[2])
     print("Start")
     A = sparse.csr_matrix(
         (data, (row_indices, column_indices)),
         shape=(len(interior_points), len(interior_points)))
     b_r = np.array(right_r)
     b_g = np.array(right_g)
     b_b = np.array(right_b)
     f_r = spsolve(A, b_r)
     print("Done1")
     f_g = spsolve(A, b_g)
     print("Done2")
     f_b = spsolve(A, b_b)
     print("Done3")
     print(len(interior_points))
     # cv2.imwrite('final.png',final_image)
     cv2.imwrite('before_change_swap.png', int_image1)
     for i in range(m):
         for j in range(n):
             if self.inside[i * n + j] < 0:
                 continue
             int_image1[i, j][0] = f_r[interior_points[(i, j)]]
             int_image1[i, j][1] = f_g[interior_points[(i, j)]]
             int_image1[i, j][2] = f_b[interior_points[(i, j)]]
     cv2.imwrite('final_swap.png', int_image1)
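
For reference, a helper like barycentric_coordinates() above can be built directly from Delaunay.transform, which stores the affine map into barycentric coordinates for each simplex. A minimal standalone sketch:

import numpy as np
from scipy.spatial import Delaunay

pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
tri = Delaunay(pts)
q = np.array([0.25, 0.25])
s = tri.find_simplex(q)                # index of the containing simplex
T = tri.transform[s]                   # rows 0-1: inverse matrix, row 2: offset
b = T[:2].dot(q - T[2])                # first two barycentric coordinates
bary = np.append(b, 1.0 - b.sum())     # three weights, summing to 1
print(bary)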
Example #25
def delaunay_plot(points):

    #points = np.array([[0, 0], [0, 1.1], [1, 0], [1, 1]])
    tri = Delaunay(points)

    print(tri)
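
As written, delaunay_plot only prints the triangulation object. A minimal sketch of actually drawing it, assuming matplotlib is available:

import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay

points = np.array([[0, 0], [0, 1.1], [1, 0], [1, 1]])
tri = Delaunay(points)
plt.triplot(points[:, 0], points[:, 1], tri.simplices)  # triangle edges
plt.plot(points[:, 0], points[:, 1], 'o')               # the input points
plt.show()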
Example #26
def voronoi(seeds, bnd):

    if seeds.shape[0] < 3:
        return None

    if all(x == seeds[0, 0] for x in seeds[:, 0]) or all(x == seeds[0, 1]
                                                         for x in seeds[:, 1]):
        raise Exception(
            'seeds have the same value for x or y:\n {}'.format(seeds))

    # Delaunay triangulation
    tri = Delaunay(seeds)

    # find neighbor indices for each seed point
    neib_indices = [[] for seed in seeds]
    # iterate over seeds
    for j, seed in enumerate(seeds):
        neib_indices[j] = []
        # check whether the simplex contains the seed
        for simplex in tri.simplices:
            intersect = np.intersect1d(simplex, j)
            if intersect.size > 0:
                # add the points of the simplex that are not the seed
                neib_indices[j].append(np.setdiff1d(simplex, j))
        # get rid of duplicates
        neib_indices[j] = np.unique(neib_indices[j])

    # linear equations for the boundary
    bndhull = ConvexHull(bnd)
    bndTmp = bndhull.equations
    bndMat = np.matrix(bndTmp)
    Abnd = bndMat[:, 0:2]
    bbnd = bndMat[:, 2]

    # find linear equations for the perpendicular bisectors
    mylistA = []  # vectors from each seed to its neighbours
    mylistb = []  # dot products of the vectors in mylistA with the midpoint between the seeds

    for i, seed in enumerate(seeds):
        A = []
        b = []
        for ind in neib_indices[i]:
            Altmp, bltmp = perpBisector2d(seed, seeds[ind])
            A.append(Altmp)
            b.append(bltmp)
        mylistA.append(np.matrix(A))
        mylistb.append(np.matrix(b))

    # obtain voronoi vertices
    cells = []
    for j in range(len(mylistA)):
        cell = []
        Atmp = np.concatenate((mylistA[j], Abnd))
        btmp = np.concatenate((mylistb[j].transpose(), -bbnd))
        combinations = itertools.combinations(range(Atmp.shape[0]), 2)
        for tupl in combinations:
            lineA = [
                Atmp[tupl[0]][0, 0], Atmp[tupl[0]][0, 1], btmp[tupl[0]][0, 0]
            ]
            lineB = [
                Atmp[tupl[1]][0, 0], Atmp[tupl[1]][0, 1], btmp[tupl[1]][0, 0]
            ]
            vertex = interLine(lineA, lineB)
            if not isinstance(vertex, bool):  # interLine signals failure with a bool
                if (np.round(np.dot(Atmp, vertex), 6) <= np.round(
                        btmp.transpose(), 6)).all():
                    if not any((vertex == x).all() for x in cell):
                        cell.append(vertex)
        cell = np.asarray(counterClockwise(seeds[j], cell))
        cells.append(cell)
    return cells
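
Worth knowing next to this hand-rolled construction: SciPy provides scipy.spatial.Voronoi directly, which computes the same diagram (without the boundary clipping done above):

import numpy as np
from scipy.spatial import Voronoi

seeds = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [0.5, 0.5]])
vor = Voronoi(seeds)
print(vor.vertices)   # Voronoi vertices
print(vor.regions)    # vertex indices per region; -1 marks a region extending to infinity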
Example #27
def in_hull(p, hull):
    from scipy.spatial import Delaunay
    if not isinstance(hull, Delaunay):
        hull = Delaunay(hull)
    return hull.find_simplex(p) >= 0
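
A usage sketch for in_hull with hypothetical data: find_simplex returns -1 for points outside every simplex, so "inside the convex hull" is exactly find_simplex(p) >= 0.

import numpy as np

cloud = np.random.rand(30, 2)                 # the hull is built from these points
queries = np.array([[0.5, 0.5], [2.0, 2.0]])  # the second point is clearly outside
print(in_hull(queries, cloud))                # typically [ True False]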
Example #28
def process_file(input_file, output_file):

    max_points = 1000
    merge_percent = 500
    save_debug_images = False

    image = face_recognition.load_image_file(input_file)

    face_landmarks_list_ = face_recognition.face_landmarks(image)
    face_landmarks_list = [face_landmarks_list_[0]]
    face_locations = face_recognition.face_locations(image)

    im = Image.open(input_file)
    pil_image = Image.new('RGB', im.size, color='black')
    d = ImageDraw.Draw(pil_image)

    input_points = []

    feature_min = 99999
    feature_max = 0

    eye_centers = []
    mouth = (0, 0)
    for face_landmarks in face_landmarks_list:
        for facial_feature in face_landmarks.keys():
            d.line(face_landmarks[facial_feature], width=2)
            if facial_feature is "left_eye" or facial_feature is "right_eye":
                avg = (0, 0)
                l = 0
                for pt in face_landmarks[facial_feature]:
                    avg = (avg[0] + pt[0], avg[1] + pt[1])
                    l = l + 1
                avg = (avg[0] / l, avg[1] / l)
                eye_centers.append(avg)
            if facial_feature is not "nose_bridge":
                for pt in face_landmarks[facial_feature]:
                    if pt[0] < feature_min:
                        feature_min = pt[0]
                    if pt[0] > feature_max:
                        feature_max = pt[0]
                    input_points.append(pt)

    feature_width = abs(feature_max - feature_min)

    feature_min = feature_min - feature_width / 20
    feature_max = feature_max + feature_width / 20

    edge_image = im.copy()
    edge_image = edge_image.filter(ImageFilter.FIND_EDGES)

    enhancer = ImageEnhance.Contrast(edge_image)
    edge_image = enhancer.enhance(2)

    #edge_image = edge_image.filter(ImageFilter.EDGE_ENHANCE)
    #edge_image = edge_image.filter(ImageFilter.EDGE_ENHANCE_MORE)

    #max_points = 30
    points_placed = 0

    edge_points = []
    border = 20

    feature_distance = 60

    for loc in face_locations:
        aloc = (loc[0] - (loc[2] - loc[0]) / 2, loc[1] + (loc[1] - loc[3]) / 2,
                loc[2], loc[3] - (loc[1] - loc[3]) / 2)
        #if x > aloc[3] and x < aloc[1] and y > aloc[0] and y < aloc[2]:
        for y in range(int(aloc[0]), int(aloc[2])):
            for x in range(int(aloc[3]), int(aloc[1])):
                if x > feature_min and x < feature_max:
                    px = edge_image.getpixel((x, y))
                    brightness = (im.getpixel((x, y))[0] + im.getpixel(
                        (x, y))[1] + im.getpixel((x, y))[2]) / 3
                    probability = brightness
                    max_brightness = 200
                    eye_distance = 9999
                    edge_min = 100
                    for eye in eye_centers:
                        distance = math.sqrt(
                            math.pow(eye[0] - x, 2) + math.pow(eye[1] - y, 2))
                        if distance < eye_distance:
                            eye_distance = distance
                    if eye_distance < 10:
                        probability = 0
                        max_brightness = 200
                        edge_min = 80
                    random_insert = random.randint(0, 1000) > 997
                    if random_insert or (brightness < max_brightness and
                                         random.randint(0, 600) > probability
                                         and px[2] > edge_min):
                        edge_points.append((x, y))

    de = ImageDraw.Draw(edge_image)

    for ep in edge_points:
        #filter out points below the jawline
        doAppend = True
        for face_landmarks in face_landmarks_list:
            lastPt = None
            for pt in face_landmarks['chin']:
                de.rectangle([pt, (pt[0] + 10, pt[1] + 10)],
                             fill=(0, 255, 255, 255))
                if lastPt is not None:
                    vec = (pt[0] - lastPt[0], pt[1] - lastPt[1])
                    vecNorm = LA.norm(vec)
                    vec = (vec[1] / vecNorm, -vec[0] / vecNorm)
                    de.line([lastPt, (lastPt[0] + vec[0], lastPt[1] + vec[1])],
                            fill=(255, 0, 0, 255),
                            width=3)
                    vecToPt = (ep[0] - lastPt[0], ep[1] - lastPt[1])
                    vecNorm = LA.norm(vecToPt)
                    if vecNorm == 0:
                        vecNorm = 0.1  # avoid division by zero
                    vecToPt = (vecToPt[0] / vecNorm, vecToPt[1] / vecNorm)
                    if numpy.dot(vec, vecToPt) < -0.5:
                        de.line([
                            lastPt,
                            (lastPt[0] + vecToPt[0] * 2000,
                             lastPt[1] + vecToPt[1] * 2000)
                        ],
                                fill=(255, 255, 0, 255),
                                width=2)
                        doAppend = False
                lastPt = (pt[0], pt[1])
        for face_landmarks in face_landmarks_list:
            for pt in face_landmarks['nose_tip']:
                if ep[1] < pt[1]:
                    doAppend = True

        if doAppend:
            input_points.append(ep)
            de.rectangle([ep, (ep[0] + 10, ep[1] + 10)], fill=(0, 255, 0, 255))
        else:
            de.rectangle([ep, (ep[0] + 10, ep[1] + 10)], fill=(255, 0, 0, 255))

    if save_debug_images:
        for loc in face_locations:
            face_location = (loc[0] - (loc[2] - loc[0]) / 2,
                             loc[1] + (loc[1] - loc[3]) / 2, loc[2],
                             loc[3] - (loc[1] - loc[3]) / 2)
            top, right, bottom, left = face_location
            de.rectangle([left, top, right, bottom],
                         outline=(255, 255, 0, 255))
            face_image = image[top:bottom, left:right]
            fimage = Image.fromarray(face_image)
            fimage.save('face.png')

    if save_debug_images:
        edge_image.save('edges.png')

    pmin = [edge_image.size[0], edge_image.size[1]]
    pmax = [0, 0]

    #merge vertices

    merged_points = []
    merge_thresh = 3
    for i in input_points:
        merge = False
        for m in merged_points:
            if abs(i[0] - m[0]) < merge_thresh and abs(i[1] -
                                                       m[1]) < merge_thresh:
                merge = True
        if not merge:
            merged_points.append(i)

    for p in merged_points:
        if p[0] > pmax[0]:
            pmax[0] = p[0]
        if p[1] > pmax[1]:
            pmax[1] = p[1]
        if p[0] < pmin[0]:
            pmin[0] = p[0]
        if p[1] < pmin[1]:
            pmin[1] = p[1]

    tri = Delaunay(merged_points)

    tri_normalized = []
    tri_normalized_thick = []

    psize = pmax[0] - pmin[0]
    if pmax[1] - pmin[1] > psize:
        psize = pmax[1] - pmin[1]
    psize = float(psize)

    tri_colors = []
    norm_min = 99999.0
    norm_max = 0.0
    for t in tri.simplices:
        #  print("Line: {}".format(t))
        triline = [
            merged_points[t[0]], merged_points[t[1]], merged_points[t[2]],
            merged_points[t[0]]
        ]
        pn0 = [
            float(triline[0][0] - pmin[0]) / psize,
            float(triline[0][1] - pmin[1]) / psize
        ]
        pn1 = [
            float(triline[1][0] - pmin[0]) / psize,
            float(triline[1][1] - pmin[1]) / psize
        ]
        pn2 = [
            float(triline[2][0] - pmin[0]) / psize,
            float(triline[2][1] - pmin[1]) / psize
        ]

        px = int((triline[0][0] + triline[1][0] + triline[2][0]) / 3)
        py = int((triline[0][1] + triline[1][1] + triline[2][1]) / 3)
        color = im.getpixel((px, py))

        eye_distance = 9999
        edge_min = 100
        for eye in eye_centers:
            distance = math.sqrt(
                math.pow(eye[0] - px, 2) + math.pow(eye[1] - py, 2))
            if distance < eye_distance:
                eye_distance = distance
        if eye_distance < 5:
            color = (int(color[0] / 1.5), int(color[1] / 1.5), int(color[2] / 1.5))
        else:
            color = (min(255, color[0] * 2), min(255, color[1] * 2),
                     min(255, color[2] * 2))

        color = (min(255, color[0] + 20), min(255, color[1] + 20),
                 min(255, color[2] + 20))
        tri_colors.append(color)

        if pn0[0] < norm_min:
            norm_min = pn0[0]
        if pn1[0] < norm_min:
            norm_min = pn1[0]
        if pn2[0] < norm_min:
            norm_min = pn2[0]

        if pn0[0] > norm_max:
            norm_max = pn0[0]
        if pn1[0] > norm_max:
            norm_max = pn1[0]
        if pn2[0] > norm_max:
            norm_max = pn2[0]

        tri_normalized.append([pn0, pn1, pn2])
        #if random.randint(0, 10) > 3:
        d.line(triline, width=2)

    #recenter
    x_offset = -(norm_min + abs(norm_max - norm_min) / 2.0)

    #tri_centered = []
    #for tri in tri_normalized:
    #	tri_centered.append([(tri[0][0] + x_offset, tri[0][1]), (tri[1][0] + x_offset, tri[1][1]), (tri[2][0] + x_offset, tri[2][1])])
    #tri_normalized = tri_centered

    #fill thick array
    for face_landmarks in face_landmarks_list:
        for facial_feature in face_landmarks.keys():
            if facial_feature is "chin":
                thick_line = []
                for pt in face_landmarks[facial_feature]:
                    pn0 = (float(pt[0] - pmin[0]) / psize,
                           float(pt[1] - pmin[1]) / psize)
                    thick_line.append(pn0)
                tri_normalized_thick.append(thick_line)

    for face_landmarks in face_landmarks_list:
        for facial_feature in face_landmarks.keys():
            d.line(face_landmarks[facial_feature], width=5)

    if save_debug_images:
        pil_image.save("out.png")

    #snap to grid


#	tri_snapped = []
#	gridsize = 1.0/32.0
#	for t in tri_normalized:
#		triline = []
#		for pt in t:
#			triline.append((rnd(pt[0], gridsize), rnd(pt[1], gridsize)))
#		tri_snapped.append(triline)
#	tri_normalized = tri_snapped
#
#	thick_snapped = []
#	for line in tri_normalized_thick:
#		sline = []
#		for pt in line:
#			sline.append((rnd(pt[0], gridsize), rnd(pt[1], gridsize)))
#		thick_snapped.append(sline)
#	tri_normalized_thick = thick_snapped

    bigsize = 1024
    big_image = Image.new('RGB', (bigsize, bigsize), color='white')
    db = ImageDraw.Draw(big_image)

    draw_filled = False
    draw_both = False

    if draw_both:
        bigsize = float(bigsize)
        idx = 0
        for t in tri_normalized:
            triline = [(t[0][0] * bigsize, t[0][1] * bigsize),
                       (t[1][0] * bigsize, t[1][1] * bigsize),
                       (t[2][0] * bigsize, t[2][1] * bigsize)]
            color = tri_colors[idx]
            val = (color[0] + color[1] + color[2]) / 3
            val = float(val) / 255.0
            val = val * val * 3.0
            val = int(val * 255.0)
            db.polygon(triline, fill=(val, val, val, 255))
            idx = idx + 1
        for t in tri_normalized:
            triline = [(t[0][0] * bigsize, t[0][1] * bigsize),
                       (t[1][0] * bigsize, t[1][1] * bigsize),
                       (t[2][0] * bigsize, t[2][1] * bigsize)]
            db.line(triline, width=2, fill=(0, 0, 0, 255))
        for line in tri_normalized_thick:
            bigline = []
            for pt in line:
                bpt = (pt[0] * bigsize, pt[1] * bigsize)
                bigline.append(bpt)
            db.line(bigline, width=2, fill=(0, 0, 0, 255))

    elif draw_filled:
        bigsize = float(bigsize)
        idx = 0
        for t in tri_normalized:
            triline = [(t[0][0] * bigsize, t[0][1] * bigsize),
                       (t[1][0] * bigsize, t[1][1] * bigsize),
                       (t[2][0] * bigsize, t[2][1] * bigsize)]
            db.polygon(triline, fill=tri_colors[idx])
            idx = idx + 1
    else:
        bigsize = float(bigsize)
        for t in tri_normalized:
            triline = [(t[0][0] * bigsize, t[0][1] * bigsize),
                       (t[1][0] * bigsize, t[1][1] * bigsize),
                       (t[2][0] * bigsize, t[2][1] * bigsize)]
            db.line(triline, width=2, fill=(0, 0, 0, 255))

    big_lines = []
    for line in tri_normalized_thick:
        bigline = []
        for pt in line:
            bpt = (pt[0] * bigsize, pt[1] * bigsize)
            bigline.append(bpt)
        db.line(bigline, width=4, fill=(0, 0, 0, 255))

    if save_debug_images:
        big_image.save('big.png')

    output_file = "project/media/" + output_file
    fout = open(output_file, 'wb')
    fout.write(struct.pack('i', len(tri_normalized)))

    idx = 0
    for t in tri_normalized:
        fout.write(
            struct.pack('ffffff', t[0][0] + x_offset, t[0][1],
                        t[1][0] + x_offset, t[1][1], t[2][0] + x_offset,
                        t[2][1]))
        color = tri_colors[idx]
        fout.write(
            struct.pack('fff', color[0] / 255.0, color[1] / 255.0,
                        color[2] / 255.0))
        idx = idx + 1

    fout.close()
Example #29
def compute(myVelfield, MyParams):
	print("Computing strain via Hammond method.");
	z = np.array([myVelfield.elon,myVelfield.nlat]);
	z = z.T;
	tri=Delaunay(z);

	triangle_vertices = z[tri.simplices];
	trishape = np.shape(triangle_vertices);  # 516 x 3 x 2, for example
	print("Number of triangle elements: %d" % (trishape[0]));

	# We are going to solve for the velocity gradient tensor at the centroid of each triangle. 
	centroids=[];
	for i in range(trishape[0]):
		xcor_mean = np.mean([triangle_vertices[i,0,0],triangle_vertices[i,1,0],triangle_vertices[i,2,0]]);
		ycor_mean = np.mean([triangle_vertices[i,0,1],triangle_vertices[i,1,1],triangle_vertices[i,2,1]]);
		centroids.append([xcor_mean,ycor_mean]);
	xcentroid=[x[0] for x in centroids];
	ycentroid=[x[1] for x in centroids];

	# Initialize arrays.
	I2nd=[];
	rot=[];
	max_shear=[];
	e1=[]; # eigenvalues
	e2=[];
	v00=[];  # eigenvectors
	v01=[];
	v10=[];
	v11=[];
	dilatation=[]; # dilatation	= e1+e2

	# for each triangle:
	for i in range(trishape[0]):

		# Get the velocities of each vertex (VE1, VN1, VE2, VN2, VE3, VN3)
		# Get velocities for Vertex 1 (triangle_vertices[i,0,0] and triangle_vertices[i,0,1])
		xindex1 = np.where(myVelfield.elon==triangle_vertices[i,0,0])
		yindex1 = np.where(myVelfield.nlat==triangle_vertices[i,0,1])
		index1  = int(np.intersect1d(xindex1,yindex1)[0]);
		xindex2 = np.where(myVelfield.elon==triangle_vertices[i,1,0])
		yindex2 = np.where(myVelfield.nlat==triangle_vertices[i,1,1])
		index2  = int(np.intersect1d(xindex2,yindex2)[0]);
		xindex3 = np.where(myVelfield.elon==triangle_vertices[i,2,0])
		yindex3 = np.where(myVelfield.nlat==triangle_vertices[i,2,1])
		index3  = int(np.intersect1d(xindex3,yindex3)[0]);
		
		phi=np.array([triangle_vertices[i,0,0], triangle_vertices[i,1,0], triangle_vertices[i,2,0] ]);
		theta=np.array([triangle_vertices[i,0,1], triangle_vertices[i,1,1], triangle_vertices[i,2,1] ]);
		theta=[i-90 for i in theta];
		u_phi=np.array([myVelfield.e[index1],myVelfield.e[index2],myVelfield.e[index3]]);
		u_theta=np.array([myVelfield.n[index1],myVelfield.n[index2],myVelfield.n[index3]]);
		u_theta=np.array([-i for i in u_theta]);  # colatitude needs negative theta values. 
		s_phi=np.array([myVelfield.se[index1],myVelfield.se[index2],myVelfield.se[index3]]);
		s_theta=np.array([myVelfield.sn[index1],myVelfield.sn[index2],myVelfield.sn[index3]]);

		# HERE WE PLUG IN BILL'S CODE! 
		weight=1;
		paramsel=0;
		[e_phiphi,e_thetaphi,e_thetatheta,omega_r,U_theta,U_phi,s_omega_r,s_e_phiphi,s_e_thetaphi,s_e_thetatheta,s_U_theta,s_U_phi,chi2,OMEGA,THETA_p,PHI_p,s_OMEGA,s_THETA_p,s_PHI_p,r_PHITHETA,u_phi_p,u_theta_p] = strain_sphere(phi,theta,u_phi,u_theta,s_phi,s_theta,weight,paramsel);

		# print_all_values(e_phiphi,e_thetaphi,e_thetatheta,omega_r,U_theta,U_phi,s_omega_r,s_e_phiphi,s_e_thetaphi,s_e_thetatheta,s_U_theta,s_U_phi,chi2,OMEGA,THETA_p,PHI_p,s_OMEGA,s_THETA_p,s_PHI_p,r_PHITHETA,u_phi_p,u_theta_p);

		# The components that are easily computed
		# Units: nanostrain per year. 
		exx=e_phiphi*1e6;
		exy=-e_thetaphi*1e6;
		eyy=e_thetatheta*1e6;

		# # Compute a number of values based on tensor properties. 
		I2nd_tri = np.log10(np.abs(strain_tensor_toolbox.second_invariant(exx, exy, eyy)));
		I2nd.append(I2nd_tri);
		rot.append(OMEGA*1000*1000);
		[e11, e22, v] = strain_tensor_toolbox.eigenvector_eigenvalue(exx, exy, eyy);

		e1.append(-e11);  # the convention of this code returns negative eigenvalues compared to my other codes. 
		e2.append(-e22);
		max_shear.append((e11 - e22)/2);
		v00.append(v[0][0]);
		v10.append(v[1][0]);
		v01.append(v[0][1]);
		v11.append(v[1][1]);
		dilatation.append(-e11 - e22);  # the convention of this code returns negative eigenvalues compared to my other codes. 

	return [xcentroid, ycentroid, triangle_vertices, I2nd, max_shear, rot, e1, e2, v00, v01, v10, v11, dilatation];
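
One small aside on the code above: the per-triangle centroid loop can be collapsed into a single vectorized mean over the vertex axis. A standalone sketch with stand-in coordinates:

import numpy as np
from scipy.spatial import Delaunay

z = np.random.rand(12, 2)                   # stand-ins for (elon, nlat) pairs
tri = Delaunay(z)
triangle_vertices = z[tri.simplices]        # shape (ntri, 3, 2)
centroids = triangle_vertices.mean(axis=1)  # one (x, y) centroid per triangle
print(centroids.shape)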
Example #30
    def run(self, setup):
        """Run a preprocessing task.

        :param obj setup: inputSetup object.
        :return: None
        """
        # ---------------------------------------------------------------
        # Initialization
        # ---------------------------------------------------------------
        # Start timer
        Timers()["Preprocessing"].start()

        # Parameters shortcut (for code legibility)
        model = setup.model
        output = setup.output

        # Obtain the MPI environment
        parEnv = MPIEnvironment()

        # ---------------------------------------------------------------
        # Import mesh file (gmsh format)
        # ---------------------------------------------------------------
        # Read nodes
        nodes, _ = readGmshNodes(model.mesh_file)

        # Read connectivity
        elemsN, nElems = readGmshConnectivity(model.mesh_file)

        # ---------------------------------------------------------------
        # Preprocessing nodal coordinates
        # ---------------------------------------------------------------
        Print.master('     Nodal coordinates')

        # Build coordinates in PETGEM format, where each row holds
        # the xyz coordinates of the element's 4 tetrahedral nodes
        num_dimensions = 3
        num_nodes_per_element = 4
        data = np.array(nodes[elemsN, :], dtype=float)
        data = data.reshape(nElems, num_dimensions*num_nodes_per_element)

        # Get matrix dimensions
        size = data.shape

        # Build PETSc structures
        matrix = createSequentialDenseMatrixWithArray(size[0], size[1], data)

        # Build path to save the file
        out_path = output.directory_scratch + '/nodes.dat'

        if parEnv.rank == 0:
            # Write PETGEM nodes in PETSc format
            writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)

        # ---------------------------------------------------------------
        # Preprocessing mesh connectivity
        # ---------------------------------------------------------------
        Print.master('     Mesh connectivity')

        # Get matrix dimensions
        size = elemsN.shape

        # Build PETSc structures
        matrix = createSequentialDenseMatrixWithArray(size[0], size[1], elemsN)

        # Build path to save the file
        out_path = output.directory_scratch + '/meshConnectivity.dat'

        if parEnv.rank == 0:
            # Write PETGEM connectivity in PETSc format
            writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)

        # ---------------------------------------------------------------
        # Preprocessing edges connectivity
        # ---------------------------------------------------------------
        Print.master('     Edges connectivity')

        # Compute edges
        elemsE, edgesNodes = computeEdges(elemsN, nElems)
        nEdges = edgesNodes.shape[0]

        # Get matrix dimensions
        size = elemsE.shape

        # Build PETSc structures
        matrix = createSequentialDenseMatrixWithArray(size[0], size[1], elemsE)

        # Build path to save the file
        out_path = output.directory_scratch + '/edges.dat'

        if parEnv.rank == 0:
            # Write PETGEM edges in PETSc format
            writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)

        # Reshape edgesNodes and save
        num_nodes_per_edge = 2
        num_edges_per_element = 6
        data = np.array(edgesNodes[elemsE, :], dtype=float)
        data = data.reshape(nElems, num_nodes_per_edge*num_edges_per_element)

        # Get matrix dimensions
        size = data.shape

        # Build PETSc structures
        matrix = createSequentialDenseMatrixWithArray(size[0], size[1], data)

        # Build path to save the file
        out_path = output.directory_scratch + '/edgesNodes.dat'

        if parEnv.rank == 0:
            # Write PETGEM edgesNodes in PETSc format
            writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)

        # ---------------------------------------------------------------
        # Preprocessing faces connectivity
        # ---------------------------------------------------------------
        Print.master('     Faces connectivity')

        # Compute faces
        elemsF, facesN = computeFaces(elemsN, nElems)
        nFaces = facesN.shape[0]

        # Get matrix dimensions
        size = elemsF.shape
        # Build PETSc structures
        matrix = createSequentialDenseMatrixWithArray(size[0], size[1], elemsF)

        # Build path to save the file
        out_path = output.directory_scratch + '/faces.dat'

        if parEnv.rank == 0:
            # Write PETGEM faces in PETSc format
            writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)

        # ---------------------------------------------------------------
        # Preprocessing faces-edges connectivity
        # ---------------------------------------------------------------
        Print.master('     Faces-edges connectivity')

        N = invConnectivity(elemsF, nFaces)

        if nElems != 1:
            N = np.delete(N, 0, axis=1)

        # Allocate
        facesE = np.zeros((nFaces, 3), dtype=int)

        # Compute edges list for each face
        for i in np.arange(nFaces):
            iEle = N[i, 0]
            edgesEle = elemsE[iEle,:]
            facesEle = elemsF[iEle,:]
            kFace = np.where(facesEle == i)[0]
            if kFace == 0:  # Face 1
                facesE[facesEle[kFace],:] = [edgesEle[0], edgesEle[1], edgesEle[2]]
            elif kFace == 1:  # Face 2
                facesE[facesEle[kFace],:] = [edgesEle[0], edgesEle[4], edgesEle[3]]
            elif kFace == 2:  # Face 3
                facesE[facesEle[kFace],:] = [edgesEle[1], edgesEle[5], edgesEle[4]]
            elif kFace == 3:  # Face 4
                facesE[facesEle[kFace],:] = [edgesEle[2], edgesEle[5], edgesEle[3]]

        num_faces_per_element = 4
        num_edges_per_face = 3
        data = np.array(facesE[elemsF, :], dtype=float)
        data = data.reshape(nElems, num_faces_per_element*num_edges_per_face)

        # Get matrix dimensions
        size = data.shape

        # Build PETSc structures
        matrix = createSequentialDenseMatrixWithArray(size[0], size[1], data)

        # Build path to save the file
        out_path = output.directory_scratch + '/facesEdges.dat'

        if parEnv.rank == 0:
            # Write PETGEM edges in PETSc format
            writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)

        # ---------------------------------------------------------------
        # Preprocessing dofs connectivity
        # ---------------------------------------------------------------
        Print.master('     DOFs connectivity')

        # Compute degrees of freedom connectivity
        dofs, dof_edges, dof_faces, _, total_num_dofs = computeConnectivityDOFS(elemsE,elemsF,model.basis_order)

        # Get matrix dimensions
        size = dofs.shape

        # Build PETSc structures
        matrix = createSequentialDenseMatrixWithArray(size[0], size[1], dofs)

        # Build path to save the file
        out_path = output.directory_scratch + '/dofs.dat'

        if parEnv.rank == 0:
            # Write PETGEM dofs in PETSc format
            writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)

        # ---------------------------------------------------------------
        # Preprocessing boundaries
        # ---------------------------------------------------------------
        Print.master('     Boundaries')

        # Compute boundary faces
        bFacesN, bFaces = computeBoundaryFaces(elemsF, facesN)

        # Compute boundary edges
        bEdges = computeBoundaryEdges(edgesNodes, bFacesN)

        # Compute dofs on boundaries
        _, indx_boundary_dofs = computeBoundaries(dofs, dof_edges, dof_faces, bEdges, bFaces, model.basis_order)

        # Build PETSc structures
        vector = createSequentialVectorWithArray(indx_boundary_dofs)

        # Build path to save the file
        out_path = output.directory_scratch + '/boundaries.dat'

        if parEnv.rank == 0:
            # Write PETGEM boundaries in PETSc format
            writePetscVector(out_path, vector, communicator=PETSc.COMM_SELF)

        # ---------------------------------------------------------------
        # Preprocessing sigma model
        # ---------------------------------------------------------------
        Print.master('     Conductivity model')

        # Read element's tag
        elemsS, nElems = readGmshPhysicalGroups(model.mesh_file)

        # Build conductivity arrays
        conductivityModel = np.zeros((nElems, 2), dtype=float)  # the np.float alias was removed in NumPy >= 1.24
        for i in np.arange(nElems):
            # Set horizontal sigma
            conductivityModel[i, 0] = model.sigma_horizontal[int(elemsS[i])]
            # Set vertical sigma
            conductivityModel[i, 1] = model.sigma_vertical[int(elemsS[i])]

        # Get matrix dimensions
        size = conductivityModel.shape

        # Build PETSc structures
        matrix = createSequentialDenseMatrixWithArray(size[0], size[1], conductivityModel)

        # Build path to save the file
        out_path = output.directory_scratch + '/conductivityModel.dat'

        if parEnv.rank == 0:
            # Write PETGEM conductivity model in PETSc format
            writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)

        # ---------------------------------------------------------------
        # Preprocessing receivers
        # ---------------------------------------------------------------
        Print.master('     Receivers')

        # Open receivers_file
        fileID = h5py.File(model.receivers_file, 'r')

        # Read receivers
        receivers = fileID.get('data')[()]

        # Number of receivers
        if receivers.ndim == 1:
            nReceivers = 1
        else:
            dim = receivers.shape
            nReceivers = dim[0]

        # Build Delaunay triangulation with nodes
        tri = Delaunay(nodes)

        # Overwrite Delaunay structure with mesh_file connectivity and points
        tri.simplices = elemsN.astype(np.int32)
        tri.vertices = elemsN.astype(np.int32)

        # Find out which tetrahedral element points are in
        recvElems = tri.find_simplex(receivers, bruteforce=True, tol=1.e-12)

        # Find out which tetrahedral element source point is in
        srcElem = tri.find_simplex(model.src_position, bruteforce=True, tol=1.e-12)

        # Determine if all receiver points were found
        idx = np.where(np.logical_or(recvElems>nElems, recvElems<0))[0]

        # If idx is not empty, there are receivers outside the domain
        if idx.size != 0:
            Print.master('        The following receivers were not located and will not be taken into account ' + str(idx))
            # Update number of receivers
            nReceivers = nReceivers - len(idx)

            if nReceivers == 0:
                Print.master('     No receiver has been found. Nothing to do. Aborting')
                exit(-1)

            # Remove idx from receivers matrix
            receivers = np.delete(receivers, idx, axis=0)

            # Remove idx from recvElems
            recvElems = np.delete(recvElems, idx, axis=0)

        # find_simplex returns -1 when the source was not located (0 is a valid element index)
        if srcElem < 0:
            Print.master('        Source not located in the computational domain. Please improve the mesh quality')
            exit(-1)

        # Compute number of dofs per element
        num_dof_in_element = int(model.basis_order*(model.basis_order+2)*(model.basis_order+3)/2)

        # Allocate
        data_receiver = np.zeros((nReceivers, 53+num_dof_in_element), dtype=float)

        # Fill tmp matrix with receiver positions, element coordinates and
        # nodal indexes
        for i in np.arange(nReceivers):
            # If there is one receiver
            if nReceivers == 1:
                # Get index of tetrahedral element (receiver container)
                iEle = recvElems
                # Get dofs of element container
                dofsElement = dofs[iEle]
            # If there is more than one receiver
            else:
                # Get index of tetrahedral element (receiver container)
                iEle = recvElems[i]
                # Get dofs of element container
                dofsElement = dofs[iEle, :]

            # Get indexes of nodes for i and insert
            nodesReceiver = elemsN[iEle, :]
            data_receiver[i, 0:4] = nodesReceiver
            # Get nodes coordinates for i and insert
            coordEle = nodes[nodesReceiver, :]
            coordEle = coordEle.flatten()
            data_receiver[i, 4:16] = coordEle
            # Get indexes of faces for i and insert
            facesReceiver = elemsF[iEle, :]
            data_receiver[i, 16:20] = facesReceiver
            # Get edges indexes for faces in i and insert
            edgesReceiver = facesE[facesReceiver, :]
            edgesReceiver = edgesReceiver.flatten()
            data_receiver[i, 20:32] = edgesReceiver
            # Get indexes of edges for i and insert
            edgesReceiver = elemsE[iEle, :]
            data_receiver[i, 32:38] = edgesReceiver
            # Get node indexes for edges in i and insert
            edgesNodesReceiver = edgesNodes[edgesReceiver, :]
            edgesNodesReceiver = edgesNodesReceiver.flatten()
            data_receiver[i, 38:50] = edgesNodesReceiver
            # Get receiver coordinates
            coordReceiver = receivers[i,: ]
            data_receiver[i, 50:53] = coordReceiver
            # Get dofs for the receiver element and insert
            dofsReceiver = dofsElement
            data_receiver[i, 53::] = dofsReceiver

        # Get matrix dimensions
        size = data_receiver.shape

        # Build PETSc structures
        matrix = createSequentialDenseMatrixWithArray(size[0], size[1], data_receiver)

        # Build path to save the file
        out_path = output.directory_scratch + '/receivers.dat'

        if parEnv.rank == 0:
            # Write PETGEM receivers in PETSc format
            writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)

        # Compute number of dofs per element
        num_dof_in_element = int(model.basis_order*(model.basis_order+2)*(model.basis_order+3)/2)

        # Build data for source insertion
        vector = np.zeros(50+num_dof_in_element, dtype=float)

        # Get indexes of nodes for srcElem and insert
        nodesSource = elemsN[srcElem, :]
        vector[0:4] = nodesSource
        # Get nodes coordinates for srcElem and insert
        coordSource = nodes[nodesSource, :]
        coordSource = coordSource.flatten()
        vector[4:16] = coordSource
        # Get indexes of faces for srcElem and insert
        facesSource = elemsF[srcElem, :]
        vector[16:20] = facesSource
        # Get edges indexes for faces in srcElem and insert
        edgesFace = facesE[facesSource, :]
        edgesFace = edgesFace.flatten()
        vector[20:32] = edgesFace
        # Get indexes of edges for srcElem and insert
        edgesSource = elemsE[srcElem, :]
        vector[32:38] = edgesSource
        # Get node indexes for edges in srcElem and insert
        edgesNodesSource = edgesNodes[edgesSource, :]
        edgesNodesSource = edgesNodesSource.flatten()
        vector[38:50] = edgesNodesSource
        # Get dofs for srcElem and insert
        dofsSource = dofs[srcElem,:]
        vector[50::] = dofsSource

        # Build PETSc structures
        vector = createSequentialVectorWithArray(vector)

        # Build path to save the file
        out_path = output.directory_scratch + '/source.dat'

        if parEnv.rank == 0:
            # Write PETGEM source in PETSc format
            writePetscVector(out_path, vector, communicator=PETSc.COMM_SELF)

        # ---------------------------------------------------------------
        # Sparsity pattern
        # ---------------------------------------------------------------
        # Set up the valence for each basis order (with a small safety margin)
        valence = np.array([50, 200, 400, 800, 1400, 2500])

        # Build nnz pattern for each row
        nnz = np.full(total_num_dofs, valence[model.basis_order-1], dtype=int)

        # Build PETSc structures
        vector = createSequentialVectorWithArray(nnz)

        # Build path to save the file
        out_path = output.directory_scratch + '/nnz.dat'

        if parEnv.rank == 0:
            # Write PETGEM nnz pattern in PETSc format
            writePetscVector(out_path, vector, communicator=PETSc.COMM_SELF)

        # ---------------------------------------------------------------
        # Print mesh statistics
        # ---------------------------------------------------------------
        Print.master(' ')
        Print.master('  Mesh statistics')
        Print.master('     Number of elements:   {0:12}'.format(str(nElems)))
        Print.master('     Number of faces:      {0:12}'.format(str(nFaces)))
        Print.master('     Number of edges:      {0:12}'.format(str(nEdges)))
        Print.master('     Number of dofs:       {0:12}'.format(str(total_num_dofs)))
        Print.master('     Number of boundaries: {0:12}'.format(str(len(indx_boundary_dofs))))

        # ---------------------------------------------------------------
        # Print data model
        # ---------------------------------------------------------------
        Print.master(' ')
        Print.master('  Model data')
        Print.master('     Number of materials:    {0:12}'.format(str(np.max(elemsS)+1)))
        Print.master('     Vector basis order:     {0:12}'.format(str(model.basis_order)))
        Print.master('     Frequency (Hz):         {0:12}'.format(str(model.frequency)))
        Print.master('     Source position (xyz):  {0:12}'.format(str(model.src_position)))
        Print.master('     Source azimuth:         {0:12}'.format(str(model.src_azimuth)))
        Print.master('     Source dip:             {0:12}'.format(str(model.src_dip)))
        Print.master('     Source current:         {0:12}'.format(str(model.src_current)))
        Print.master('     Source length:          {0:12}'.format(str(model.src_length)))
        Print.master('     Sigma horizontal:       {0:12}'.format(str(model.sigma_horizontal)))
        Print.master('     Sigma vertical:         {0:12}'.format(str(model.sigma_vertical)))
        Print.master('     Number of receivers:    {0:12}'.format(str(nReceivers)))

        # Apply barrier for MPI tasks alignment
        parEnv.comm.barrier()

        # Stop timer
        Timers()["Preprocessing"].stop()