Code Example #1
File: testFunc.py Project: jennyzeng/OCR
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull


def findBoundary(data, points):
	"""
	Use the convex hull algorithm to find the boundary of the image
	and mark the pixels that form the convex hull polygon.
	:param data: 2-d array of the image
	:param points: coordinates of the pixels that are black (0)
	:return: modified 2-d array
	"""
	hull = ConvexHull(points)
	plt.plot(points[:, 0], points[:, 1], 'o')
	for simplex in hull.simplices:
		plt.plot(points[simplex, 0], points[simplex, 1], 'k-')
		p1, p2 = points[simplex]

		# mark the two endpoints of this hull edge
		data[p1[0]][p1[1]] = 1
		data[p2[0]][p2[1]] = 1
	### display the convexHull result
	# toimage(data).show()
	# plt.show()

	hull.close()
	return data
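
A minimal usage sketch for the function above (the image data here is made up for illustration):

import numpy as np

data = np.ones((50, 50))
data[10:40, 10:40] = 0              # a black square on a white background
points = np.argwhere(data == 0)     # coordinates of the black (0) pixels
data = findBoundary(data, points)   # pixels on the hull edges are now set to 1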
Code Example #2
def getDelaunayEdges(points):
    """Return the list of edges between points without fake neighbors"""
    delau = Delaunay(points)
    hull = ConvexHull(points)
    hull = path.Path(points[hull.vertices])
    hull = expandPath(hull,1.2)
    allEdges = list()
    badEdges = list()
    for simplex in [x.tolist() for x in delau.simplices]:
        for edge in [[x, simplex[simplex.index(x)-1]] for x in simplex]:
            addCoupleToList(allEdges, edge)
        circumCircleCenter = getCircumCircleCenter(points[simplex])
        if not hull.contains_point(circumCircleCenter):
            simplexPoints = points[simplex]
            edge0 = dist(simplexPoints[0], simplexPoints[1])
            edge1 = dist(simplexPoints[1], simplexPoints[2])
            edge2 = dist(simplexPoints[2], simplexPoints[0])
            largestEdge = max(edge0, edge1, edge2)
            if edge0 == largestEdge:
                addCoupleToList(badEdges, [simplex[0], simplex[1]])
            elif edge1 == largestEdge:
                addCoupleToList(badEdges, [simplex[1], simplex[2]])
            elif edge2 == largestEdge:
                addCoupleToList(badEdges, [simplex[2], simplex[0]])
    
    return [x for x in allEdges if not checkCoupleInList(badEdges, x)]
Code Example #3
File: convex_hull.py Project: daavoo/PyntCloud
 def compute(self):
     """ABC API"""
     self.id = "CH({})".format(self._qhull_options)
     scipy_ConvexHull.__init__(self,
                               self._points,
                               self._incremental,
                               self._qhull_options)
Code Example #4
File: polygon.py Project: XiaoTaoWang/TADLib
 def __init__(self, points):
     """A customized constructor.
     """
     ConvexHull.__init__(self, points)
     indices = self.vertices
     points = self.points
     # Customize
     self.anchors = points[indices]
Code Example #5
File: environments.py Project: g4idrijs/acoular
    def r( self, c, gpos, mpos=0.0):
        """
        Calculates the virtual distances between grid point locations and
        microphone locations or the origin. These virtual distances correspond
        to travel times of the sound along a ray that is traced through the
        medium.

        Parameters
        ----------
        c : float
            The speed of sound to use for the calculation.
        gpos : array of floats of shape (3, N)
            The locations of points in the beamforming map grid in 3D cartesian
            co-ordinates.
        mpos : array of floats of shape (3, M), optional
            The locations of microphones in 3D cartesian co-ordinates. If not
            given, then only one microphone at the origin (0, 0, 0) is
            considered.

        Returns
        -------
        array of floats
            The distances in a two-dimensional (N, M) array of floats. If M==1,
            then only a one-dimensional array is returned.
        """
        if isscalar(mpos):
            mpos = array((0, 0, 0), dtype = float32)[:, newaxis]

        # the DE system
        def f1(t, y, v):
            x = y[0:3]
            s = y[3:6]
            vv, dv = v(x)
            sa = sqrt(s[0]*s[0]+s[1]*s[1]+s[2]*s[2])
            x = empty(6)
            x[0:3] = c*s/sa - vv # time reversal
            x[3:6] = dot(s, -dv.T) # time reversal
            return x

        # integration along a single ray
        def fr(x0, n0, rmax, dt, v, xyz, t):
            s0 = n0 / (c+dot(v(x0)[0], n0))
            y0 = hstack((x0, s0))
            oo = ode(f1)
            oo.set_f_params(v)
            oo.set_integrator('vode', 
                              rtol=1e-4, # accuracy !
                              max_step=1e-4*rmax) # for thin shear layer
            oo.set_initial_value(y0, 0)
            while oo.successful():
                xyz.append(oo.y[0:3])
                t.append(oo.t)
                if norm(oo.y[0:3]-x0)>rmax:
                    break
                oo.integrate(oo.t+dt)

        gs2 = gpos.shape[-1]
        gt = empty((gs2, mpos.shape[-1]))
        vv = self.ff.v
        NN = int(sqrt(self.N))
        for micnum, x0 in enumerate(mpos.T):
            xe = gpos.mean(1) # center of grid
            r = x0[:, newaxis]-gpos
            rmax = sqrt((r*r).sum(0).max()) # maximum distance
            nv = spiral_sphere(self.N, self.Om, b=xe-x0)
            rstep = rmax/sqrt(self.N)
            rmax += rstep
            tstep = rstep/c
            xyz = []
            t = []
            lastind = 0
            for i, n0 in enumerate(nv.T):
                fr(x0, n0, rmax, tstep, vv, xyz, t)
                if i and i % NN == 0:
                    if not lastind:
                        dd = ConvexHull(vstack((gpos.T, xyz)), incremental=True)
                    else:
                        dd.add_points(xyz[lastind:], restart=True)
                    lastind = len(xyz)
                    # ConvexHull includes grid if no grid points on hull
                    if dd.simplices.min()>=gs2:
                        break
            xyz = array(xyz)
            t = array(t)
            li = LinearNDInterpolator(xyz, t)
            gt[:, micnum] = li(gpos.T)
        if gt.shape[1] == 1:
            gt = gt[:, 0]
        return c*gt #return distance along ray
Code Example #6
    def per_drone_wind_multipliers(self, layer_wind_multiplier=0.9):
        drone_position_matrix = np.asarray([[d.pos[0], d.pos[1]]
                                            for d in self.drones])
        wind_vec_orth = np.asarray([-self.wind_dev[1], self.wind_dev[0]])
        no_drones_covered = 0
        no_exposed_current_layer = 0
        wind_fact = 1

        drone_position_matrix = np.append(drone_position_matrix,
                                          np.zeros(self.N).reshape(self.N, 1),
                                          axis=1)
        drone_position_matrix = np.append(drone_position_matrix,
                                          np.arange(self.N).reshape(self.N, 1),
                                          axis=1)
        #print (self.wind_dev)
        while (self.N - no_drones_covered) > 2:
            reached_edges = [False, False]
            exposed_hull = []
            no_exposed_current_layer = 0

            hull = ConvexHull(drone_position_matrix[:self.N -
                                                    no_drones_covered, 0:2])
            projections = np.matmul(drone_position_matrix[hull.vertices, 0:2],
                                    self.wind_dev[:2]) / np.linalg.norm(
                                        self.wind_dev[:2])
            projections_orth = np.matmul(
                drone_position_matrix[hull.vertices, 0:2],
                wind_vec_orth) / np.linalg.norm(wind_vec_orth)

            sorted_proj_indexes = np.argsort(projections)
            sorted_proj_orth_indexes = np.argsort(projections_orth)

            for i in sorted_proj_indexes:
                drone_position_matrix[hull.vertices[i], 2] = wind_fact
                exposed_hull.append(i)
                no_exposed_current_layer += 1
                if hull.vertices[i] == hull.vertices[
                        sorted_proj_orth_indexes[0]]:
                    reached_edges[0] = True
                elif hull.vertices[i] == hull.vertices[
                        sorted_proj_orth_indexes[-1]]:
                    reached_edges[1] = True

                if reached_edges[0] and reached_edges[1]:
                    break

            sorted_indexes = np.sort(hull.vertices[exposed_hull])
            for i in range(1, no_exposed_current_layer + 1):
                row = sorted_indexes[-i]
                drone_position_matrix[[self.N - no_drones_covered - i, row]] = \
                    drone_position_matrix[[row, self.N - no_drones_covered - i]]

            no_drones_covered += no_exposed_current_layer
            wind_fact = wind_fact * layer_wind_multiplier

        # All that's left is to check if we have one or two points left and fill that with the next wind_fact
        if (self.N - no_drones_covered) >= 1:
            drone_position_matrix[0, 2] = wind_fact
        if (self.N - no_drones_covered) == 2:
            drone_position_matrix[1, 2] = wind_fact

        # return the wind multipliers ordered by original drone index (column 3)
        return drone_position_matrix[np.argsort(drone_position_matrix[:, 3])][:, 2]
Code Example #7
def nn_point(xp, yp, variable, grid_loc, tri, neighbors, triangle_info):
    r"""Generate a natural neighbor interpolation of the observations to the given point.

    This uses the Liang and Hale approach [Liang2010]_. The interpolation will fail if
    the grid point has no natural neighbors.

    Parameters
    ----------
    xp: (N, ) ndarray
        x-coordinates of observations
    yp: (N, ) ndarray
        y-coordinates of observations
    variable: (N, ) ndarray
        observation values associated with (xp, yp) pairs.
        i.e., variable[i] is a unique observation at (xp[i], yp[i])
    grid_loc: (float, float)
        Coordinates of the grid point at which to calculate the
        interpolation.
    tri: object
        Delaunay triangulation of the observations.
    neighbors: (N, ) ndarray
        Simplex codes of the grid point's natural neighbors. The codes
        will correspond to codes in the triangulation.
    triangle_info: dictionary
        Pre-calculated triangle attributes for quick look-ups. Requires
        items 'cc' (circumcenters) and 'r' (radii) to be associated with
        each simplex code key from the Delaunay triangulation.

    Returns
    -------
    value: float
       Interpolated value for the grid location
    """
    edges = triangles.find_local_boundary(tri, neighbors)
    edge_vertices = [segment[0] for segment in polygons.order_edges(edges)]
    num_vertices = len(edge_vertices)

    p1 = edge_vertices[0]
    p2 = edge_vertices[1]

    polygon = list()
    c1 = triangles.circumcenter(grid_loc, tri.points[p1], tri.points[p2])
    polygon.append(c1)

    area_list = []
    total_area = 0.0

    for i in range(num_vertices):

        p3 = edge_vertices[(i + 2) % num_vertices]

        try:

            c2 = triangles.circumcenter(grid_loc, tri.points[p3],
                                        tri.points[p2])
            polygon.append(c2)

            for check_tri in neighbors:
                if p2 in tri.simplices[check_tri]:
                    polygon.append(triangle_info[check_tri]['cc'])

            pts = [polygon[i] for i in ConvexHull(polygon).vertices]
            value = variable[(tri.points[p2][0] == xp)
                             & (tri.points[p2][1] == yp)]

            cur_area = polygons.area(pts)

            total_area += cur_area

            area_list.append(cur_area * value[0])

        except (ZeroDivisionError, qhull.QhullError) as e:
            message = ('Error during processing of a grid. '
                       'Interpolation will continue but be mindful '
                       'of errors in output. ') + str(e)

            log.warning(message)
            return np.nan

        polygon = list()
        polygon.append(c2)

        p2 = p3

    return sum(x / total_area for x in area_list)
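
The final line is the natural-neighbor weighted average. Restated as a formula, with f_i the neighboring observations and A_i the clipped polygon areas accumulated in area_list and total_area above:

    value = (sum_i A_i * f_i) / (sum_i A_i)

Each element of area_list already holds the product cur_area * value[0], so dividing each element by total_area and summing yields exactly this weighted mean.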
Code Example #8
File: pourbaix_diagram.py Project: zizai/pymatgen
    def _get_hull_in_nph_nphi_space(self, entries):
        """
        Generates convex hull of pourbaix diagram entries in composition,
        npH, and nphi space.  This enables filtering of multi-entries
        such that only compositionally stable combinations of entries
        are included.

        Args:
            entries ([PourbaixEntry]): list of PourbaixEntries to construct
                the convex hull

        Returns: list of entries and stable facets corresponding to that
            list of entries

        """
        ion_entries = [entry for entry in entries if entry.phase_type == "Ion"]
        solid_entries = [
            entry for entry in entries if entry.phase_type == "Solid"
        ]

        # Pre-filter solids based on min at each composition
        logger.debug("Pre-filtering solids by min energy at each composition")
        sorted_entries = sorted(
            solid_entries,
            key=lambda x:
            (x.composition.reduced_composition, x.entry.energy_per_atom))
        grouped_by_composition = itertools.groupby(
            sorted_entries, key=lambda x: x.composition.reduced_composition)
        min_entries = [
            list(grouped_entries)[0]
            for comp, grouped_entries in grouped_by_composition
        ]
        min_entries += ion_entries

        logger.debug("Constructing nph-nphi-composition points for qhull")

        vecs = self._convert_entries_to_points(min_entries)
        maxes = np.max(vecs[:, :3], axis=0)
        extra_point = np.concatenate(
            [maxes, np.ones(self.dim) / self.dim], axis=0)

        # Add padding for extra point
        pad = 1000
        extra_point[2] += pad
        points = np.concatenate([vecs, np.array([extra_point])], axis=0)
        logger.debug("Constructing convex hull in nph-nphi-composition space")
        hull = ConvexHull(points, qhull_options="QJ i")

        # Create facets and remove top
        facets = [
            facet for facet in hull.simplices if not len(points) - 1 in facet
        ]

        if self.dim > 1:
            logger.debug("Filtering facets by pourbaix composition")
            valid_facets = []
            for facet in facets:
                comps = vecs[facet][:, 3:]
                full_comps = np.concatenate(
                    [comps, 1 - np.sum(comps, axis=1).reshape(len(comps), 1)],
                    axis=1)
                # Ensure a compositional interior point exists in the simplex
                if np.linalg.matrix_rank(full_comps) > self.dim:
                    valid_facets.append(facet)
        else:
            valid_facets = facets

        return min_entries, valid_facets
Code Example #9
import numpy as np
from matplotlib.patches import Polygon
from scipy.spatial import ConvexHull

def encircle(x, y, ax, **kw):
    """Draw a polygon patch around the convex hull of the (x, y) points."""
    p = np.c_[x, y]
    hull = ConvexHull(p)
    poly = Polygon(p[hull.vertices, :], **kw)
    ax.add_patch(poly)
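
A minimal usage sketch (the scatter data is made up for illustration):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
pts = np.random.rand(20, 2)
ax.scatter(pts[:, 0], pts[:, 1])
encircle(pts[:, 0], pts[:, 1], ax, ec="k", fc="gold", alpha=0.2)
plt.show()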
Code Example #10
def reader(cube, inPath):
    image = cv2.imread(inPath, -1)

    filtered = filterImage(image)

    # Detect all contours
    contours, hierarchy = cv2.findContours(filtered, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_NONE)
    facelets = getFacelets(contours)

    # Cluster facelets based on angle
    clusters = clusterFacelets(facelets)
    if clusters is None:
        return 'E'

    # Filter the 3 clusters to 9 facelets each
    filterClusters(clusters)

    # Label colors
    for i, cluster in enumerate(clusters):
        for j, facelet in enumerate(cluster):
            hull = cv2.convexHull(facelet[0])
            mask = np.zeros(image.shape, np.uint8)
            cv2.fillConvexPoly(mask, hull, (255, 255, 255))
            masked = cv2.bitwise_and(image, mask)

            color = identifyColor(masked)
            clusters[i][j].append(color)

    # Find center pieces
    centers = []
    for cluster in clusters:
        # Find the centre of mass of the 9 facelets
        total = [0, 0]
        for facelet in cluster:
            total[0] += facelet[3][0]
            total[1] += facelet[3][1]
        mid = [total[0] / 9, total[1] / 9]

        # Find closest to CM. This is center piece
        c = cluster[0]
        minD = 1000000
        for facelet in cluster:
            dX = facelet[3][0] - mid[0]
            dY = facelet[3][1] - mid[1]
            d = dX * dX + dY * dY
            if d < minD:
                minD = d
                c = facelet
        centers.append(c)

    # Identify each face. White/yellow is x, and y and z are cw from there
    hull = ConvexHull([center[3] for center in centers])
    centers = [centers[i] for i in hull.vertices]
    clusters = [clusters[i] for i in hull.vertices]
    whiteYellowIndex = 0
    for i, center in enumerate(centers):
        if center[4] == 'w' or center[4] == 'y':
            whiteYellowIndex = i
            break
    # Roll array so that first is x, second is y, third is z
    if whiteYellowIndex == 1:
        centers = [centers[i] for i in [1, 2, 0]]
        clusters = [clusters[i] for i in [1, 2, 0]]
    if whiteYellowIndex == 2:
        centers = [centers[i] for i in [2, 0, 1]]
        clusters = [clusters[i] for i in [2, 0, 1]]

    # For AB_C, clusters on the C face from A and B face perp bisector
    # In order: XY_X XZ_X XY_Y YZ_Y XZ_Z YZ_Z
    facePositions = [
        clusterWithBisector(centers[0][3], centers[1][3], clusters[0]),
        clusterWithBisector(centers[0][3], centers[2][3], clusters[0]),
        clusterWithBisector(centers[0][3], centers[1][3], clusters[1]),
        clusterWithBisector(centers[1][3], centers[2][3], clusters[1]),
        clusterWithBisector(centers[0][3], centers[2][3], clusters[2]),
        clusterWithBisector(centers[1][3], centers[2][3], clusters[2])
    ]

    # Save colors in cube
    orientation = centers[0][4] + centers[1][4]
    saveColors(cube, facePositions, clusters, orientation)
Code Example #11
from scipy.spatial import ConvexHull

def triangulateSphereIdx(pts):
    """Triangulate points on a sphere via their convex hull; returns simplex indices."""
    hull = ConvexHull(pts)
    return hull.simplices
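
A usage sketch for the helper above, assuming the input points lie on (or near) a sphere so that the hull facets form a proper surface triangulation:

import numpy as np

rng = np.random.default_rng(0)
pts = rng.normal(size=(100, 3))
pts /= np.linalg.norm(pts, axis=1, keepdims=True)   # project onto the unit sphere
tri = triangulateSphereIdx(pts)                     # (n_facets, 3) array of vertex indices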
Code Example #12
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull, convex_hull_plot_2d

# The convex hull of a random set of points:

points = np.random.rand(30, 2)
hull = ConvexHull(points)

# Plot it:

_ = convex_hull_plot_2d(hull)
plt.show()
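
The same outline can also be drawn by hand from the hull attributes, which helps when convex_hull_plot_2d's default styling is too rigid (a sketch using only documented attributes; hull.vertices is in counter-clockwise order for 2-D input):

plt.plot(points[:, 0], points[:, 1], 'o')
for simplex in hull.simplices:
    plt.plot(points[simplex, 0], points[simplex, 1], 'k-')
closed = np.append(hull.vertices, hull.vertices[0])   # close the loop
plt.plot(points[closed, 0], points[closed, 1], 'r--', lw=2)
plt.show()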
Code Example #13
    def _start_pts_noisy_filter(self, plotAll=False):
        startPoints = self._trajDataFeatures[["trajIndex", "startX", "startY"]]
        startCoordinates = startPoints[["startX", "startY"]].values
        corePointFlag = []

        # Fit Nearest Neighbors of start points dataset.
        neigh = NearestNeighbors(n_neighbors=self._startMinSamples, n_jobs=1)
        neigh.fit(startCoordinates)
        totalNums = len(startCoordinates)

        # Confirm core point flag and assign cluster labels for each data point.
        for ind, pts in enumerate(startCoordinates):
            print("Total is {}, now is {}.".format(totalNums, ind + 1))
            pts = pts.reshape(1, -1)
            distance, indices = neigh.kneighbors(pts)
            if distance[:, -1] <= self._startEps:
                corePointFlag.append(1)
            else:
                corePointFlag.append(0)
        startPoints["corePointFlag"] = np.array(corePointFlag)
        startPoints["labels"] = self.__assign_cluster_label(
            startCoordinates,
            eps=self._startEps,
            minSamples=self._startMinSamples)
        self._trajDataFeatures["startZone"] = startPoints["labels"].values

        # Calculate core density for each cluster.
        self._start = pd.DataFrame(None,
                                   columns=[
                                       "clusterId", "clusterPointNums",
                                       "convexHullArea", "density", "signal"
                                   ])
        uniqueLabels = list(
            startPoints[startPoints["labels"] != -1]["labels"].unique())
        clusterId = []
        clusterPointNums = []
        clusterCorePointNums = []
        convexHullArea = []
        density = []
        for i in uniqueLabels:
            clusterId.append(i)
            clusterTmp = startPoints[startPoints["labels"] == i]
            clusterPointNums.append(len(clusterTmp))
            clusterCorePointNums.append(
                len(clusterTmp[clusterTmp["corePointFlag"] == 1]))
            clusterTmp = clusterTmp[clusterTmp["corePointFlag"] == 1]
            clusterTmp.drop(["corePointFlag", "labels", "trajIndex"],
                            axis=1,
                            inplace=True)
            corePtsTmp = clusterTmp.values
            hull = ConvexHull(corePtsTmp)
            # for 2-D points, ConvexHull.volume is the enclosed area (.area is the perimeter)
            convexHullArea.append(hull.volume)
            density.append(clusterCorePointNums[-1] / hull.volume)
        self._start = {
            "clusterId": clusterId,
            "clusterCorePointNums": clusterCorePointNums,
            "clusterPointNums": clusterPointNums,
            "convexHullArea": convexHullArea,
            "density": density
        }
        self._start = pd.DataFrame(self._start)
        self._startAverageDensity = self._startAlpha * self._start[
            "density"].mean()
        self._start["signal"] = self._start[
            "density"].values > self._startAverageDensity

        noisyCluster = self._start["clusterId"][~self._start["signal"]].values

        # Step 1: remove all non-core points
        self._trajDataFeatures["startBrokenFlag"] = (
            startPoints["corePointFlag"] == 0)
        # Step 2: remove all points which are noisy points for DBSCAN
        self._trajDataFeatures["startBrokenFlag"] = self._trajDataFeatures[
            "startBrokenFlag"] | (startPoints["labels"] == -1)
        for ind in noisyCluster:
            self._trajDataFeatures["startBrokenFlag"] = self._trajDataFeatures[
                "startBrokenFlag"] | (startPoints["labels"] == ind)

        # Plot and save all results
        if plotAll:
            convexHullPts = startPoints[(startPoints["corePointFlag"] == 1)
                                        & (startPoints["labels"] != -1)][[
                                            "startX", "startY"
                                        ]].values
            uniqueLabels = list(
                startPoints[startPoints["labels"] != -1]["labels"].unique())
            labels = startPoints[(startPoints["corePointFlag"] == 1) & (
                startPoints["labels"] != -1)]["labels"].values
            plt.figure()
            plt.imshow(background)
            density = []
            for ind, item in enumerate(uniqueLabels):
                index = labels == item
                tmp = convexHullPts[index]
                hull = ConvexHull(tmp)
                for simplex in hull.simplices:
                    plt.plot(tmp[simplex, 0],
                             tmp[simplex, 1],
                             'k-',
                             linewidth=0.9)
                plt.plot(convexHullPts[index, 0],
                         convexHullPts[index, 1],
                         '+',
                         markersize=0.3,
                         color="red")  #RGB[ind])
                pos = convexHullPts[index][2] - 40
                d = len(convexHullPts[index, 0]) / hull.volume  # .volume is the area in 2-D
                d = round(d, 3)
                density.append(d)
                #plt.text(pos[0], pos[1], str(ind) + "_" + str(density[ind]), fontsize=10, color='red') #str(density)

            cond = [round(sum(density) * self._startAlpha / (ind + 1))]
            plt.title("Entry core points clusters")
            plt.savefig("..//Plots//EntryCorePointsClusteringResults.pdf",
                        dpi=700,
                        bbox_inches='tight')
            plt.figure()
            plt.plot(self._start["clusterId"].astype(int).values,
                     self._start["density"].values, 'k-s')
            x = np.linspace(-1, 7, 2000)
            y = np.array(cond * 2000, dtype='float64')
            plt.plot(x, y, 'b--')
            plt.title("Entry points average density")
            plt.legend(["density", "threshold"])
            plt.xlabel("clusterID")
            plt.ylabel("Core area")
            plt.xlim(self._start["clusterId"].min() - 0.1,
                     self._start["clusterId"].max() + 0.1)
            plt.ylim(self._start["density"].min() - 0.5,
                     self._start["density"].max() + 0.5)
            plt.savefig("..//Plots//EntryCorePointsDensityPlots.pdf",
                        dpi=700,
                        bbox_inches='tight')
            plt.close("all")

            plt.figure()
            plt.imshow(background)
            normalPts = self._trajDataFeatures[
                self._trajDataFeatures["startBrokenFlag"] == False][[
                    "startX", "startY"
                ]].values
            plt.plot(normalPts[:, 0],
                     normalPts[:, 1],
                     '+',
                     markersize=0.3,
                     color="red")
            plt.title("Start clusters after filtering")
            plt.savefig("..//Plots//EntryCorePointsAfterFiltering.pdf",
                        dpi=700,
                        bbox_inches='tight')
        return self._start
Code Example #14
File: geomengines.py Project: haudren/stabilipy
def scipy_convexify_polyhedron(hrep):
    points = np.array(cdd.Polyhedron(hrep).get_generators())[:, 1:]
    ch = ConvexHull(points, qhull_options='QbB')
    return ch.points
Code Example #15
num_corners = 100
dst = cv2.goodFeaturesToTrack(gray, num_corners, 0.01, 10)
dst = np.int0(dst)

# swap BGR (OpenCV order) to RGB; .copy() is required because slicing returns a view
blue = img[:,:,0].copy()
img[:,:,0] = img[:,:,2]
img[:,:,2] = blue

# Uncomment to see the corners
# for corner in dst:
#     x,y = corner.ravel()
#     cv2.circle(img,(x,y), 10, [0,255,0], -1)

corners = np.array([corner.ravel() for corner in dst])

hull = ConvexHull(corners)
# for simplex in hull.simplices:
#     first_corner = tuple(corners[simplex, 0].tolist())
#     second_corner = tuple(corners[simplex, 1].tolist())
#     if first_corner in points_dict:
#         points_dict[first_corner].append(second_corner)
#     else:
#         points_dict[first_corner] = [second_corner]
#
#     if second_corner in points_dict:
#         points_dict[second_corner].append(first_corner)
#     else:
#         points_dict[second_corner] = [first_corner]
#
#     plt.plot(first_corner, second_corner, 'k-', c='r')
# plt.show()
Code Example #16
def generate_guess(vdc, pr_vec, show_plots=False):
    """
    Given a single unfolded loop and centroid, return the initial guess for the fitting.
    We generate most of the guesses by looking at the loop centroid and at the
    nearest intersection points with the loop, which is a polygon.

    Parameters
    -----------
    vdc : 1D numpy array
        DC offsets
    pr_vec : 1D numpy array
        Piezoresponse or unfolded loop
    show_plots : Boolean (Optional. Default = False)
        Whether or not to plot the convex hull, centroid, and intersection points

    Returns
    -----------------
    init_guess_coef_vec : 1D Numpy array
        Fit guess coefficient vector
    """

    points = np.transpose(np.array([np.squeeze(vdc), pr_vec]))  # [points,axis]

    geom_centroid, geom_area = calculate_loop_centroid(points[:, 0], points[:, 1])

    hull = ConvexHull(points)
    """
    Now we need to find the intersection points on the N,S,E,W
    the simplex of the complex hull is essentially a set of line equations.
    We need to find the two lines (top and bottom) or (left and right) that
    interect with the vertical / horizontal lines passing through the geometric centroid
    """
    def find_intersection(A, B, C, D):
        """
        Finds the coordinates where two line segments intersect

        Parameters
        ------------
        A, B, C, D : Tuple or 1D list or 1D numpy array
            (x,y) coordinates of the points that define the two line segments AB and CD

        Returns
        ----------
        obj : None or tuple
            None if not intersecting. (x,y) coordinates of intersection
        """
        def ccw(A, B, C):
            """Credit - StackOverflow"""
            return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0])

        def line(p1, p2):
            """Credit - StackOverflow"""
            A = (p1[1] - p2[1])
            B = (p2[0] - p1[0])
            C = (p1[0] * p2[1] - p2[0] * p1[1])
            return A, B, -C

        def intersection(L1, L2):
            """
            Finds the intersection of two lines (NOT line segments).
            Credit - StackOverflow
            """
            D = L1[0] * L2[1] - L1[1] * L2[0]
            Dx = L1[2] * L2[1] - L1[1] * L2[2]
            Dy = L1[0] * L2[2] - L1[2] * L2[0]
            if D != 0:
                x = Dx / D
                y = Dy / D
                return x, y
            else:
                return None

        if not (ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D)):
            return None
        return intersection(line(A, B), line(C, D))

    # start and end coordinates of each line segment defining the convex hull
    outline_1 = np.zeros((hull.simplices.shape[0], 2), dtype=float)
    outline_2 = np.zeros((hull.simplices.shape[0], 2), dtype=float)
    for index, pair in enumerate(hull.simplices):
        outline_1[index, :] = points[pair[0]]
        outline_2[index, :] = points[pair[1]]
    """Find the coordinates of the points where the vertical line through the
    centroid intersects with the convex hull"""
    y_intersections = []
    for pair in range(outline_1.shape[0]):
        x_pt = find_intersection(outline_1[pair], outline_2[pair],
                                 [geom_centroid[0], hull.min_bound[1]],
                                 [geom_centroid[0], hull.max_bound[1]])
        if x_pt is not None:
            y_intersections.append(x_pt)
    '''
    Find the coordinates of the points where the horizontal line through the
    centroid intersects with the convex hull
    '''
    x_intersections = []
    for pair in range(outline_1.shape[0]):
        x_pt = find_intersection(outline_1[pair], outline_2[pair],
                                 [hull.min_bound[0], geom_centroid[1]],
                                 [hull.max_bound[0], geom_centroid[1]])
        if x_pt is not None:
            x_intersections.append(x_pt)
    '''
    Default values if no intersections can be found.
    '''
    if len(y_intersections) == 0:
        min_y_intercept = min(pr_vec)
        max_y_intercept = max(pr_vec)
    else:
        min_y_intercept = min(y_intersections[0][1], y_intersections[1][1])
        max_y_intercept = max(y_intersections[0][1], y_intersections[1][1])

    if len(x_intersections) == 0:
        min_x_intercept = min(vdc) / 2.0
        max_x_intercept = max(vdc) / 2.0
    else:
        min_x_intercept = min(x_intersections[0][0], x_intersections[1][0])
        max_x_intercept = max(x_intersections[0][0], x_intersections[1][0])

    # Only the first four parameters use the information from the intercepts
    # a3, a4 are swapped in Stephen's figure. That was causing the branches to swap during fitting
    # the a3, a4 are fixed now below:
    init_guess_coef_vec = np.zeros(shape=9)
    init_guess_coef_vec[0] = min_y_intercept
    init_guess_coef_vec[1] = max_y_intercept - min_y_intercept
    init_guess_coef_vec[2] = min_x_intercept
    init_guess_coef_vec[3] = max_x_intercept
    init_guess_coef_vec[4] = 0
    init_guess_coef_vec[5] = 2  # 0.5
    init_guess_coef_vec[6] = 2  # 0.2
    init_guess_coef_vec[7] = 2  # 1.0
    init_guess_coef_vec[8] = 2  # 0.2

    if show_plots:
        fig, ax = plt.subplots()
        ax.plot(points[:, 0], points[:, 1], 'o')
        ax.plot(geom_centroid[0], geom_centroid[1], 'r*')
        ax.plot([geom_centroid[0], geom_centroid[0]],
                [hull.max_bound[1], hull.min_bound[1]], 'g')
        ax.plot([hull.min_bound[0], hull.max_bound[0]],
                [geom_centroid[1], geom_centroid[1]], 'g')
        for simplex in hull.simplices:
            ax.plot(points[simplex, 0], points[simplex, 1], 'k')
        ax.plot(x_intersections[0][0], x_intersections[0][1], 'r*')
        ax.plot(x_intersections[1][0], x_intersections[1][1], 'r*')
        ax.plot(y_intersections[0][0], y_intersections[0][1], 'r*')
        ax.plot(y_intersections[1][0], y_intersections[1][1], 'r*')
        ax.plot(vdc, loop_fit_function(vdc, init_guess_coef_vec))

    return init_guess_coef_vec
Code Example #17
File: encounterSurface.py Project: JujuDel/CodinGame

# ################################################ #
# #####                 MAIN                 ##### #
# ################################################ #

if __name__ == '__main__':
    n = int(input())
    m = int(input())

    p = []
    for i in range(n):
        x, y = map(int, input().split())
        p.append((x, y))
    p = np.array(p)
    p = p[ConvexHull(p).vertices]
    print("Pol1:", *p, file=sys.stderr)

    q = []
    for i in range(m):
        x, y = map(int, input().split())
        q.append((x, y))
    q = np.array(q)
    q = q[ConvexHull(q).vertices]
    print("Pol2:", *q, file=sys.stderr)

    inter = convexIntersect(Polygon([Point(x, y) for (x, y) in p]),
                            Polygon([Point(x, y) for (x, y) in q]))

    if inter:
        inter.make_convex()
Code Example #18
File: whales.py Project: lelou6666/aggregation
        image_file = cbook.get_sample_data(f_name[0])
        image = plt.imread(image_file)
        ax1.imshow(image)

        # edges = cv2.Canny(image,50,400)

        im2, contours, hierarchy = cv2.findContours(im_bw, cv2.RETR_TREE,
                                                    cv2.CHAIN_APPROX_NONE)

        for ii, cnt in enumerate(contours):
            if cnt.shape[0] > 20:
                cnt = np.reshape(cnt, (cnt.shape[0], cnt.shape[2]))
                cnt_list = cnt.tolist()
                X, Y = zip(*cnt_list)
                # plt.plot(X,Y)
                hull = ConvexHull(cnt)
                # plt.plot(cnt[hull.vertices,0], cnt[hull.vertices,1], 'r--', lw=2)

                shapely_points = [
                    shapely.geometry.shape({
                        "type": "Point",
                        "coordinates": (x, y)
                    }) for (x, y) in zip(X, Y)
                ]
                concave_hull, edge_points = alpha_shape(shapely_points,
                                                        alpha=0.01)

                # print edge_points

                if isinstance(concave_hull, shapely.geometry.Polygon):
                    # plot_polygon(ax1,concave_hull)
                    pass
Code Example #19
def color_transfer(Is, It):
    global pi, pt, Vt
    H, W, _ = Is.shape
    Ht, Wt, _ = It.shape
    mink_norm = 5
    sigma = 2
    kappa = 10
    # step 1:White-balancing and rotating
    # Grey-Egde algorithm to estimate illuminations of the source and target
    print('calculating the white balance')
    [wRs, wGs, wBs] = weightedGE(Is, kappa, mink_norm, sigma)
    WBs = np.array([wRs, wGs, wBs])
    [wRt, wGt, wBt] = weightedGE(It, kappa, mink_norm, sigma)
    WBt = np.array([wRt, wGt, wBt])

    WBs = np.sqrt(3) * WBs / (np.sqrt(np.sum(WBs**2)) * 1.0)
    WBt = np.sqrt(3) * WBt / (np.sqrt(np.sum(WBt**2)) * 1.0)
    Is = Is.reshape(-1, 3, order='F').T
    It = It.reshape(-1, 3, order='F').T
    Is = np.diag(1.0 / WBs).dot(Is)  # pass
    It = np.diag(1.0 / WBt).dot(It)  # pass
    Is = rotate_to_Zaxis(Is, np.array([1, 1, 1]))  # pass
    It = rotate_to_Zaxis(It, np.array([1, 1, 1]))  # pass

    # Step 2: Luminance Matching
    print('matching the Luminance')
    Is = Is.T
    It = It.T
    Is[:, 2] = normalizeIntensity(Is[:, 2], It[:, 2], H, W)

    # Step 3: Color Gamut Aligning
    print('Color Gamut Aligning')
    Ms = np.mean(Is, 0)
    Mt = np.mean(It, 0)
    Is = Is - np.matlib.repmat(Ms, H * W, 1)
    It = It - np.matlib.repmat(Mt, Ht * Wt, 1)
    hull_s = ConvexHull(Is)
    Chi = hull_s.simplices
    hull_t = ConvexHull(It)
    Cht = hull_t.simplices
    Vt = hull_t.volume
    idi = np.unique(Chi)
    idt = np.unique(Cht)
    # SciPy simplex indices are 0-based, so they index Is/It directly
    pi = Is[idi, :]
    pt = It[idt, :]
    # compute the optimal matrix
    x0 = np.array([0, 1, 1])
    x = fmin_bfgs(myfunoptimal, x0, maxiter=50, disp=1)
    T = np.array([[x[1] * np.cos(x[0]), -x[1] * np.sin(x[0]), 0],
                  [x[2] * np.sin(x[0]), x[2] * np.cos(x[0]), 0], [0, 0, 1]])
    # Align two gamuts
    Io = T.dot(Is.T)
    Mt[2] = Ms[2]
    Io = Io + np.matlib.repmat(Mt.reshape(1, 3).T, 1, H * W)

    # STEP 4: Rotate back and undo white-balancing
    print('Rotate back and undo white-balancing')
    Io = rotate_back_from_Zaxis(Io, np.array([1, 1, 1]))
    Io = np.diag(WBt).dot(Io)
    Io[Io < 0] = 0
    Io[Io > 1] = 1
    Io = Io.T
    Io_ = np.reshape(Io, (H, W, 3), order='F')
    Io_2 = np.asarray(Io_ * 255, dtype=np.uint8)
    Io_2 = cv2.cvtColor(Io_2, cv2.COLOR_RGB2BGR)

    return Io_2
Code Example #20
if __name__ == '__main__':
    mpl.rc('text', usetex=True)
    mpl.rcParams['mathtext.fontset'] = 'stix'
    mpl.rcParams['font.family'] = 'STIXGeneral'
    count = 20
    random.seed(18.9654)
    ref_point2d = [0.001, 0.001]
    set2d = np.zeros((count, 2))
    for i in range(count):
        for u in range(2):
            rand = random.random()
            # compare against the matching coordinate of the reference point
            set2d[i, u] = rand if (rand > ref_point2d[u]) or (
                rand > 0.3) else random.random()
    #hv_2d_calc = HyperVolumeCalculator(ref_point2d)
    #pf = hv_2d_calc.extract_front(set2d)
    hullraw = ConvexHull(set2d)
    hull = [set2d[x] for x in hullraw.vertices]
    hull.append(hull[0])
    size = 0.48 * 5.8091048611149611602
    fig = plt.figure(figsize=[size, 0.75 * size])

    fig.set_size_inches(size, 0.7 * size)
    ax = fig.add_subplot(111)
    ax.set_axisbelow(True)
    plt.axis([0, max(set2d[:, 0] + 0.07), 0.05, max(set2d[:, 1] * 1.1)])
    plt.setp(ax.get_xticklabels(), fontsize=9)
    plt.setp(ax.get_yticklabels(), fontsize=9)
    pfx = [hull[i][0] for i in range(len(hull))]
    pfy = [hull[u][1] for u in range(len(hull))]
    plt.plot(set2d[:, 0], set2d[:, 1], 'bo', markersize=4)
    plt.plot(pfx, pfy, 'ro', markersize=4)
Code Example #21
def cluster_label_points(title, points, ax, eps, min_cluster, n_clusters,
                         clabel_size, words_only):
    db = DBSCAN(eps=eps, min_samples=min_cluster).fit(points)
    #clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster)
    #db = clusterer.fit(points)
    labels = db.labels_
    texts = []
    bboxes = []
    r = get_renderer(ax.get_figure())
    for l in set(labels):
        text_set = False
        if l == -1:
            continue
        ind = np.argwhere(labels == l)[:, 0]
        #print("\n##\nlabel: {}, {} documents".format(l,len(ind)))
        lpoints = points[ind]
        if len(ind) > min_cluster:
            try:
                hull = ConvexHull(lpoints)
            except Exception:
                # qhull raises on degenerate clusters (e.g. collinear points); skip them
                continue
            cx = np.mean(hull.points[hull.vertices, 0])
            cy = np.mean(hull.points[hull.vertices, 1])
            c = [cx, cy]

            if words_only:
                title = title.split(",")[0].replace("{", "")

                text = ax.annotate(title,
                                   c,
                                   fontsize=clabel_size,
                                   ha="center",
                                   va="center",
                                   bbox={
                                       'facecolor': "white",
                                       'alpha': 0.4,
                                       'pad': 0.2,
                                       'boxstyle': 'round'
                                   })
                texts.append(text)
                #return text
                #break

            else:
                title = title.split(",")[0].replace("{", "")
                x = []
                y = []
                for i, v in enumerate(lpoints[hull.vertices, :]):
                    p1 = extend_points(c, v)
                    #p2 = extend_points(c,v])
                    x.append(p1[0])
                    y.append(p1[1])
                    #x.append(p2[0])
                    #y.append(p2[1])
                    # plt.plot(
                    #     [p1[0],p2[0]],
                    #     [p1[1],p2[1]],
                    #     'k-',
                    #     linewidth=0.5
                    # )
                    if not text_set:
                        if p1[0] > cx:
                            ha = "left"
                        else:
                            ha = "right"
                        pl = extend_points(c, p1)
                        texts.append(
                            ax.annotate(
                                title,
                                c,
                                #p1,
                                #xytext=pl,
                                va="center",
                                #ha=ha,
                                ha="center",
                                fontsize=clabel_size,
                                #arrowprops=dict(width=0.2,headwidth=0.1),
                                bbox={
                                    'facecolor': "white",
                                    'alpha': 0.4,
                                    'pad': 0.2,
                                    'boxstyle': 'round'
                                }))
                        text_set = True
                orig_len = len(x)
                x = x[-3:-1] + x + x[1:3]
                y = y[-3:-1] + y + y[1:3]

                t = np.arange(len(x))
                ti = np.linspace(2, orig_len + 1, 10 * orig_len)

                x2 = interp1d(t, x, kind='cubic')(ti)
                y2 = interp1d(t, y, kind='cubic')(ti)

                x = lpoints[hull.vertices, 0]
                x = np.append(x, [x[:1]])
                y = lpoints[hull.vertices, 1]
                y = np.append(y, [y[:1]])
                coords = np.array(list(zip(x, y)))
                coords = chaikins_corner_cutting(coords)

                x2 = coords[:, 0]
                y2 = coords[:, 1]

                #x2 = interpolate.BSpline(t, x, nt)
                #y2 = interpolate.BSpline(t, y, nt)
                plt.plot(x2, y2, 'k-', linewidth=0.8)
                # break here, just do the biggest cluster
                #break
    if len(texts) == 0:
        print(f"couldn't find a cluster for {title}")
    return texts
Code Example #22
File: voronoi.py Project: asya-b/voronoi
    print('finding nearest neighbours...\t\t\t{0:06.2f}%'.format(
        i / len(vor.point_region) * 100),
          end='\r')
    nbrs_idx = kdt.query(points[i], k=n_neighbors)[1]

    neighborhood_area = 0
    neighborhood_probability = 0
    for idx in nbrs_idx:
        reg_idx = vor.point_region[idx]
        indices = vor.regions[reg_idx]
        prob = probs[idx]
        if -1 in indices:  # some regions may be open
            neighborhood_area += np.inf
            neighborhood_probability += prob
        else:
            neighborhood_area += ConvexHull(vor.vertices[indices]).volume
            neighborhood_probability += prob
    den[i] = neighborhood_probability / neighborhood_area

# normalised to central density = 1
den /= max(den)

# remove particles below given limit
den = np.ma.masked_where(den <= lower_limit, den).filled(0)

# normalize chosen colormap
colormap = copy.copy(cm.get_cmap('magma'))
colormap.set_bad(colormap(0))
mapper = cm.ScalarMappable(norm=LogNorm(vmax=max(den), vmin=lower_limit),
                           cmap=colormap)
Code Example #23
File: clustering.py Project: DABAKER165/pepMeld
 def expand_cluster_convexhull(self, df_in_name, df_cluster_name,
                               df_cluster_expanded_name):
     from scipy.spatial import ConvexHull
     if len(self.df_dict[df_cluster_name]) < 1:
         print('NO CLUSTERS EXIST:')
         print(df_cluster_expanded_name)
         self.df_dict[df_cluster_expanded_name] = pd.DataFrame(columns=[
             'CLUSTER', self.sample_name_column, 'PERCENTILE',
             'CLUSTER_COUNT', 'ORIGINAL_CLUSTER_COUNT', 'ORIGINAL_CLUSTER',
             'CLUSTER_RATIO', 'CLUSTERED', 'EXPANDED'
         ])
         return
     print('CLUSTERS EXIST:')
     print(df_cluster_expanded_name)
     cluster_names_df = self.df_dict[df_cluster_name][[
         'CLUSTER', self.sample_name_column, 'PERCENTILE', 'CLUSTER_COUNT'
     ]].drop_duplicates(subset=None, keep='first', inplace=False)
     self.df_dict[df_cluster_expanded_name] = pd.DataFrame()
     for index, row in cluster_names_df.iterrows():
         # get the cluster
         cluster_points_df_i = self.df_dict[df_cluster_name].loc[
             (self.df_dict[df_cluster_name]['PERCENTILE'] ==
              row['PERCENTILE'])
             & (self.df_dict[df_cluster_name]['CLUSTER'] == row['CLUSTER'])
             & (self.df_dict[df_cluster_name][self.sample_name_column]
                == row[self.sample_name_column])][['X', 'Y']]
         # convert DF to an array
         cluster_points_array = cluster_points_df_i.values
         # get the hull for the cluster
         hull = ConvexHull(cluster_points_array)
         # get the vertices of the hull
         hull_vertices = cluster_points_array[hull.vertices]
         # get points inside the hull vertices as a numpy array
         contained_points = in_hull(
             self.df_dict[df_in_name][['X', 'Y']].values, hull_vertices)
         # contained_points
         # convert back to dataframe
         contained_points_df = pd.DataFrame(contained_points,
                                            columns=['X', 'Y'])
         # name the cluster, sample name, percentile and original cluster count
         contained_points_df['CLUSTER'] = [row['CLUSTER']] * len(
             contained_points_df.index)
         contained_points_df[self.sample_name_column] = [
             row[self.sample_name_column]
         ] * len(contained_points_df.index)
         contained_points_df['PERCENTILE'] = [row['PERCENTILE']] * len(
             contained_points_df.index)
         contained_points_df['ORIGINAL_CLUSTER_COUNT'] = [
             row['CLUSTER_COUNT']
         ] * len(contained_points_df.index)
         # get the newly expanded cluster count
         contained_points_df['CLUSTER_COUNT'] = contained_points_df.groupby(
             'CLUSTER')['CLUSTER'].transform('count')
         # Label the points that are part of the original cluster
         # get the points of the original cluster
         original_cluster_points_df_i = cluster_points_df_i[['X', 'Y']]
         # label this data frame as the original
         original_cluster_points_df_i['ORIGINAL_CLUSTER'] = [
             'ORIGINAL'
         ] * len(original_cluster_points_df_i.index)
         contained_points_df = contained_points_df.merge(
             original_cluster_points_df_i, how='outer', on=['X', 'Y'])
         # contained_points_df
         self.df_dict[df_cluster_expanded_name] = pd.concat(
             [self.df_dict[df_cluster_expanded_name], contained_points_df],
             ignore_index=True)
     self.df_dict[df_cluster_expanded_name].ORIGINAL_CLUSTER.fillna(
         'EXPANDED', inplace=True)
     self.df_dict[df_cluster_expanded_name]['CLUSTERED'] = [
         'CLUSTERED'
     ] * len(self.df_dict[df_cluster_expanded_name].index)
     self.df_dict[df_cluster_expanded_name]['CLUSTER_RATIO'] = round(
         self.df_dict[df_cluster_expanded_name]['ORIGINAL_CLUSTER_COUNT'] /
         self.df_dict[df_cluster_expanded_name]['CLUSTER_COUNT'], 2)
Code Example #24
from scipy.spatial import ConvexHull

def filter(pts):
    """Reduce a point set to its convex hull vertices."""
    hull = ConvexHull(pts, qhull_options='Q12')
    return [pts[i] for i in hull.vertices.tolist()]
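
A quick usage sketch (note that this filter shadows Python's built-in of the same name):

import numpy as np

pts = np.random.rand(200, 2).tolist()
corners = filter(pts)                  # only the hull vertices survive
print(len(pts), '->', len(corners))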
Code Example #25
    def transform(self, X):
        d = []

        for _, row in X.iterrows():
            df_x = row.iloc[np.arange(3, 33, 3).tolist()]
            df_y = row.iloc[np.arange(4, 34, 3).tolist()]
            df_z = row.iloc[np.arange(5, 35, 3).tolist()]
            x = np.array(df_x[df_x.notnull()].tolist())
            y = np.array(df_y[df_y.notnull()].tolist())
            z = np.array(df_z[df_z.notnull()].tolist())

            n = len(x)
            digits = 4  # number of decimal places in calculations

            pts = np.vstack((x, y, z)).T

            # length of all lines to the origin
            lengths = []
            for pt in pts:
                length = np.linalg.norm(pt)
                lengths.append(length)
            lengths = np.array(lengths)

            # angles between all points and origin
            # e.g. points a, b, c would give 3 angles: a-o-b, a-o-c, b-o-c
            angles = []
            areas = []
            for ang in combinations(pts, 2):
                # Calculate all 3 angles of triangle using 3 sides
                O0 = np.array(ang[0])
                O1 = np.array(ang[1])

                cosine_angle = np.dot(O0, -1 * O1) / (np.linalg.norm(O0) *
                                                      np.linalg.norm(-1 * O1))
                angle = np.arccos(cosine_angle)

                if not np.isnan(angle):
                    angles.append(angle)

                    # Area
                    area = 0.5 * np.linalg.norm(np.cross(O1, -1 * O0))
                    areas.append(area)

            # Determine convex hull of posture, including origin as a data point
            pts = np.vstack((pts, np.array([0, 0, 0])))
            hull = ConvexHull(pts)

            # Centroid of convex hull
            cx = np.mean(hull.points[hull.vertices, 0])
            cy = np.mean(hull.points[hull.vertices, 1])
            cz = np.mean(hull.points[hull.vertices, 2])

            d.append({
                'id': int(row.iloc[0]),
                'class': int(row.iloc[1]),
                'user': int(row.iloc[2]),
                'n_markers': n,
                'x_mean': np.mean(x).round(digits),
                'x_std': np.std(x).round(digits),
                'x_min': np.min(x).round(digits),
                'x_max': np.max(x).round(digits),
                'y_mean': np.mean(y).round(digits),
                'y_std': np.std(y).round(digits),
                'y_min': np.min(y).round(digits),
                'y_max': np.max(y).round(digits),
                'z_mean': np.mean(z).round(digits),
                'z_std': np.std(z).round(digits),
                'z_min': np.min(z).round(digits),
                'z_max': np.max(z).round(digits),
                'l_mean': np.mean(lengths).round(digits),
                'l_std': np.std(lengths).round(digits),
                'l_min': np.min(lengths).round(digits),
                'l_max': np.max(lengths).round(digits),
                'ang_mean': np.mean(angles).round(digits),
                'ang_std': np.std(angles).round(digits),
                'ang_min': np.min(angles).round(digits),
                'ang_max': np.max(angles).round(digits),
                'area_mean': np.mean(areas).round(digits),
                'area_std': np.std(areas).round(digits),
                'area_min': np.min(areas).round(digits),
                'area_max': np.max(areas).round(digits),
                'conv_hull_vol': np.round(hull.volume, digits),
                'conv_hull_cx': np.round(cx, digits),
                'conv_hull_cy': np.round(cy, digits),
                'conv_hull_cz': np.round(cz, digits)
            })

        df = pd.DataFrame(d)
        X = df[[
            'id', 'class', 'user', 'n_markers', 'x_mean', 'x_std', 'x_min',
            'x_max', 'y_mean', 'y_std', 'y_min', 'y_max', 'z_mean', 'z_std',
            'z_min', 'z_max', 'l_mean', 'l_std', 'l_min', 'l_max', 'ang_mean',
            'ang_std', 'ang_min', 'ang_max', 'area_mean', 'area_std',
            'area_min', 'area_max', 'conv_hull_vol', 'conv_hull_cx',
            'conv_hull_cy', 'conv_hull_cz'
        ]]

        return X
Code Example #26
    def minimum_bounding_rectangle(points):
        """
        Find the smallest bounding rectangle for a set of points.
        Returns a set of points representing the corners of the bounding box.

        :param points: a list of coordinates
        :return: an nx2 matrix of coordinates
        """
        points = np.array(points)
        points = points.reshape(len(points), 2)

        pi2 = np.pi / 2.

        # get the convex hull for the points
        hull_points = points[ConvexHull(points).vertices]

        # calculate edge angles
        edges = hull_points[1:] - hull_points[:-1]

        angles = np.arctan2(edges[:, 1], edges[:, 0])

        angles = np.abs(np.mod(angles, pi2))
        angles = np.unique(angles)

        # find rotation matrices
        # XXX both work
        rotations = np.vstack([
            np.cos(angles),
            np.cos(angles - pi2),
            np.cos(angles + pi2),
            np.cos(angles)
        ]).T
        # rotations = np.vstack([
        #     np.cos(angles),
        #     -np.sin(angles),
        #     np.sin(angles),
        #     np.cos(angles)]).T
        rotations = rotations.reshape((-1, 2, 2))

        # apply rotations to the hull
        rot_points = np.dot(rotations, hull_points.T)

        # find the bounding points
        min_x = np.nanmin(rot_points[:, 0], axis=1)
        max_x = np.nanmax(rot_points[:, 0], axis=1)
        min_y = np.nanmin(rot_points[:, 1], axis=1)
        max_y = np.nanmax(rot_points[:, 1], axis=1)

        # find the box with the best area
        areas = (max_x - min_x) * (max_y - min_y)
        best_idx = np.argmin(areas)

        # return the best box
        x1 = max_x[best_idx]
        x2 = min_x[best_idx]
        y1 = max_y[best_idx]
        y2 = min_y[best_idx]
        r = rotations[best_idx]

        rval = np.zeros((4, 2))
        rval[0] = np.dot([x1, y2], r)
        rval[1] = np.dot([x2, y2], r)
        rval[2] = np.dot([x2, y1], r)
        rval[3] = np.dot([x1, y1], r)

        return rval
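
A small sanity check for the helper above, treating it as a standalone function with made-up data: the minimum bounding rectangle of an axis-aligned unit square is the square itself, up to vertex order and floating-point noise.

square = [(0, 0), (1, 0), (1, 1), (0, 1)]
print(minimum_bounding_rectangle(square))   # four corners of (approximately) the unit square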
Code Example #27
File: Path.py Project: sciple/neurobau
 def get_explored_surface(self):
     """Compute the convex hull of the coordinate array and return its volume
     (the enclosed area for 2-D coordinates)."""
     tmp_hull = ConvexHull(self.coordinates)
     return tmp_hull.volume
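
Note on SciPy semantics, relevant here and in Example #13: for 2-D input, ConvexHull.volume is the enclosed area and ConvexHull.area is the perimeter. A quick sanity check on a unit square:

import numpy as np
from scipy.spatial import ConvexHull

square = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
hull = ConvexHull(square)
print(hull.volume)   # 1.0 -> enclosed area
print(hull.area)     # 4.0 -> perimeter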
Code Example #28
File: phasediagram.py Project: essil1/ase-laser
    def __init__(self, references, filter='', verbose=True):
        """Phase-diagram.

        references: list of (name, energy) tuples
            List of references.  The energy must be the total energy and not
            energy per atom.  The names can also be dicts like
            ``{'Zn': 1, 'O': 2}`` which would be equivalent to ``'ZnO2'``.
        filter: str or list of str
            Use only those references that match the given filter.
            Example: ``filter='ZnO'`` will select those that
            contain zinc or oxygen.
        verbose: bool
            Write information.
        """

        if not references:
            raise ValueError("You must provide a non-empty list of references"
                             " for the phase diagram! "
                             "You have provided '{}'".format(references))
        filter = parse_formula(filter)[0]

        self.verbose = verbose

        self.species = OrderedDict()
        self.references = []
        for name, energy in references:
            if isinstance(name, basestring):
                count = parse_formula(name)[0]
            else:
                count = name
                name = formula_hill(count)

            if filter and any(symbol not in filter for symbol in count):
                continue

            natoms = 0
            for symbol, n in count.items():
                natoms += n
                if symbol not in self.species:
                    self.species[symbol] = len(self.species)
            self.references.append((count, energy, name, natoms))

        ns = len(self.species)
        self.symbols = [None] * ns
        for symbol, id in self.species.items():
            self.symbols[id] = symbol

        if verbose:
            print('Species:', ', '.join(self.symbols))
            print('References:', len(self.references))
            for i, (count, energy, name, natoms) in enumerate(self.references):
                print('{:<5}{:10}{:10.3f}'.format(i, name, energy))

        self.points = np.zeros((len(self.references), ns + 1))
        for s, (count, energy, name, natoms) in enumerate(self.references):
            for symbol, n in count.items():
                self.points[s, self.species[symbol]] = n / natoms
            self.points[s, -1] = energy / natoms

        if len(self.points) == ns:
            # Simple case that qhull would choke on:
            self.simplices = np.arange(ns).reshape((1, ns))
            self.hull = np.ones(ns, bool)
        else:
            # The composition fractions sum to 1, so the first column is
            # redundant and is dropped before building the hull:
            hull = ConvexHull(self.points[:, 1:])

            # Find relevant simplices: keep only the downward-facing facets
            # (negative energy component of the outward normal); together
            # these form the lower convex hull of the energy surface:
            ok = hull.equations[:, -2] < 0
            self.simplices = hull.simplices[ok]

            # Create a mask for those points that are on the convex hull:
            self.hull = np.zeros(len(self.points), bool)
            for simplex in self.simplices:
                self.hull[simplex] = True

        if verbose:
            print('Simplices:', len(self.simplices))
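Based on ASE's documented interface, a hedged usage sketch for this constructor; the reference energies below are made-up illustration values:

from ase.phasediagram import PhaseDiagram

refs = [('Cu', 0.0), ('Au', 0.0),
        ('CuAu', -0.5), ('Cu2Au', -0.7), ('CuAu2', -0.2)]
pd = PhaseDiagram(refs)
# decompose an arbitrary composition onto the stable references:
energy, indices, coefs = pd.decompose('Cu3Au')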
Code example #29
File: baseline.py Project: charlesll/rampy
def baseline(x_input,y_input,bir,method, **kwargs):
    """Allows subtracting a baseline under a x y spectrum.

    Parameters
    ----------
    x_input : ndarray
        x values.
    y_input : ndarray
        y values.
    bir : ndarray
        Contains the regions of interest, organised per line.
        For instance, roi = np.array([[100., 200.],[500.,600.]]) will
        define regions of interest between 100 and 200 as well as between 500 and 600.
        Note: this is NOT used by the "als" and "arPLS" algorithms, but is still a requirement when calling the function.
        bir and method will probably become optional arguments in a future iteration of rampy to solve this.
    method : str
        "poly": polynomial fitting, the degree being set by the polynomial_order kwarg;
        "unispline": spline with the UnivariateSpline function of Scipy, splinesmooth is
                     the spline smoothing factor (assumes equal weights in the present case);
        "gcvspline": spline with the gcvspl.f algorithm, really robust.
                     Spectra must have x, y, ese in it, and splinesmooth is the smoothing factor;
                     For gcvspline, if ese are not provided we assume ese = sqrt(y).
                     Requires the installation of gcvspline with a "pip install gcvspline" call prior to use;
        "gaussian": Gaussian background;
        "exp": exponential background;
        "log": logarithmic background;
        "rubberband": rubberband baseline fitting;
        "als": automatic least squares fitting following Eilers and Boelens 2005;
        "arPLS": automatic baseline fit using the algorithm from Baek et al. 2015,
                 Baseline correction using asymmetrically reweighted penalized least squares smoothing, Analyst 140: 250-257.

    kwargs
    ------
    polynomial_order : Int
        The degree of the polynomial (0 for a constant), default = 1.
    s : Float
        spline smoothing coefficient for the unispline and gcvspline algorithms.
    lam : Float
        the lambda smoothness parameter for the ALS and arPLS algorithms. Typical values are between 10**2 and 10**9, default = 10**5.
    p : Float
        parameter of the ALS algorithm, advised values between 0.001 and 0.1, default = 0.01.
    ratio : Float
        ratio parameter of the arPLS algorithm, default = 0.01.
    niter : Int
        number of iterations of the ALS algorithm, default = 10.
    p0_gaussian : List
        containing the starting parameters for the Gaussian baseline fit with curve_fit, default = [1.,1.,1.].
    p0_exp : List
        containing the starting parameters for the exp baseline fit with curve_fit, default = [1.,1.,1.].
    p0_log : List
        containing the starting parameters for the log baseline fit with curve_fit, default = [1.,1.,1.,1.].

    Returns
    -------
    out1 : ndarray
        Contain the corrected signal.
    out2 : ndarray
        Contain the baseline.

    """
    # we get the signals in the bir
    yafit_unscaled = get_portion_interest(x_input,y_input,bir)

    # signal standardization with sklearn
    # this helps with polynomial fitting
    X_scaler = preprocessing.StandardScaler().fit(x_input.reshape(-1, 1))
    Y_scaler = preprocessing.StandardScaler().fit(y_input.reshape(-1, 1))

    # transformation
    x = X_scaler.transform(x_input.reshape(-1, 1))
    y = Y_scaler.transform(y_input.reshape(-1, 1))

    yafit = np.copy(yafit_unscaled)
    yafit[:,0] = X_scaler.transform(yafit_unscaled[:,0].reshape(-1, 1))[:,0]
    yafit[:,1] = Y_scaler.transform(yafit_unscaled[:,1].reshape(-1, 1))[:,0]

    y = y.reshape(len(y_input))

    if method == 'poly':

        # optional parameters
        poly_order = kwargs.get('polynomial_order',1)

        coeffs = np.polyfit(yafit[:,0],yafit[:,1],poly_order)

        baseline_fitted = np.polyval(coeffs,x)

    elif method == 'unispline':

        # optional parameters
        splinesmooth = kwargs.get('s',2.0)

        # fit of the baseline
        coeffs = UnivariateSpline(yafit[:,0],yafit[:,1], s=splinesmooth)

        baseline_fitted = coeffs(x)

    elif method == 'gcvspline':

        try:
            from gcvspline import gcvspline, splderivative
        except ImportError:
            raise ImportError('Install gcvspline to use this mode (needs a working FORTRAN compiler).')
            
        # optional parameters
        splinesmooth = kwargs.get('s',2.0)

        # Spline baseline with mode 1 of gcvspl.f, see gcvspline documentation
        c, wk, ier = gcvspline(yafit[:,0],yafit[:,1],np.sqrt(np.abs(yafit[:,1])),splinesmooth,splmode = 1) # gcvspl with mode 1 and smooth factor

        baseline_fitted = splderivative(x,yafit[:,0],c)

    elif method == 'gaussian':
        ### Baseline is of the type y = a*exp(-log(2)*((x-b)/c)**2)
        # optional parameters
        p0_gauss = kwargs.get('p0_gaussian',[1.,1.,1.])
        ## fit of the baseline
        coeffs, pcov = curve_fit(rampy.gaussian,yafit[:,0],yafit[:,1],p0 = p0_gauss)

        baseline_fitted = rampy.gaussian(x,coeffs[0],coeffs[1],coeffs[2])

    elif method == 'exp':
        ### Baseline is of the type y = a*exp(b*(x-xo))
        # optional parameters
        p0_exp = kwargs.get('p0_exp',[1.,1.,1.])
        ## fit of the baseline
        coeffs, pcov = curve_fit(rampy.funexp,yafit[:,0],yafit[:,1],p0 = p0_exp)

        baseline_fitted = rampy.funexp(x,coeffs[0],coeffs[1],coeffs[2])

    elif method == 'log':
        ### Baseline is logarithmic, fitted with rampy.funlog
        # optional parameters
        p0_log = kwargs.get('p0_log',[1.,1.,1.,1.])
        ## fit of the baseline
        coeffs, pcov = curve_fit(rampy.funlog,yafit[:,0],yafit[:,1],p0 = p0_log)

        baseline_fitted = rampy.funlog(x,coeffs[0],coeffs[1],coeffs[2],coeffs[3])

    elif method == 'rubberband':
        # code from this stack-exchange forum
        #https://dsp.stackexchange.com/questions/2725/how-to-perform-a-rubberband-correction-on-spectroscopic-data

        # Find the convex hull; x is a (n, 1) column after scaling, so stack
        # it with y into a proper (n, 2) point array first
        v = ConvexHull(np.column_stack((x[:, 0], y))).vertices

        # Rotate convex hull vertices until they start from the lowest one
        v = np.roll(v, -v.argmin())
        # Leave only the ascending part
        v = v[:v.argmax()]

        # Create baseline using linear interpolation between vertices
        baseline_fitted = np.interp(x[:, 0], x[v, 0], y[v])

    elif method == 'als':
        # Matlab code in Eilers and Boelens 2005
        # Python adaptation found on stackoverflow: https://stackoverflow.com/questions/29156532/python-baseline-correction-library

        # optional parameters
        lam = kwargs.get('lam',1.0*10**5)
        p = kwargs.get('p',0.01)
        niter = kwargs.get('niter',10)

        # starting the algorithm
        L = len(y)
        D = sparse.csc_matrix(np.diff(np.eye(L), 2))
        w = np.ones(L)
        for i in range(niter):
            W = sparse.spdiags(w, 0, L, L)
            Z = W + lam * D.dot(D.transpose())
            z = sparse.linalg.spsolve(Z, w*y)
            w = p * (y > z) + (1-p) * (y < z)

        baseline_fitted = z

    elif method == 'arPLS':
        # Adaptation of the Matlab code in Baek et al 2015

        # optional parameters
        lam = kwargs.get('lam',1.0*10**5)
        ratio = kwargs.get('ratio',0.01)

        N = len(y)
        D = sparse.csc_matrix(np.diff(np.eye(N), 2))
        w = np.ones(N)

        while True:
            W = sparse.spdiags(w, 0, N, N)
            Z = W + lam * D.dot(D.transpose())
            z = sparse.linalg.spsolve(Z, w*y)
            d = y - z
            # make d- and get w^t with m and s
            dn = d[d<0]
            m = np.mean(dn)
            s = np.std(dn)
            wt = 1.0/(1 + np.exp( 2* (d-(2*s-m))/s ) )
            # check exit condition and backup
            if norm(w-wt)/norm(w) < ratio:
                break
            w = wt

        baseline_fitted = z

    return y_input.reshape(-1,1)-Y_scaler.inverse_transform(baseline_fitted.reshape(-1, 1)), Y_scaler.inverse_transform(baseline_fitted.reshape(-1, 1))
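A hedged usage sketch for the function above; the synthetic spectrum, regions of interest, and parameter values are illustrative only:

import numpy as np
import rampy

x = np.linspace(50., 1400., 500)
# synthetic spectrum: one Gaussian peak sitting on a linear background
y = 10.0 * np.exp(-np.log(2) * ((x - 700.0) / 50.0)**2) + 0.001 * x + 1.0
roi = np.array([[50., 500.], [900., 1400.]])  # assumed signal-free regions
y_corrected, y_baseline = rampy.baseline(x, y, roi, 'poly', polynomial_order=1)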
Code example #30
File: pourbaix_diagram.py Project: zizai/pymatgen
    def get_pourbaix_domains(pourbaix_entries, limits=None):
        """
        Returns a set of pourbaix stable domains (i. e. polygons) in
        pH-V space from a list of pourbaix_entries

        This function works by using scipy's HalfspaceIntersection
        function to construct all of the 2-D polygons that form the
        boundaries of the planes corresponding to individual entry
        gibbs free energies as a function of pH and V. Hyperplanes
        of the form a*pH + b*V + 1 - g(0, 0) are constructed and
        supplied to HalfspaceIntersection, which then finds the
        boundaries of each pourbaix region using the intersection
        points.

        Args:
            pourbaix_entries ([PourbaixEntry]): Pourbaix entries
                with which to construct stable pourbaix domains
            limits ([[float]]): limits in which to do the pourbaix
                analysis

        Returns:
            Returns a dict of the form {entry: [boundary_points]}.
            The list of boundary points are the sides of the N-1
            dim polytope bounding the allowable ph-V range of each entry.
        """
        if limits is None:
            limits = [[-2, 16], [-4, 4]]

        # Get hyperplanes
        hyperplanes = [
            np.array([-PREFAC * entry.npH, -entry.nPhi, 0, -entry.energy]) *
            entry.normalization_factor for entry in pourbaix_entries
        ]
        hyperplanes = np.array(hyperplanes)
        hyperplanes[:, 2] = 1  # coefficient of the energy (g) axis

        max_contribs = np.max(np.abs(hyperplanes), axis=0)
        g_max = np.dot(-max_contribs, [limits[0][1], limits[1][1], 0, 1])

        # Add border hyperplanes and generate HalfspaceIntersection
        border_hyperplanes = [[-1, 0, 0,
                               limits[0][0]], [1, 0, 0, -limits[0][1]],
                              [0, -1, 0, limits[1][0]],
                              [0, 1, 0, -limits[1][1]], [0, 0, -1, 2 * g_max]]
        hs_hyperplanes = np.vstack([hyperplanes, border_hyperplanes])
        interior_point = np.average(limits, axis=1).tolist() + [g_max]
        hs_int = HalfspaceIntersection(hs_hyperplanes,
                                       np.array(interior_point))

        # organize the boundary points by entry
        pourbaix_domains = {entry: [] for entry in pourbaix_entries}
        for intersection, facet in zip(hs_int.intersections,
                                       hs_int.dual_facets):
            for v in facet:
                if v < len(pourbaix_entries):
                    this_entry = pourbaix_entries[v]
                    pourbaix_domains[this_entry].append(intersection)

        # Remove entries with no pourbaix region
        pourbaix_domains = {k: v for k, v in pourbaix_domains.items() if v}
        pourbaix_domain_vertices = {}

        for entry, points in pourbaix_domains.items():
            points = np.array(points)[:, :2]
            # Initial sort to ensure consistency
            points = points[np.lexsort(np.transpose(points))]
            center = np.average(points, axis=0)
            points_centered = points - center

            # Sort points by cross product of centered points,
            # isn't strictly necessary but useful for plotting tools
            points_centered = sorted(
                points_centered,
                key=cmp_to_key(lambda x, y: x[0] * y[1] - x[1] * y[0]))
            points = points_centered + center

            # Create simplices corresponding to pourbaix boundary
            simplices = [
                Simplex(points[indices])
                for indices in ConvexHull(points).simplices
            ]
            pourbaix_domains[entry] = simplices
            pourbaix_domain_vertices[entry] = points

        return pourbaix_domains, pourbaix_domain_vertices
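To make the HalfspaceIntersection convention used above concrete, here is a minimal, self-contained sketch (independent of pymatgen) that recovers the corners of the unit square from four half-planes stored as [normal, offset] rows with normal @ x + offset <= 0:

import numpy as np
from scipy.spatial import HalfspaceIntersection

halfspaces = np.array([[-1.0, 0.0, 0.0],    # -x     <= 0, i.e. x >= 0
                       [1.0, 0.0, -1.0],    #  x - 1 <= 0
                       [0.0, -1.0, 0.0],    # -y     <= 0
                       [0.0, 1.0, -1.0]])   #  y - 1 <= 0
hs = HalfspaceIntersection(halfspaces, np.array([0.5, 0.5]))  # needs a strictly interior point
print(hs.intersections)  # the four corners of the unit square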
Code example #31
def density(model, refinement=0):
    """
    Create a Voronoi mesh and calculate the local particle density on its vertices.

    The local density is calculated as follows:
    for each vertex, compute the density of each neighbour region as
    one over the area and assign the average of
    the neighbouring density to the vertex.

    Parameters
    ----------
    model : simulation.builder.Model
        the Model object containing the particle positions (model.pos)
    refinement : int (default: 0)
        number of subdivisions used to refine the mesh (0 == no refinement)
    Returns
    -------
    tri : matplotlib.tri.Triangulation
        the triangulation mesh (refined if refinement is set)
    vert_density : numpy.array
        the array containing the local density associated with the tri mesh


    Example
    -------
    To plot the result using matplotlib use :

    .. code-block:: python

        import matplotlib.pyplot as plt
        tri, density = data_proc.density(model)
        plt.tricontour(tri, density) # to draw contours
        plt.tricontourf(tri, density) # to draw filled contours
        plt.show()

    Note
    ----
    As of now, the numerical results may not be quantitatively accurate
    but should qualitatively represent the density.
    """
    vor = Voronoi(model.pos)
    vert_density = np.zeros(max(vor.vertices.shape))  # density vector
    reg_num = np.zeros(max(
        vor.vertices.shape))  # nbr of regions per vertex --> averaging
    for point_index, reg in enumerate(vor.point_region):
        vertices = vor.regions[reg]
        if vertices:
            if -1 not in vertices:
                # For a 2-D hull, .volume is the enclosed polygon area
                # (.area would be the perimeter)
                area = ConvexHull(vor.vertices[vertices]).volume
                vert_density[
                    vertices] += 1 / area  # makes it a density (sort-of)
                reg_num[vertices] += 1
    vert_density /= reg_num  # averaging

    # getting rid of really ugly border points
    new_vert, vert_density = (
        vor.vertices[vor.vertices[:, 0] >= np.min(model.pos[:, 0])],
        vert_density[vor.vertices[:, 0] >= np.min(model.pos[:, 0])])

    new_vert, vert_density = (
        new_vert[new_vert[:, 0] <= np.max(model.pos[:, 0])],
        vert_density[new_vert[:, 0] <= np.max(model.pos[:, 0])])

    new_vert, vert_density = (
        new_vert[new_vert[:, 1] >= np.min(model.pos[:, 1])],
        vert_density[new_vert[:, 1] >= np.min(model.pos[:, 1])])

    new_vert, vert_density = (
        new_vert[new_vert[:, 1] <= np.max(model.pos[:, 1])],
        vert_density[new_vert[:, 1] <= np.max(model.pos[:, 1])])

    # for triangulation refinement
    tri2 = Triangulation(*new_vert.T)
    if refinement:
        tri2.set_mask(TriAnalyzer(tri2).get_flat_tri_mask(0.1))
        refiner = UniformTriRefiner(tri2)
        print(len(tri2.neighbors), vert_density.shape)
        tri, vert_density = refiner.refine_field(vert_density,
                                                 subdiv=refinement)
    else:
        tri, vert_density = tri2, vert_density

    return tri, vert_density
Code example #32
def to_ineq(points):
    # Convert a point cloud into the half-space form A @ x <= b of its
    # convex hull; scipy stores each facet as [normal, offset] with
    # normal @ x + offset <= 0 for interior points.
    ineq = ConvexHull(points).equations
    return ineq[:, :-1], -ineq[:, -1]
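A hedged usage sketch for to_ineq: testing whether a query point lies inside the hull by checking A @ p <= b (up to a small tolerance):

import numpy as np

pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
A, b = to_ineq(pts)
p = np.array([0.25, 0.25])
print(np.all(A @ p <= b + 1e-9))  # True: p lies inside the unit square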
Code example #33
 def find_perimeter_nodes(self, pts):
     """
     Uses a convex hull to locate the perimeter nodes of the Voronoi grid,
     then sets them as fixed value boundary nodes.
     It then sets/updates the various relevant node lists held by the grid, 
     and returns *node_status*, *core_nodes*, *boundary_nodes*.
     """
 
     # Calculate the convex hull for the set of points
     from scipy.spatial import ConvexHull
     hull = ConvexHull(pts, qhull_options='Qc') # see below why we use 'Qc'
     
     # The ConvexHull object lists the edges that form the hull. We need to
     # get from this list of edges the unique set of nodes. To do this, we
     # first flatten the list of vertices that make up all the hull edges 
     # ("simplices"), so it becomes a 1D array. With that, we can use the set()
     # function to turn the array into a set, which removes duplicate vertices.
     # Then we turn it back into an array, which now contains the set of IDs for
     # the nodes that make up the convex hull.
     #   The next thing to worry about is the fact that the mesh perimeter 
     # might contain nodes that are co-planar (that is, co-linear in our 2D 
     # world). For example, if you make a set of staggered points for a
     # hexagonal lattice using make_hex_points(), there will be some 
     # co-linear points along the perimeter. The ones of these that don't 
     # form convex corners won't be included in convex_hull_nodes, but they
     # are nonetheless part of the perimeter and need to be included in
     # the list of boundary_nodes. To deal with this, we pass the 'Qc'
     # option to ConvexHull, which makes it generate a list of coplanar
     # points. We include these in our set of boundary nodes.
     convex_hull_nodes = numpy.array(list(set(hull.simplices.flatten())))
     coplanar_nodes = hull.coplanar[:,0]
     boundary_nodes = numpy.concatenate((convex_hull_nodes, coplanar_nodes))
 
     # Now we'll create the "node_status" array, which contains the code
     # indicating whether the node is interior and active (=0) or a
     # boundary (=1). This means that all perimeter (convex hull) nodes are
     # initially flagged as boundary code 1. An application might wish to change
     # this so that, for example, some boundaries are inactive.
     node_status = numpy.zeros(len(pts[:,0]), dtype=numpy.int8)
     node_status[boundary_nodes] = 1
     
     # It's also useful to have a list of interior nodes
     core_nodes = numpy.where(node_status==0)[0]
     
     #save the arrays and update the properties
     self.node_status = node_status
     self._num_active_nodes = node_status.size
     self._num_core_nodes = len(core_nodes)
     self._num_core_cells = len(core_nodes)
     self.core_cells = numpy.arange(len(core_nodes))
     self.node_corecell = numpy.empty(node_status.size)
     self.node_corecell.fill(BAD_INDEX_VALUE)
     self.node_corecell[core_nodes] = self.core_cells
     self.active_cells = numpy.arange(node_status.size)
     self.cell_node = core_nodes
     self.activecell_node = core_nodes
     self.corecell_node = core_nodes
     self._boundary_nodes = boundary_nodes
     
     # Return the results
     return node_status, core_nodes, boundary_nodes
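A minimal sketch of the 'Qc' behaviour this method relies on: an input point lying on a hull edge is reported in hull.coplanar rather than among the hull simplices (the point coordinates below are illustrative):

import numpy as np
from scipy.spatial import ConvexHull

pts = np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 0.0], [1.0, 1.0]])  # (1, 0) is collinear
hull = ConvexHull(pts, qhull_options='Qc')
print(set(hull.simplices.flatten()))  # corner nodes only: {0, 1, 3}
print(hull.coplanar[:, 0])            # input index of the collinear point: [2]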
Code example #34
    new_face_vert = [
        icosahedron_vertices[face[0]], icosahedron_vertices[face[1]],
        icosahedron_vertices[face[2]]
    ]
    face_cache.append(new_face_vert)
faces = np.asarray(face_cache)

#######################################
###### RECURSIVELY MAKING THE ICOSPHERE ######

sph_vert, sph_tri = create_unit_sphere_vert(4)
x, y, z = sph_vert[:, 0], sph_vert[:, 1], sph_vert[:, 2]  # arrays holding the cartesian coordinates
ch = ConvexHull(sph_vert)
"""
hull_indices = np.unique(ch.simplices.flat)
hull_pts = sph_vert[hull_indices, :]
print("len hull points", len(hull_pts))
print(len(ch.simplices))

z_pts = hull_pts[:,2]

zmax = np.max(z_pts)
neighmax = np.max(ch.neighbors)
print("z max", zmax)
print("neigh max", neighmax)
"""
# ch.simplices lists the vertex indices of each triangular face of the hull
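A small hedged sketch of how those hull simplices might be turned into per-face triangle coordinates for the sphere mesh built above (variable names follow the snippet):

triangles = sph_vert[ch.simplices]  # shape (n_faces, 3, 3): three vertices per face
print(triangles.shape)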
Code example #35
    def GetFloorPolygon(self, i, yamlFilepath):
        ''' return intersecting surface polygon of floor i, generated from floor cutting '''

        result = ""

        floor_name = self.floor_name_lst[i]
        print(
            "current floor, ", floor_name,
            "******************************************************************************"
        )
        #display shapes
        #v = self.canvas._display

        # set parameters
        s = self.s
        dbscan = self.dbscan
        k = self.k
        calcconvexhull = False
        use_obb = False

        # cutting_height of each building storey
        cutting_height = self.storeyElevation_lst[i] + 1.0
        # loading customize parameters from the .yml file
        with open(yamlFilepath, 'r') as yml_file:
            yml_data = yaml.load(yml_file, Loader=Loader)
        str1 = "f" + str(i)
        if str1 in yml_data.keys():
            dict2 = yml_data[str1]
            if 'cutting_height' in dict2.keys():
                value = float(dict2['cutting_height'])
                cutting_height = self.storeyElevation_lst[i] + value
            if 'k' in dict2.keys():
                k = float(dict2['k'])
            if 'use_obb' in dict2.keys():
                if dict2['use_obb']:
                    use_obb = True
            if 's' in dict2.keys():
                s = float(dict2['s'])
            if 'dbscan' in dict2.keys():
                dbscan = float(dict2['dbscan'])
            if 'calcconvexhull' in dict2.keys():
                if dict2['calcconvexhull']:
                    calcconvexhull = True
        if use_obb:
            print(
                "use_obb, ", use_obb, "floor name,", floor_name,
                " ----------------------------------------------------------------------"
            )
            pts = GetOrientedBoundingBoxShapeCompound(
                self.floor_compound_shapes_lst[i], False)
            Z_value = []
            for pt in pts:
                Z_value.append(pt.Z())
            z_max = max(Z_value)
            z_min = min(Z_value)
            z_mid = 0.5 * (z_max + z_min)
            pts_low = []
            pts_up = []
            for pt in pts:
                if pt.Z() < z_mid:
                    pts_low.append(pt)
                else:
                    pts_up.append(pt)
            corners_top = pts_up
            pyocc_corners_list = []
            for pt in corners_top:
                pyocc_corners_list.append([
                    float("{:.3f}".format(pt.X())),
                    float("{:.3f}".format(pt.Y()))
                ])
            points = np.array(pyocc_corners_list)
            # for 2-D input, ConvexHull.vertices comes back in counter-
            # clockwise order, so it can be used directly as a polygon ring
            obb_hull = ConvexHull(points)
            result = []
            for idx in obb_hull.vertices:
                result.append(pyocc_corners_list[idx])
            poly_footprint = Polygon(result)
            return [poly_footprint]

        print("cutting height,", cutting_height)
        section_shape = GetSectionShape(cutting_height,
                                        self.floor_compound_shapes_lst[i])
        #v.DisplayShape(section_shape, color="RED", update=True)
        # get the section shape edges
        edges = GetShapeEdges(section_shape)
        if s != 0:
            first_xy = GetEdgeSamplePointsPerDistance(edges, s)
        else:
            first_xy = GetEdges2DPT(edges)
        np_points = np.array(first_xy)
        corners = GetNumpyOBB(np_points,
                              calcconvexhull=calcconvexhull,
                              show_plot=False)
        OBB_poly = Polygon(corners.tolist())

        # create result dir
        if not os.path.exists('./result/Overlap/' + floor_name):
            os.makedirs('./result/Overlap/' + floor_name)
        img_filepath = "./result/Overlap/" + floor_name + "/obbAndPoints.png"

        # save result as images in the result folder
        SavePloyAndPoints(OBB_poly,
                          np_points,
                          color='b',
                          filepath=img_filepath)
        cluster_filepath = "./result/Overlap/" + floor_name + "/clusters.png"
        cluster_lst = GetDBSCANClusteringlst(np_points,
                                             dbscan,
                                             showplot=False,
                                             saveplot=cluster_filepath)
        line = str()
        per_floor_poly = []
        poly_count = 0
        for np_member_array in cluster_lst:
            poly_count += 1
            print(len(np_member_array))
            print("starting concave hull")
            hull = concaveHull(np_member_array, k=k, if_optimal=False)
            self.WriteConcave2WKT(hull, floor_name, poly_count)
            poly = Polygon(hull)
            print("polygon validation is: ", poly.is_valid, poly.area)
            poly_filepath = "./result/Overlap/" + floor_name + "/polygon" + str(
                poly_count) + ".png"
            OBB_points = GetNumpyOBB(np_member_array, show_plot=False)
            OBB_poly = Polygon(OBB_points.tolist())
            print(
                "OBB_poly area,", OBB_poly.area, " name,", floor_name,
                "---------------------------------------------------------------------"
            )

            if not poly.is_valid:
                print("Try to repair validation:")
                new_poly = poly.buffer(0)
                line = line + "Repaired_" + str(new_poly.is_valid) + "_" + str(
                    float("{:.2f}".format(new_poly.area)))
                print(new_poly.is_valid, new_poly.area)
                un_poly = ops.unary_union(new_poly)
                print(type(un_poly), un_poly.is_valid, "Union area,",
                      un_poly.area)

                # if the poly is wrong, replace with OBB_poly
                if un_poly.area < (0.3 * OBB_poly.area):
                    un_poly = OBB_poly

                per_floor_poly.append(un_poly)

                if un_poly.geom_type == 'MultiPolygon':
                    for geom in un_poly.geoms:
                        xs, ys = geom.exterior.xy
                        plt.plot(xs, ys, color="r")
                    plt.savefig(poly_filepath)
                    plt.close()

                elif un_poly.geom_type == 'Polygon':
                    SavePloyAndPoints(un_poly,
                                      np_member_array,
                                      filepath=poly_filepath)
                else:
                    print("Error polygon generation from concave hull failed!")
            else:

                line = line + "True, Floor Area" + str(
                    float("{:.2f}".format(poly.area)))
                print("Polygon True, no need repair")
                SavePloyAndPoints(poly,
                                  np_member_array,
                                  filepath=poly_filepath)
                per_floor_poly.append(poly)
        return per_floor_poly  # [polygon] or [polygons]
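The use_obb branch above leans on scipy's guarantee that, for 2-D input, ConvexHull.vertices is ordered counter-clockwise, so the indices trace a valid polygon ring. A stripped-down sketch of that idea, independent of the OCC machinery:

import numpy as np
from scipy.spatial import ConvexHull
from shapely.geometry import Polygon

pts = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.5, 0.5]])
hull = ConvexHull(pts)
footprint = Polygon(pts[hull.vertices])  # vertices are already CCW-ordered
print(footprint.area)  # 1.0: the interior point is ignored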
Code example #36
File: convex_hull.py Project: rayansamy/convml_tt
def calc_point_offsets(points, scale=0.2, show_plot=False):
    """
    Calculate offset point for each point in points which is outside a smooth
    rerepresentation of the convex hull of points in 2D. This is useful for
    positioning labels or inset axes outside plotted points
    """
    Np, _ = points.shape
    hull = ConvexHull(points)

    vertices = list(hull.vertices)
    vertices.insert(0, vertices[-1])

    x_h, y_h = points[vertices, 0], points[vertices, 1]

    if show_plot:
        plt = _import_matplotlib()
        plt.plot(x_h, y_h, "r--", lw=2)

    def make_t(x, y):
        t = np.arange(x.shape[0], dtype=float)
        t /= t[-1]
        return t

    x_h, y_h = _filter_close_points(x_h, y_h)

    # again we have to ensure that the path is cyclic
    x_h = np.insert(x_h, 0, x_h[-1])
    y_h = np.insert(y_h, 0, y_h[-1])

    t = make_t(x_h, y_h)
    Nt = 100
    nt = np.linspace(0, 1, Nt)

    cs_x = CubicSpline(t, x_h, bc_type="periodic")
    cs_y = CubicSpline(t, y_h, bc_type="periodic")

    x_s = cs_x(nt)
    y_s = cs_y(nt)

    if show_plot:
        plt = _import_matplotlib()
        plt.plot(x_s, y_s, marker=".")

    points_s = np.array([x_s, y_s]).T

    lx, ly = np.max(x_s) - np.min(x_s), np.max(y_s) - np.min(y_s)
    l = np.sqrt(lx**2.0 + ly**2.0)

    offset_points = []

    for n in range(Np):
        point = points[n]

        dist_xy = point - points_s
        dist_xy[:, 0] /= lx
        dist_xy[:, 1] /= ly

        dists = np.linalg.norm(dist_xy, axis=-1)
        k = np.argmin(dists)
        point_nearest = points_s[k]

        if dists[k] < 0.1:
            if show_plot:
                plt.plot(point_nearest[0],
                         point_nearest[1],
                         marker="s",
                         color="red")

            kl, kr = k - 5, k + 5
            if kr >= Nt:
                kr -= Nt
            d = points_s[kr] - points_s[kl]
            d[0] /= lx
            d[1] /= ly
            d = np.array([d[1], -d[0]])
            d /= np.linalg.norm(d)

            d[0] *= scale * lx
            d[1] *= scale * ly
        else:
            if show_plot:
                (line, ) = plt.plot(point_nearest[0],
                                    point_nearest[1],
                                    marker="s")
                plt.plot(point[0],
                         point[1],
                         marker="o",
                         color=line.get_color())

            d = point_nearest - point
            d /= np.linalg.norm(d)
            d *= scale * l

        point_outside = point_nearest + d

        if show_plot:
            plt.plot(point_outside[0],
                     point_outside[1],
                     marker=".",
                     color="blue")

        offset_points.append(point_outside)

    return np.array(offset_points)
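A hedged usage sketch for calc_point_offsets; the random scatter and the scale value are illustrative only:

import numpy as np

rng = np.random.default_rng(1)
pts = rng.normal(size=(20, 2))
offsets = calc_point_offsets(pts, scale=0.3)
# each row of offsets is a position just outside the smoothed hull,
# suitable e.g. for placing a text label next to pts[n]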
Code example #37
File: convex_hull.py Project: aashish24/pyntcloud
 def compute(self):
     """ABC API: (re)build the scipy ConvexHull in place from the stored points."""
     scipy_ConvexHull.__init__(self,
                               self._points,
                               self._incremental,
                               self._qhull_options)