Example #1
File: createmap.py  Project: kentwar/pySAIL
    def estimatemeans(self):
        # Collect the indices of grid cells that contain at least one point.
        feature_positions = []
        for i in range(len(self.edges[0]) - 1):
            for j in range(len(self.edges[1]) - 1):
                if len(self.points[i, j]) > 0:
                    feature_positions.append([i, j])

        # Index the occupied cells so that empty cells can be filled from
        # their nearest occupied neighbors.
        tree = KDTree(feature_positions)
        for i in range(len(self.edges[0]) - 1):
            for j in range(len(self.edges[1]) - 1):
                if len(self.points[i, j]) == 0:
                    radius, neighbour = tree.query(x=np.array([i, j]), k=1)
                    # feature_resolution comes from the enclosing scope in the
                    # original file.
                    if radius > feature_resolution[0] / 10:
                        self.estmeans[i, j] = np.nan
                        self.means[i, j] = np.nan
                    else:
                        # All occupied cells within the nearest-neighbor radius.
                        nearby = tree.data[tree.query_ball_point(
                            x=np.array([i, j]), r=radius)]
                        # Index the 2-D array of true means with a (row, col) tuple.
                        estmean = np.nanmean(
                            [self.trumeans[tuple(p.astype(int))] for p in nearby])
                        self.estmeans[i, j] = estmean
                        self.means[i, j] = estmean
                else:
                    self.means[i, j] = self.trumeans[i, j]
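
Stripped of the class context, the pattern above (query the nearest occupied cell, then gather everything within that distance with query_ball_point) can be sketched on its own. The grid and values below are made up for illustration.

import numpy as np
from scipy.spatial import KDTree

# Hypothetical 2-D grid of cell means; NaN marks empty cells.
values = np.array([[1.0, np.nan, 2.0],
                   [np.nan, np.nan, 3.0],
                   [4.0, 5.0, np.nan]])

occupied = np.argwhere(~np.isnan(values))      # cells that hold data
tree = KDTree(occupied)

filled = values.copy()
for cell in np.argwhere(np.isnan(values)):
    radius, _ = tree.query(cell, k=1)          # distance to the nearest occupied cell
    nearby = tree.data[tree.query_ball_point(cell, r=radius)]
    filled[tuple(cell)] = np.mean(
        [values[tuple(p.astype(int))] for p in nearby])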
Example #2
class NeighborsFinder:
    def __init__(self, data):
        """Find neigbors
        
        Args:
            data (pd.DataFrame or dict): data with at columns id,pos,and eventually range
        """
        if data is not None:

            self.data = data

            # Safety checks
            assert isinstance(data, pd.DataFrame)
            assert "pos" in data.columns

            # Initialize KDTree finder from scipy
            self.tree = KDTree(self.data["pos"].tolist())

    def find_in_range(self, obj, search_range):

        # TODO
        # Will not work with double colliders
        # Could be implemented with colliders in PyGame?

        # Safety check
        assert hasattr(self, "tree")

        # Get position from dataset
        pos = obj.pos

        # Find all neighbors within search_range of the object's position
        idx = self.tree.query_ball_point(pos, search_range)

        # Return filtered data
        ids = self.data.iloc[idx].index.tolist()
        assert obj.id not in ids
        return ids

    def find_closest(self, obj, k=1):

        # Safety check
        assert hasattr(self, "tree")

        # Get object position from which we want to find neighbors
        pos = obj.pos

        # Query the tree for the k closest points
        distances, idx = self.tree.query(pos, k=k)

        if k == 1:
            distances = [distances]
            idx = [idx]

        # Get ids from the dataset
        # Sanity check: the querying object itself should not appear among the
        # returned neighbors
        ids = self.data.iloc[idx].index.tolist()
        assert obj.id not in ids
        return distances, ids
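
A minimal usage sketch, assuming a small DataFrame indexed by id with a "pos" column and a simple object carrying id and pos attributes (all names and values below are made up):

import pandas as pd
from collections import namedtuple

# Hypothetical data: three agents on a 2-D plane, indexed by their id.
data = pd.DataFrame(
    {"pos": [(0.0, 0.0), (1.0, 0.0), (5.0, 5.0)]},
    index=["a", "b", "c"],
)
finder = NeighborsFinder(data)

Obj = namedtuple("Obj", ["id", "pos"])
probe = Obj(id="probe", pos=(0.0, 0.5))

print(finder.find_in_range(probe, search_range=2.0))  # ids within 2.0: 'a' and 'b'
distances, ids = finder.find_closest(probe, k=1)
print(ids)                                            # ['a'] (distance 0.5)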
Example #3
def make_adjacency_matrix(data, k):
    # Build a symmetric k-nearest-neighbor adjacency matrix
    # (zeros and KDTree come from numpy and scipy.spatial in the original module).
    kt = KDTree(data)
    out = zeros((len(data), len(data)))
    for i, point in enumerate(data):
        # query returns the point itself as its own nearest neighbor,
        # so ask for k + 1 and drop the first index.
        distance, neighbors = kt.query(point, k + 1)
        for j in neighbors[1:]:
            # Mark both directions: nearest-neighbor relations are not
            # necessarily symmetric, so enforce symmetry explicitly.
            out[i][j] = 1
            out[j][i] = 1
    return out
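
A quick check on random data; it assumes the function above and its imports (numpy's zeros, scipy's KDTree) are in scope, and the sizes are arbitrary:

import numpy as np

rng = np.random.default_rng(0)
pts = rng.random((10, 2))            # 10 random points in the unit square

A = make_adjacency_matrix(pts, 3)    # connect each point to its 3 nearest neighbors
assert (A == A.T).all()              # symmetric by construction
print(A.sum(axis=1))                 # node degrees; at least 3 after symmetrisation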
Example #4
def create_main_grid(points, extend, img_size=32, values=[]):
    """Create grid and caluclate features
    :param points: Array Vstack [x, y, z, classification] [m]
    :type points: float
    :param extend: Array [minX minY, maxX maxY]
    :type extend: float
    :param img_size: Spatial size of feature area Default 32. Should be 2 to power of n
    :type img_size: int
    :param values: Values for Feature Stats, if non is passed height is used
    :type values: float
    """
    tree = KDTree(points[:, 0:2])
    buff = int(img_size / 2)

    if values:
        points[:, 2] = values

    minX = extend[0, 0] - buff
    minY = extend[0, 1] - buff
    maxX = extend[1, 0] + buff
    maxY = extend[1, 1] + buff

    gridX = np.linspace(int(minX), int(maxX), int(maxX - minX + 1))
    gridY = np.linspace(int(minY), int(maxY), int(maxY - minY + 1))

    f1 = np.zeros((len(gridX), len(gridY)))
    f2 = np.zeros((len(gridX), len(gridY)))
    f3 = np.zeros((len(gridX), len(gridY)))

    for i, x in enumerate(gridX):
        for j, y in enumerate(gridY):
            idx = tree.query_ball_point([x, y], 1.4)
            cell_ext = np.array([[x - 0.5, y - 0.5], [x + 0.5, y + 0.5]])
            # clip() is a helper from the original project (not shown here);
            # it presumably restricts the points to cell_ext.
            cell_points = clip(points[idx], cell_ext)

            if cell_points.any():
                f1[i, j] = np.mean(cell_points[:, 2])
                f2[i, j] = np.min(cell_points[:, 2])
                f3[i, j] = np.max(cell_points[:, 2])

    return [f1, f2, f3]
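
create_main_grid relies on a clip helper that is not part of this excerpt. A plausible, hypothetical sketch, assuming it simply keeps the points whose x/y coordinates fall inside the cell extent:

import numpy as np

def clip(points, cell_ext):
    """Hypothetical helper: keep points whose x/y lie inside cell_ext.

    points   -- array of shape (n, >=3) with columns [x, y, z, ...]
    cell_ext -- array [[minX, minY], [maxX, maxY]]
    """
    (min_x, min_y), (max_x, max_y) = cell_ext
    mask = ((points[:, 0] >= min_x) & (points[:, 0] <= max_x) &
            (points[:, 1] >= min_y) & (points[:, 1] <= max_y))
    return points[mask]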
Example #5
class scan(cluster):
    def __init__(self, filepath):
        start = time.time()
        # File is presumably laspy's (1.x) File class; cluster is defined
        # elsewhere in the original project.
        self.file = File(filepath, mode="r")
        self.scale = self.file.header.scale[0]
        self.offset = self.file.header.offset[0]
        # Index every point of the scan in a KD-tree for spatial queries.
        self.tree = KDTree(
            np.vstack([self.file.x, self.file.y, self.file.z]).transpose())
        self.time = self.file.header.get_date()
        end = time.time() - start
        print("Time Elapsed: {}".format(end))

    def nearNeighbor(self, point, k=1):
        return self.tree.query(point, k=k)

    def radialcluster(self, point, radius):
        # Snap to the nearest stored point, then collect everything within
        # radius of that point.
        neighbor = self.tree.data[self.tree.query(point, k=1)[1]]
        points = self.tree.data[self.tree.query_ball_point(neighbor, radius)]
        print("{} Points \n".format(points.shape[0]))
        return np.array(points)
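
A usage sketch with a placeholder file name; it assumes the laspy 1.x File API and the cluster base class are available as in the original project:

# "survey.las" is a hypothetical path.
s = scan("survey.las")

# Distance to and index of the stored point closest to an arbitrary coordinate.
dist, idx = s.nearNeighbor([512000.0, 5403000.0, 120.0], k=1)

# All points within 5 m of the scan point nearest to that coordinate.
cluster_points = s.radialcluster([512000.0, 5403000.0, 120.0], 5.0)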
Example #6
def make_edge_graph(data, k, ball=True):
    # Build an undirected neighbor graph: out[i] is the set of neighbors of
    # node i. In ball mode k is used as a radius; otherwise it is the number
    # of nearest neighbors.
    kt = KDTree(data)
    out = [set() for _ in data]
    k_max = 0
    k_min = np.inf
    for i, point in enumerate(data):
        if ball:
            neighbors = kt.query_ball_point(point, k)
        else:
            # The query returns the point itself first, so ask for k + 1
            # neighbors and drop it.
            distance, neighbors = kt.query(point, int(k + 1))
            neighbors = neighbors[1:]
        for j in neighbors:
            if j != i:
                out[i].add(j)
                if len(out[i]) > k_max:
                    k_max = len(out[i])
                out[j].add(i)
                if len(out[j]) > k_max:
                    k_max = len(out[j])
        # Note: k_min reflects the degree of node i at the time it is
        # processed; edges added later by other nodes are not counted here.
        if len(out[i]) < k_min:
            k_min = len(out[i])

    return (k_min, k_max, out)
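
A quick check of both modes on random points (sizes and thresholds below are arbitrary):

import numpy as np

rng = np.random.default_rng(1)
pts = rng.random((20, 2))

# Radius graph: connect every pair closer than 0.3.
k_min, k_max, edges = make_edge_graph(pts, 0.3, ball=True)

# kNN graph: connect each point to its 4 nearest neighbors (then symmetrise).
k_min, k_max, edges = make_edge_graph(pts, 4, ball=False)
print(k_min, k_max, len(edges))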
Example #7
# las, coords2d, coords3d and t0 are created earlier in the original script.
values = np.vstack((las.classification, las.intensity)).transpose()
tree = KDTree(coords2d)

time_delta_0 = datetime.datetime.now() - t0
print('Time read and tree {0}'.format(time_delta_0))

df = pd.DataFrame()
features = []
n = 0
t1 = datetime.datetime.now()
for point, value in zip(coords3d, values):

    # 40 m x 40 m window of bins centred on the current point.
    gridx = np.linspace(point[0] - 20, point[0] + 20, 40)
    gridy = np.linspace(point[1] - 20, point[1] + 20, 40)

    idx = tree.query_ball_point([point[0], point[1]], 56, p=2, eps=0)

    coo = coords3d[idx]

    x = coo[:, 0]
    y = coo[:, 1]
    z = coo[:, 2]

    # Per-bin height statistics around the point.
    mean, _, _, _ = scipy.stats.binned_statistic_2d(x, y, z, statistic='mean', bins=[gridx, gridy])
    zmin, _, _, _ = scipy.stats.binned_statistic_2d(x, y, z, statistic='min', bins=[gridx, gridy])
    zmax, _, _, _ = scipy.stats.binned_statistic_2d(x, y, z, statistic='max', bins=[gridx, gridy])

    # Squash the height differences into 0-255 feature channels.
    f1 = 255 * scipy.special.expit(mean - point[2])
    f2 = 255 * scipy.special.expit(zmin - point[2])
    f3 = 255 * scipy.special.expit(zmax - point[2])
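
For reference, a self-contained sketch of the binned_statistic_2d call used above, on synthetic data:

import numpy as np
import scipy.stats

# Synthetic heights at random x/y positions.
rng = np.random.default_rng(0)
x, y = rng.random(1000) * 40, rng.random(1000) * 40
z = np.sin(x / 10) + 0.1 * rng.standard_normal(1000)

edges = np.linspace(0, 40, 5)        # 4 x 4 bins of 10 m
zmean, _, _, _ = scipy.stats.binned_statistic_2d(
    x, y, z, statistic='mean', bins=[edges, edges])
zmin, _, _, _ = scipy.stats.binned_statistic_2d(
    x, y, z, statistic='min', bins=[edges, edges])
print(zmean.shape, zmin.shape)       # (4, 4) (4, 4)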
Example #8
def create_featureset(points,
                      extend,
                      training,
                      sampling_rate=1,
                      img_size=32,
                      values=[]):
    """Create grid and caluclate features
    :param points: Array Vstack [x, y, z, classification] [m]
    :type points: float
    :param extend: Array [minX minY, maxX maxY]
    :type extend: float
    :param training: If this is training data True else False ( training data should have label in 4th column
    :type training: bool
    :param sampling: Values 0-1. By deafult is 1 (all data poitns), for 10% of dataset 0.1
    :type sampling: float
    :param img_size: Spatial size of feature area Default 32. Should be 2 to power of n
    :type img_size: int
    :param values: Values for Feature Stats, if non is passed height is used
    :type values: float
    """
    tree = KDTree(points[:, 0:2])
    buff = int(img_size / 2)

    if values:
        points[:, 2] = values

    features = []
    n = 0

    minX = extend[0, 0] - buff
    minY = extend[0, 1] - buff
    maxX = extend[1, 0] + buff
    maxY = extend[1, 1] + buff

    gridX = np.linspace(int(minX), int(maxX), int(maxX - minX + 1))
    gridY = np.linspace(int(minY), int(maxY), int(maxY - minY + 1))

    mean = np.zeros((len(gridX), len(gridY)))
    minm = np.zeros((len(gridX), len(gridY)))
    maxm = np.zeros((len(gridX), len(gridY)))

    for i, x in enumerate(gridX):
        for j, y in enumerate(gridY):
            idx = tree.query_ball_point([x, y], 1.4)
            cell_ext = np.array([[x - 0.5, y - 0.5], [x + 0.5, y + 0.5]])
            # clip() is a helper from the original project (not shown here).
            cell_points = clip(points[idx], cell_ext)

            if cell_points.any():
                mean[i, j] = np.mean(cell_points[:, 2])
                minm[i, j] = np.min(cell_points[:, 2])
                maxm[i, j] = np.max(cell_points[:, 2])

    f1 = 255 * scipy.special.expit(mean)
    f2 = 255 * scipy.special.expit(minm)
    f3 = 255 * scipy.special.expit(maxm)

    f = np.array([f3, f2, f1], dtype=np.uint8)
    # scipy.misc.toimage was removed from newer SciPy releases; this call
    # assumes an older SciPy (with Pillow installed).
    scipy.misc.toimage(
        f, cmin=0.0,
        cmax=255).save('/media/nejc/Prostor/Dropbox/dev/Data/outfile.jpg')

    if sampling_rate != 1:
        orig_point_count = len(points)
        # downsample() is a helper from the original project that picks a
        # subset of point indices.
        points = points[downsample(len(points), sampling_rate)]
        print('Processing {0} percent of points ({1} of {2})'.format(
            sampling_rate * 100, len(points), orig_point_count))
    else:
        print('Processing all {0} points'.format(len(points)))

    for point in points:

        n += 1

        centerx = len(gridX[gridX < point[0]])
        centery = len(gridY[gridY < point[1]])

        feature = np.empty((img_size, img_size, 3), 'uint8')

        feature[..., 0] = 255 * scipy.special.expit(mean[
            (centerx - buff):(centerx + buff),
            (centery - buff):(centery + buff)] - point[2])
        feature[..., 1] = 255 * scipy.special.expit(minm[
            (centerx - buff):(centerx + buff),
            (centery - buff):(centery + buff)] - point[2])
        feature[..., 2] = 255 * scipy.special.expit(maxm[
            (centerx - buff):(centerx + buff),
            (centery - buff):(centery + buff)] - point[2])

        if training:
            # Classification 2 is ground in the LAS spec: ground -> [1, 0],
            # everything else -> [0, 1].
            if int(point[3]) != 2:
                features.append((feature, [0, 1]))
            else:
                features.append((feature, [1, 0]))

            #scipy.misc.toimage(feature, cmin=0.0, cmax=255).save('feat_out\outfile{0}.jpg'.format(n))

    return features
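
create_featureset also depends on a downsample helper that is not shown in this excerpt. A hypothetical sketch, assuming it simply draws a random subset of point indices:

import numpy as np

def downsample(n_points, sampling_rate):
    """Hypothetical helper: random subset of indices covering sampling_rate of n_points."""
    k = max(1, int(n_points * sampling_rate))
    return np.random.choice(n_points, size=k, replace=False)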
Example #9
def match_centroids(c1, c2, max_distance, inf=100000.):
    """Find the best matching of centroids in c1 to centroids in c2

    Match centroids in `c1` to those in `c2`, minimizing total distance between
    pairs with the constraint that no match is further away than `max_distance`.

    Parameters
    ----------
    c1 : array
        an N1xM array of centroid coordinates (M is the dimension of the volume).
    c2 : array
        another N2xM array of centroid coordinates
    max_distance : float
        the maximum allowed distance between pairs
    inf : float
        a ridiculously large distance to use in place of true infinity

    Returns
    -------
    c1_idxs : array
        the index of the matching centroid in `c2` for each centroid in `c1`; an index of -1 means no match.
    c2_idxs : array
        the index of the matching centroid in `c1` for each centroid in `c2`; an index of -1 means no match.

    """
    #
    # The matrix consists of rows of c1 and alternatives for c2
    # and columns for c2 and alternatives for c1.
    #
    matrix = np.ones((len(c1) + len(c2), len(c2) + len(c1))) * inf
    #
    # Compile pairs less than the max distance
    #
    kdtree = KDTree(c1)
    c2_matches = kdtree.query_ball_point(c2, max_distance)
    for c2_idx, c1s in enumerate(c2_matches):
        if len(c1s) == 0:
            continue
        d = np.sqrt(
            np.sum((c1[np.array(c1s)] - c2[c2_idx][np.newaxis, :])**2, 1))
        for c1_idx, dd in zip(c1s, d):
            # Doubled so that matching at distance d competes with leaving both
            # centroids unmatched, which costs 2 * max_distance via the alternatives.
            matrix[c1_idx, c2_idx] = dd * 2
    #
    # Connect c1 to its alternative
    #
    matrix[np.arange(len(c1)), np.arange(len(c1)) + len(c2)] = max_distance
    #
    # Connect c2 to its alternative
    #
    matrix[np.arange(len(c2)) + len(c1), np.arange(len(c2))] = max_distance
    #
    # There is no penalty for connecting alternatives to each other, in
    # whatever way works out.
    #
    matrix[len(c1):, len(c2):] = 0
    #
    # Run the Munkres (Hungarian) algorithm to do the assignment
    #
    # c1_result, c2_result = linear_sum_assignment(matrix)
    c1_result, c2_result = solve_dense(
        matrix)  # lapsolver is much faster than scipy

    #
    # The return values: initially -1
    #
    c1_idxs = -np.ones(len(c1), np.int32)
    c2_idxs = -np.ones(len(c2), np.int32)
    mask = (c1_result < len(c1)) & (c2_result < len(c2))
    c1_idxs[c1_result[mask]] = c2_result[mask]
    c2_idxs[c2_result[mask]] = c1_result[mask]
    return c1_idxs, c2_idxs
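
A small usage example with made-up centroids; it assumes numpy, scipy's KDTree and lapsolver's solve_dense are imported as in the snippet above:

import numpy as np

# Two made-up sets of 3-D centroids.
c1 = np.array([[0.0, 0.0, 0.0],
               [10.0, 0.0, 0.0],
               [50.0, 50.0, 50.0]])
c2 = np.array([[0.5, 0.0, 0.0],
               [10.2, 0.1, 0.0]])

c1_idxs, c2_idxs = match_centroids(c1, c2, max_distance=2.0)
print(c1_idxs)   # e.g. [ 0  1 -1]  -> c1[2] has no partner within 2.0
print(c2_idxs)   # e.g. [0 1]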