Example #1
    def testRectQuery(self):
        xs = [TstO(r) for r in take(1000, G.rect, 0.01)]
        rt = RTree()
        for x in xs:
            rt.insert(x, x.rect)
            self.invariants(rt)

        for x in xs:
            qrect = G.intersectingWith(x.rect)
            orect = G.disjointWith(x.rect)
            self.assertTrue(qrect.does_intersect(x.rect))
            p = G.pointInside(x.rect)
            res = [ro.leaf_obj() for ro in rt.query_point(p)]
            self.invariants(rt)
            self.assertIn(x, res)
            res2 = [r.leaf_obj() for r in rt.query_rect(qrect)]
            self.assertIn(x, res2)
            rres = [r.leaf_obj() for r in rt.query_rect(orect)]
            self.assertNotIn(x, rres)
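For reference, a minimal standalone sketch of the pyrtree API this test exercises (assuming the pyrtree package exports RTree and Rect, and that query_point accepts an (x, y) pair; both assumptions match the snippets on this page but are not guaranteed by them):

from pyrtree import RTree, Rect

tree = RTree()
# Insert objects keyed by (x_min, y_min, x_max, y_max) rectangles
for i in range(5):
    tree.insert('obj%d' % i, Rect(i, i, i + 1.0, i + 1.0))

# Rectangle query: results are node cursors; keep leaves and unwrap them
hits = [n.leaf_obj() for n in tree.query_rect(Rect(0.5, 0.5, 2.5, 2.5))
        if n.is_leaf()]
print(hits)

# Point query works the same way (the (x, y) tuple form is an assumption)
print([n.leaf_obj() for n in tree.query_point((1.5, 1.5)) if n.is_leaf()])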
Example #2
class MultipleTilesAffineRenderer:
    BLEND_TYPE = {"NO_BLENDING": 0, "AVERAGING": 1, "LINEAR": 2}

    def __init__(self, single_tiles, blend_type="NO_BLENDING"):
        """Receives a number of image paths, and for each a transformation matrix"""
        self.blend_type = self.BLEND_TYPE[blend_type]
        self.single_tiles = single_tiles
        # Create an RTree of the bounding boxes of the tiles
        self.rtree = RTree()
        for t in self.single_tiles:
            bbox = t.get_bbox()
            # pyrtree uses the (x_min, y_min, x_max, y_max) notation
            self.rtree.insert(t, Rect(bbox[0], bbox[2], bbox[1], bbox[3]))

    def add_transformation(self, transform_matrix):
        """Adds a transformation to all tiles"""
        self.rtree = RTree()
        for single_tile in self.single_tiles:
            single_tile.add_transformation(transform_matrix)
            bbox = single_tile.get_bbox()
            # pyrtree uses the (x_min, y_min, x_max, y_max) notation
            self.rtree.insert(single_tile,
                              Rect(bbox[0], bbox[2], bbox[1], bbox[3]))

    def render(self):
        if len(self.single_tiles) == 0:
            return None, None

        # Render all tiles by finding the bounding box, and using crop
        all_bboxes = np.array([t.get_bbox() for t in self.single_tiles]).T
        bbox = [
            np.min(all_bboxes[0]),
            np.max(all_bboxes[1]),
            np.min(all_bboxes[2]),
            np.max(all_bboxes[3])
        ]
        crop, start_point = self.crop(bbox[0], bbox[2], bbox[1], bbox[3])
        return crop, start_point

    def crop(self, from_x, from_y, to_x, to_y):
        if len(self.single_tiles) == 0:
            return None, None

        # Distinguish between the different types of blending
        if self.blend_type == 0:  # No blending
            res = np.zeros(
                (round(to_y + 1 - from_y), round(to_x + 1 - from_x)),
                dtype=np.uint8)
            # render only relevant parts, and stitch them together
            # filter only relevant tiles using rtree
            rect_res = self.rtree.query_rect(Rect(from_x, from_y, to_x, to_y))
            for rtree_node in rect_res:
                if not rtree_node.is_leaf():
                    continue
                t = rtree_node.leaf_obj()
                t_img, t_start_point, _ = t.crop(from_x, from_y, to_x, to_y)
                if t_img is not None:
                    res[t_start_point[1] - from_y:t_img.shape[0] +
                        (t_start_point[1] - from_y),
                        t_start_point[0] - from_x:t_img.shape[1] +
                        (t_start_point[0] - from_x)] = t_img

        elif self.blend_type == 1:  # Averaging
            # Do the calculation on a uint16 image (for overlapping areas), and convert to uint8 at the end
            res = np.zeros(
                (round(to_y + 1 - from_y), round(to_x + 1 - from_x)),
                dtype=np.uint16)
            res_mask = np.zeros(
                (round(to_y + 1 - from_y), round(to_x + 1 - from_x)),
                dtype=np.uint8)

            # render only relevant parts, and stitch them together
            # filter only relevant tiles using rtree
            rect_res = self.rtree.query_rect(Rect(from_x, from_y, to_x, to_y))
            for rtree_node in rect_res:
                if not rtree_node.is_leaf():
                    continue
                t = rtree_node.leaf_obj()
                t_img, t_start_point, t_mask = t.crop(from_x, from_y, to_x,
                                                      to_y)
                if t_img is not None:
                    res[t_start_point[1] - from_y:t_img.shape[0] +
                        (t_start_point[1] - from_y),
                        t_start_point[0] - from_x:t_img.shape[1] +
                        (t_start_point[0] - from_x)] += t_img
                    res_mask[t_start_point[1] - from_y:t_img.shape[0] +
                             (t_start_point[1] - from_y),
                             t_start_point[0] - from_x:t_img.shape[1] +
                             (t_start_point[0] - from_x)] += t_mask

            # Change the values of 0 in the mask to 1, to avoid division by 0
            res_mask[res_mask == 0] = 1
            res = res / res_mask
            res = res.astype(np.uint8)

        elif self.blend_type == 2:  # Linear averaging
            # Do the calculation on a uint32 image (for overlapping areas), and convert to uint8 at the end
            # For each pixel, use the min distance to a tile edge as a weight, and
            # average the contributions according to these weights
            res = np.zeros(
                (round(to_y + 1 - from_y), round(to_x + 1 - from_x)),
                dtype=np.uint32)
            res_weights = np.zeros(
                (round(to_y + 1 - from_y), round(to_x + 1 - from_x)),
                dtype=np.uint16)

            # render only relevant parts, and stitch them together
            # filter only relevant tiles using rtree
            rect_res = self.rtree.query_rect(Rect(from_x, from_y, to_x, to_y))
            for rtree_node in rect_res:
                if not rtree_node.is_leaf():
                    continue
                t = rtree_node.leaf_obj()
                t_img, t_start_point, t_weights = t.crop_with_distances(
                    from_x, from_y, to_x, to_y)
                if t_img is not None:
                    res[t_start_point[1] - from_y:t_img.shape[0] +
                        (t_start_point[1] - from_y),
                        t_start_point[0] - from_x:t_img.shape[1] +
                        (t_start_point[0] - from_x)] += t_img * t_weights
                    res_weights[t_start_point[1] - from_y:t_img.shape[0] +
                                (t_start_point[1] - from_y),
                                t_start_point[0] - from_x:t_img.shape[1] +
                                (t_start_point[0] - from_x)] += t_weights

            # Change the weights that are 0 to 1, to avoid division by 0
            res_weights[res_weights < 1] = 1
            res = res / res_weights
            res = res.astype(np.uint8)

        return res, (from_x, from_y)
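All three blending branches above share the same window arithmetic when pasting a tile into the output canvas. A minimal sketch of that step, factored into a hypothetical helper (paste_into is not part of the class; only NumPy is assumed):

import numpy as np

def paste_into(canvas, tile_img, tile_start, from_x, from_y, combine=None):
    # tile_start is the tile's (x, y) origin in global coordinates;
    # (from_x, from_y) is the global origin of the canvas.
    sy = tile_start[1] - from_y
    sx = tile_start[0] - from_x
    window = canvas[sy:sy + tile_img.shape[0], sx:sx + tile_img.shape[1]]
    if combine is None:
        window[...] = tile_img                    # overwrite (NO_BLENDING)
    else:
        window[...] = combine(window, tile_img)   # accumulate (AVERAGING/LINEAR)

# e.g. paste_into(res, t_img, t_start_point, from_x, from_y)
# or   paste_into(res, t_img, t_start_point, from_x, from_y, combine=np.add)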
Example #3
# Excerpt from a larger script: the RTree `t` and the matplotlib axes `ax`
# are created earlier in the original and are assumed here.
import matplotlib.pyplot as plt
from pyrtree import RTree, Rect

t = RTree()              # assumed: populated with tiles earlier in the original
fig, ax = plt.subplots()

# rectx1/recty1/rectx2 are missing from this excerpt; the values below are assumed.
rectx1 = -1000.0
recty1 = 1000.0
rectx2 = -970.0
recty2 = 1040

tempx1 = -1000.0
tempy1 = 1030.0
tempx2 = -972.5
tempy2 = 1000.0
wi = rectx2 - rectx1
he = recty2 - recty1

p = plt.Rectangle((rectx1, recty1), wi, he, angle=0, fill=False)
ax.add_patch(p)

# Example of filtering leaves out of a rect query:
# real_point_res = [r.leaf_obj() for r in t.query_rect(Rect(2, 4, 2.5, 4.5)) if r.is_leaf()]
for i in range(10):
    rect_res = t.query_rect(Rect(rectx1, recty1, rectx2, recty2))
    hits = [r.leaf_obj() for r in rect_res if r.is_leaf()]
    print("iteration", i, "hits:", len(hits))
    # Grow the query window before the next iteration
    rectx1 -= 20
    recty1 -= 50
    rectx2 += 20
    recty2 += 50
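The loop above repeatedly enlarges the query window, which generalizes to a simple "grow until enough hits" search. A hedged sketch (find_at_least is a hypothetical helper; only RTree.query_rect and Rect from pyrtree are assumed):

from pyrtree import Rect

def find_at_least(tree, x1, y1, x2, y2, k, grow_x=20, grow_y=50, max_iters=100):
    # Enlarge the query rect until it covers at least k leaf objects
    # (or the iteration budget runs out).
    hits = []
    for _ in range(max_iters):
        hits = [r.leaf_obj() for r in tree.query_rect(Rect(x1, y1, x2, y2))
                if r.is_leaf()]
        if len(hits) >= k:
            break
        x1 -= grow_x
        y1 -= grow_y
        x2 += grow_x
        y2 += grow_y
    return hits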
Example #4
# pyrtree insert-and-search test. pyrtree builds a query-only index:
# items can be inserted and queried, but not removed.
from pyrtree import RTree, Rect

DATA_SIZE = 10  # assumed for this example; not defined in the original excerpt

obj = []
for x in range(DATA_SIZE):
    # Each entry pairs a rectangle with a name:
    # [Rect(0, 0, 1, 1), 'obj0'], [Rect(1, 1, 2, 2), 'obj1'], ...
    obj.append([Rect(x, x, x + 1, x + 1), 'obj' + str(x)])

rtree = RTree() # tree creation

# pyrtree uses the Rect (x_min, y_min, x_max, y_max) notation
for x in range(DATA_SIZE):
    rtree.insert(obj[x], obj[x][0]) # element insertion with a given box

# Query the tree
rect_res = rtree.query_rect(Rect(1, 1, 4, 4))

# Traverse the query result
for rtree_node in rect_res:
    if not rtree_node.is_leaf():
        continue
    t = rtree_node.leaf_obj()
    print(t[0].x, t[0].y, t[0].xx, t[0].yy, t[1])

# Get the internal nodes at the deepest level of the tree; each contains a number of leaf nodes
for nodes in rtree.get_last_level():
    print(nodes)

# Print all nodes at the last level
for nodes in rtree.get_last_level():
    for node in nodes:
        print(node)
Example #5
class MultipleTilesRenderer:
    BLEND_TYPE = {"NO_BLENDING": 0, "AVERAGING": 1, "LINEAR": 2}

    def __init__(self, single_tiles, blend_type="NO_BLENDING"):
        """Receives a number of image paths, and for each a transformation matrix"""
        self.blend_type = self.BLEND_TYPE[blend_type]
        self.single_tiles = single_tiles
        # Create an RTree of the bounding boxes of the tiles
        self.rtree = RTree()
        for t in self.single_tiles:
            bbox = t.get_bbox()
            # pyrtree uses the (x_min, y_min, x_max, y_max) notation
            self.rtree.insert(t, Rect(bbox[0], bbox[2], bbox[1], bbox[3]))

    def add_transformation(self, model):
        """Adds a transformation to all tiles"""
        self.rtree = RTree()
        for single_tile in self.single_tiles:
            single_tile.add_transformation(model)
            bbox = single_tile.get_bbox()
            # pyrtree uses the (x_min, y_min, x_max, y_max) notation
            self.rtree.insert(single_tile, Rect(bbox[0], bbox[2], bbox[1], bbox[3]))

    def render(self):
        if len(self.single_tiles) == 0:
            return None, None

        # Render all tiles by finding the bounding box, and using crop
        all_bboxes = np.array([t.get_bbox() for t in self.single_tiles]).T
        bbox = [np.min(all_bboxes[0]), np.max(all_bboxes[1]), np.min(all_bboxes[2]), np.max(all_bboxes[3])]
        crop, start_point = self.crop(bbox[0], bbox[2], bbox[1], bbox[3])
        return crop, start_point

    def crop(self, from_x, from_y, to_x, to_y):
        if len(self.single_tiles) == 0:
            return None, None

        # Distinguish between the different types of blending
        if self.blend_type == 0: # No blending
            res = np.zeros((round(to_y + 1 - from_y), round(to_x + 1 - from_x)), dtype=np.uint8)
            # render only relevant parts, and stitch them together
            # filter only relevant tiles using rtree
            rect_res = self.rtree.query_rect( Rect(from_x, from_y, to_x, to_y) )
            for rtree_node in rect_res:
                if not rtree_node.is_leaf():
                    continue
                t = rtree_node.leaf_obj()
                t_img, t_start_point, _ = t.crop(from_x, from_y, to_x, to_y)
                if t_img is not None:
                    res[t_start_point[1] - from_y: t_img.shape[0] + (t_start_point[1] - from_y),
                        t_start_point[0] - from_x: t_img.shape[1] + (t_start_point[0] - from_x)] = t_img

        elif self.blend_type == 1: # Averaging
            # Do the calculation on a float32 image (for overlapping areas), and convert to uint8 at the end
            res = np.zeros(
                (round(to_y + 1 - from_y), round(to_x + 1 - from_x)), 
                np.float32)
            res_mask = np.zeros(
                (round(to_y + 1 - from_y), round(to_x + 1 - from_x)),
                np.float32)

            # render only relevant parts, and stitch them together
            # filter only relevant tiles using rtree
            rect_res = self.rtree.query_rect( Rect(from_x, from_y, to_x, to_y) )
            for rtree_node in rect_res:
                if not rtree_node.is_leaf():
                    continue
                t = rtree_node.leaf_obj()
                t_img, t_start_point, _ = t.crop(from_x, from_y, to_x, to_y)
                if t_img is not None:
                    t_mask, _, _ = AlphaTileRenderer(t).crop(
                        from_x, from_y, to_x, to_y)
                    res[t_start_point[1] - from_y: t_img.shape[0] + (t_start_point[1] - from_y),
                        t_start_point[0] - from_x: t_img.shape[1] + (t_start_point[0] - from_x)] += t_img
                    res_mask[t_start_point[1] - from_y: t_img.shape[0] + (t_start_point[1] - from_y),
                             t_start_point[0] - from_x: t_img.shape[1] + (t_start_point[0] - from_x)] += t_mask

            # Change the values of 0 in the mask to 1, to avoid division by 0
            res_mask[res_mask == 0] = 1
            res = res / res_mask
            res = np.maximum(0, np.minimum(255, res)).astype(np.uint8)

        elif self.blend_type == 2: # Linear averaging
            # Do the calculation on a uint32 image (for overlapping areas), and convert to uint8 at the end
            # For each pixel, use the min distance to a tile edge as a weight, and
            # average the contributions according to these weights
            res = np.zeros((round(to_y + 1 - from_y), round(to_x + 1 - from_x)), dtype=np.uint32)
            res_weights = np.zeros((round(to_y + 1 - from_y), round(to_x + 1 - from_x)), dtype=np.uint16)

            # render only relevant parts, and stitch them together
            # filter only relevant tiles using rtree
            rect_res = self.rtree.query_rect( Rect(from_x, from_y, to_x, to_y) )
            for rtree_node in rect_res:
                if not rtree_node.is_leaf():
                    continue
                t = rtree_node.leaf_obj()
                t_img, t_start_point, t_weights = t.crop_with_distances(from_x, from_y, to_x, to_y)
                if t_img is not None:
                    print "actual image start_point:", t_start_point, "and shape:", t_img.shape
                    res[t_start_point[1] - from_y: t_img.shape[0] + (t_start_point[1] - from_y),
                        t_start_point[0] - from_x: t_img.shape[1] + (t_start_point[0] - from_x)] += t_img * t_weights
                    res_weights[t_start_point[1] - from_y: t_img.shape[0] + (t_start_point[1] - from_y),
                                t_start_point[0] - from_x: t_img.shape[1] + (t_start_point[0] - from_x)] += t_weights

            # Change the weights that are 0 to 1, to avoid division by 0
            res_weights[res_weights < 1] = 1
            res = res / res_weights
            res = np.maximum(0, np.minimum(255, res)).astype(np.uint8)

        return res, (from_x, from_y)
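The AVERAGING branch in both renderer classes reduces to a small accumulate-then-normalize pattern. A standalone sketch (blend_average is a hypothetical helper; only NumPy is assumed):

import numpy as np

def blend_average(tiles, height, width):
    # tiles: iterable of (img, mask, (x, y)) triples already expressed
    # in canvas coordinates; img and mask share the same shape.
    acc = np.zeros((height, width), dtype=np.float32)
    cnt = np.zeros((height, width), dtype=np.float32)
    for img, mask, (x, y) in tiles:
        h, w = img.shape
        acc[y:y + h, x:x + w] += img
        cnt[y:y + h, x:x + w] += mask
    cnt[cnt == 0] = 1                 # avoid division by zero outside all tiles
    return np.clip(acc / cnt, 0, 255).astype(np.uint8)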