def segment_glomeruli2d(input_file, tissue_mask_file, output_file, voxel_xy):
    kmask = io.imread(tissue_mask_file)
    if kmask.max() == 0:
        tifffile.imsave(output_file, kmask, compress=5)
        return

    # normalize image
    img = io.imread(input_file)
    img = ndimage.median_filter(img, 3)
    img = img * 255. / img.max()

    # remove all intensity variations larger than the maximum radius of a glomerulus
    d = mahotas.disk(int(float(glomeruli_maxrad) / voxel_xy))
    img = img - mahotas.open(img.astype(np.uint8), d)
    img = img * 255. / img.max()
    ch = img[np.where(kmask > 0)]

    # segment glomeruli by Otsu thresholding, but only if the threshold is
    # higher than 1.5x the 75th percentile of intensities inside the kidney mask
    t = mahotas.otsu(img.astype(np.uint8))

    if t > np.percentile(ch, 75) * 1.5:
        cells = img > t
        cells[np.where(kmask == 0)] = 0
        cells = mahotas.open(
            cells, mahotas.disk(int(float(glomeruli_minrad) / 2. / voxel_xy)))
    else:
        cells = np.zeros_like(img)

    tifffile.imsave(output_file, img_as_ubyte(cells), compress=5)
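
The function above assumes module-level imports plus the constants glomeruli_maxrad and glomeruli_minrad (the maximum and minimum glomerulus radius, in the same physical units as voxel_xy). A minimal sketch of the setup it expects; the radius values and file names are illustrative placeholders, not taken from the original project:

import numpy as np
import mahotas
import tifffile
from scipy import ndimage
from skimage import io, img_as_ubyte

glomeruli_maxrad = 30.  # placeholder maximum glomerulus radius
glomeruli_minrad = 10.  # placeholder minimum glomerulus radius

segment_glomeruli2d('channel.tif', 'tissue_mask.tif', 'glomeruli.tif',
                    voxel_xy=0.5)  # hypothetical inputs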
Example #2
def watershedSegment(image, diskSize=20):
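    # NOTE: this fragment relies on the gradientMagnitudue() and imimposemin()
    # helpers, which are not defined here; full definitions appear in Example #6.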
    gradmag = gradientMagnitudue(image)

    ## compute foreground markers

    # open image to create flat regions at cell centers
    se_disk = pymorph.sedisk(diskSize)
    image_opened = mahotas.open(image, se_disk)

    # define foreground markers as regional maxes of cells
    # this step is slow!
    foreground_markers = mahotas.regmax(image_opened)

    ## compute background markers

    # Threshold the image, cast it to the right datatype, and then calculate the distance image
    image_black_white = image_opened > mahotas.otsu(image_opened)
    image_black_white = image_black_white.astype('uint16')

    # note the inversion here- a key difference from the matlab algorithm
    # matlab distance is to nearest non-zero pixel
    # python distance is to nearest 0 pixel
    image_distance = pymorph.to_uint16(nd.distance_transform_edt(np.logical_not(image_black_white)))
    eight_conn = pymorph.sebox()

    distance_markers = mahotas.label(mahotas.regmin(image_distance, eight_conn))[0]
    image_dist_wshed, image_dist_wshed_lines = mahotas.cwatershed(
        image_distance, distance_markers, eight_conn, return_lines=True)
    background_markers = image_dist_wshed_lines - image_black_white

    all_markers = np.logical_or(foreground_markers, background_markers)

    # impose a min on the gradient image.  assumes int64
    gradmag2 = imimposemin(gradmag.astype(int), all_markers, eight_conn)

    # call watershed
    segmented_cells, segmented_cell_lines = mahotas.cwatershed(gradmag2, mahotas.label(all_markers)[0], eight_conn, return_lines=True)

    # separate watershed regions
    segmented_cells[gradientMagnitudue(segmented_cells) > 0] = 0
    return segmented_cells > 0, segmented_cells
Example #3
#
# It is made available under the MIT License

import numpy as np
import mahotas as mh

# Load our example image:
image = mh.imread('../SimpleImageDataset/building05.jpg')

# Convert to greyscale
image = mh.colors.rgb2gray(image, dtype=np.uint8)

# Compute a threshold value:
thresh = mh.thresholding.otsu(image)
print('Otsu threshold is {0}'.format(thresh))

# Compute the thresholded image
otsubin = (image > thresh)
print('Saving thresholded image (with Otsu threshold) to otsu-threshold.jpeg')
mh.imsave('otsu-threshold.jpeg', otsubin.astype(np.uint8) * 255)

# Execute morphological opening to smooth out the edges
otsubin = mh.open(otsubin, np.ones((15, 15)))
mh.imsave('otsu-closed.jpeg', otsubin.astype(np.uint8) * 255)

# An alternative thresholding method:
thresh = mh.thresholding.rc(image)
print('Ridley-Calvard threshold is {0}'.format(thresh))
print('Saving thresholded image (with Ridley-Calvard threshold) to rc-threshold.jpeg')
mh.imsave('rc-threshold.jpeg', (image > thresh).astype(np.uint8) * 255)
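
Both thresholding functions used above take an unsigned-integer image and return a scalar cut-off, so they are easy to compare side by side. A minimal sketch on a synthetic bimodal image (the data here is made up purely for illustration):

import numpy as np
import mahotas as mh

rng = np.random.RandomState(0)
# dark background plus a brighter square gives a clearly bimodal histogram
img = rng.normal(60, 10, (128, 128))
img[32:96, 32:96] += 120
img = img.clip(0, 255).astype(np.uint8)

print('otsu: {0}'.format(mh.thresholding.otsu(img)))
print('ridley-calvard: {0}'.format(mh.thresholding.rc(img)))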
Example #4
    def extract_polygons(self, y_offset, x_offset):
        '''Creates a polygon representation for each segmented object.
        The coordinates of the polygon contours are relative to the global map,
        i.e. an offset is added to the :class:`Site <tmlib.models.site.Site>`.

        Parameters
        ----------
        y_offset: int
            global vertical offset that needs to be subtracted from
            *y*-coordinates (*y*-axis is inverted)
        x_offset: int
            global horizontal offset that needs to be added to *x*-coordinates

        Returns
        -------
        Generator[Tuple[int, shapely.geometry.polygon.Polygon]]
            label and geometry for each segmented object
        '''
        bboxes = mh.labeled.bbox(self.array)
        # We set border pixels to zero to get closed contours for
        # border objects. This may cause problems for very small objects
        # at the border, because they may get lost.
        # We recreate them later on (see below).
        plane = self.array.copy()
        plane[0, :] = 0
        plane[-1, :] = 0
        plane[:, 0] = 0
        plane[:, -1] = 0

        for label in np.unique(plane[plane > 0]):
            bbox = bboxes[label]
            obj_im = self._get_bbox_image(plane, bbox)
            logger.debug('find contour for object #%d', label)
            # We could do this for all objects at once, but doing it on the
            # bounding box for each object individually ensures that we get the
            # correct number of objects and that polygons are in the
            # correct order, i.e. sorted according to their corresponding label.
            mask = obj_im == label
            if np.sum(mask > 0) > 1:
                # We need to remove single pixel extensions on the border of
                # objects because they can lead to polygon self-intersections.
                # However, this should only be done if the object is larger
                # than 1 pixel.
                mask = mh.open(mask)
            # NOTE: OpenCV returns x, y coordinates. This means one would need
            # to flip the axis for numpy-based indexing (y,x coordinates).
            _, contours, hierarchy = cv2.findContours(
                (mask).astype(np.uint8) * 255,
                cv2.RETR_CCOMP,  # two-level hierarchy (holes)
                cv2.CHAIN_APPROX_NONE)
            if len(contours) == 0:
                logger.warn('no contours identified for object #%d', label)
                # This is most likely an object that does not extend
                # beyond the line of border pixels.
                # To ensure a correct number of objects we represent
                # it by the smallest possible valid polygon.
                coords = np.array(np.where(plane == label)).T
                y, x = np.mean(coords, axis=0).astype(int)
                shell = np.array([[x - 1, x + 1, x + 1, x - 1, x - 1],
                                  [y - 1, y - 1, y + 1, y + 1, y - 1]]).T
                holes = None
            elif len(contours) > 1:
                # It may happen that more than one contour is
                # identified per object, for example if the object
                # has holes, i.e. enclosed background pixels.
                logger.debug('%d contours identified for object #%d',
                             len(contours), label)
                holes = list()
                for i in range(len(contours)):
                    child_idx = hierarchy[0][i][2]
                    parent_idx = hierarchy[0][i][3]
                    # There should only be two levels with one
                    # contour each.
                    if parent_idx >= 0:
                        shell = np.squeeze(contours[parent_idx])
                    elif child_idx >= 0:
                        holes.append(np.squeeze(contours[child_idx]))
                    else:
                        # Same hierarchy level. This shouldn't happen.
                        # Take only the largest one.
                        lengths = [len(c) for c in contours]
                        idx = lengths.index(np.max(lengths))
                        shell = np.squeeze(contours[idx])
                        break
            else:
                shell = np.squeeze(contours[0])
                holes = None

            if shell.ndim < 2 or shell.shape[0] < 3:
                logger.warn('polygon doesn\'t have enough coordinates')
                # In case the contour cannot be represented as a
                # valid polygon we create a little square so we do not lose
                # the object.
                y, x = np.array(mask.shape) / 2
                # Create a closed ring with coordinates sorted
                # counter-clockwise
                shell = np.array([[x - 1, x + 1, x + 1, x - 1, x - 1],
                                  [y - 1, y - 1, y + 1, y + 1, y - 1]]).T

            # Add offset required due to alignment and cropping and
            # invert the y-axis as required by OpenLayers.
            add_y = y_offset + bbox[0] - 1
            add_x = x_offset + bbox[2] - 1
            shell[:, 0] = shell[:, 0] + add_x
            shell[:, 1] = -1 * (shell[:, 1] + add_y)
            if holes is not None:
                for i in range(len(holes)):
                    holes[i][:, 0] = holes[i][:, 0] + add_x
                    holes[i][:, 1] = -1 * (holes[i][:, 1] + add_y)
            poly = shapely.geometry.Polygon(shell, holes)
            if not poly.is_valid:
                logger.warn(
                    'invalid polygon for object #%d - trying to fix it', label)
                # In some cases there may be invalid intersections
                # that can be fixed with the buffer trick.
                poly = poly.buffer(0)
                if not poly.is_valid:
                    raise ValueError('Polygon of object #%d is invalid.' %
                                     label)
                if isinstance(poly, shapely.geometry.MultiPolygon):
                    logger.warn(
                        'object #%d has multiple polygons - '
                        'take largest', label)
                    # Repair may create multiple polygons.
                    # We take the largest and discard the smaller ones.
                    areas = [g.area for g in poly.geoms]
                    index = areas.index(np.max(areas))
                    poly = poly.geoms[index]
            yield (int(label), poly)
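
extract_polygons is a generator, so the (label, polygon) pairs are produced lazily. A hedged usage sketch; segmented_objects stands in for whatever instance exposes this method in the original codebase:

# hypothetical consumer: shapely geometries expose area and centroid directly
for label, poly in segmented_objects.extract_polygons(y_offset=0, x_offset=0):
    print('object %d: area=%.1f centroid=(%.1f, %.1f)'
          % (label, poly.area, poly.centroid.x, poly.centroid.y))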
Example #5
def segment_layer(filename, params):
    '''Segment one layer in a stack'''
    start = time.time()
    #extract pixel size in xy and z
    xsize, zsize = extract_zoom(params.folder)

    #load image
    img = tifffile.imread(params.inputfolder + params.folder + filename)

    #normalize image
    img = ndimage.median_filter(img, 3)
    img = img * 255. / img.max()

    ##segment kidney tissue

    sizefactor = 10.
    small = ndimage.interpolation.zoom(
        img, 1. / sizefactor)  #scale the image to a smaller size

    imgf = ndimage.gaussian_filter(small, 3. / xsize)  #Gaussian filter
    median = np.percentile(imgf, 40)  #40-th percentile for thresholding

    kmask = imgf > median * 1.5  #thresholding
    kmask = mahotas.dilate(kmask, mahotas.disk(5))
    kmask = mahotas.close_holes(kmask)  #closing holes
    kmask = mahotas.erode(kmask, mahotas.disk(5)) * 255

    #remove objects that are darker than 2*percentile
    l, n = ndimage.label(kmask)
    llist = np.unique(l)
    if len(llist) > 2:
        means = ndimage.mean(imgf, l, llist)
        bv = llist[np.where(means < median * 2)]
        ix = np.in1d(l.ravel(), bv).reshape(l.shape)
        kmask[ix] = 0

    kmask = ndimage.interpolation.zoom(kmask,
                                       sizefactor)  #scale back to normal size
    kmask = normalize(kmask)
    kmask = (kmask > mahotas.otsu(kmask.astype(
        np.uint8))) * 255.  #remove artifacts of interpolation

    #save indices of the kidney mask
    ind = np.where(kmask > 0)
    ind = np.array(ind)
    np.save(
        params.inputfolder + '../segmented/masks/kidney/' + params.folder +
        filename[:-4] + '.npy', ind)

    #segment glomeruli, if there is a kidney tissue
    if kmask.max() > 0:
        #remove all intensity variations larger than the maximum radius of a glomerulus
        d = mahotas.disk(int(float(params.maxrad) / xsize))
        img = img - mahotas.open(img.astype(np.uint8), d)
        img = img * 255. / img.max()
        ch = img[np.where(kmask > 0)]

        #segment glomeruli by Otsu thresholding, but only if the threshold is
        #higher than 1.5x the 75th percentile of intensities inside the kidney mask
        t = mahotas.otsu(img.astype(np.uint8))

        if t > np.percentile(ch, 75) * 1.5:
            cells = img > t
            cells[np.where(kmask == 0)] = 0
            cells = mahotas.open(
                cells, mahotas.disk(int(float(params.minrad) / 2. / xsize)))

        else:
            cells = np.zeros_like(img)

    else:
        cells = np.zeros_like(img)

    #save indices of the glomeruli mask
    ind = np.where(cells > 0)
    ind = np.array(ind)
    np.save(
        params.inputfolder + '../segmented/masks/glomeruli/' + params.folder +
        filename[:-4] + '.npy', ind)
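
Because the masks are stored as np.where() index arrays rather than full images, reading them back means rebuilding a boolean array of the original layer shape. A minimal sketch, assuming layer_shape and the file path (both hypothetical here) are known:

import numpy as np

ind = np.load('segmented/masks/kidney/layer0.npy')  # hypothetical path
mask = np.zeros(layer_shape, dtype=bool)  # layer_shape: shape of the original layer
mask[tuple(ind)] = True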
Example #6
def watershedSegment(image, diskSize=20):
    """This routine implements the watershed example from 
    http://www.mathworks.com/help/images/examples/marker-controlled-watershed-segmentation.html, 
    but using pymorph and mahotas.

    :param image: an image (2d numpy array) to be segemented
    :param diskSize: an integer used as a size for a structuring element used 
                     for morphological preprocessing.
    :returns: tuple of binarized and labeled segmention masks
    """
    def gradientMagnitudue(image):
        sobel_x = nd.sobel(image.astype('double'), 0)
        sobel_y = nd.sobel(image.astype('double'), 1)
        return np.sqrt((sobel_x * sobel_x) + (sobel_y * sobel_y))

    def imimposemin(image, mask, connectivity):
        fm = image.copy()
        # sentinel values just inside the int64 range: markers are forced to
        # the global minimum, everything else to the global maximum
        fm[mask] = -9223372036854775800
        fm[np.logical_not(mask)] = 9223372036854775800

        fp1 = image + 1

        g = np.minimum(fp1, fm)

        j = infrec(fm, g)
        return j

    def infrec(f, g, Bc=None):
        # infimum reconstruction: conditional dilation of f under g until stability
        if Bc is None: Bc = pymorph.secross()
        n = f.size
        return fast_conditional_dilate(f, g, Bc, n)

    def fast_conditional_dilate(f, g, Bc=None, n=1):
        if Bc is None:
            Bc = pymorph.secross()
        f = pymorph.intersec(f, g)
        for i in range(n):
            prev = f
            f = pymorph.intersec(mahotas.dilate(f, Bc), g)
            if pymorph.isequal(f, prev):
                break
        return f

    gradmag = gradientMagnitudue(image)

    ## compute foreground markers

    # open image to create flat regions at cell centers
    se_disk = pymorph.sedisk(diskSize)
    image_opened = mahotas.open(image, se_disk)

    # define foreground markers as regional maxes of cells
    # this step is slow!
    foreground_markers = mahotas.regmax(image_opened)

    ## compute background markers

    # Threshold the image, cast it to the right datatype, and then calculate the distance image
    image_black_white = image_opened > mahotas.otsu(image_opened)
    image_black_white = image_black_white.astype('uint16')

    # note the inversion here- a key difference from the matlab algorithm
    # matlab distance is to nearest non-zero pixel
    # python distance is to nearest 0 pixel
    image_distance = pymorph.to_uint16(
        nd.distance_transform_edt(np.logical_not(image_black_white)))
    eight_conn = pymorph.sebox()

    distance_markers = mahotas.label(mahotas.regmin(image_distance,
                                                    eight_conn))[0]
    image_dist_wshed, image_dist_wshed_lines = mahotas.cwatershed(
        image_distance, distance_markers, eight_conn, return_lines=True)
    background_markers = image_dist_wshed_lines - image_black_white

    all_markers = np.logical_or(foreground_markers, background_markers)

    # impose a min on the gradient image.  assumes int64
    gradmag2 = imimposemin(gradmag.astype(int), all_markers, eight_conn)

    # call watershed
    segmented_cells, segmented_cell_lines = mahotas.cwatershed(
        gradmag2, mahotas.label(all_markers)[0], eight_conn, return_lines=True)
    segmented_cells -= 1

    # separate watershed regions
    segmented_cells[gradientMagnitudue(segmented_cells) > 0] = 0
    return segmented_cells > 0, segmented_cells
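
The inversion called out in the comments (MATLAB's bwdist measures the distance to the nearest non-zero pixel, while SciPy's distance_transform_edt measures the distance to the nearest zero) is easy to verify on a toy array:

import numpy as np
from scipy import ndimage as nd

bw = np.zeros((5, 5))
bw[2, 2] = 1
# invert first, so the result is the distance to the foreground pixel
d = nd.distance_transform_edt(np.logical_not(bw))
print(d[0, 0])  # sqrt(8) ~ 2.83: corner to the single foreground pixel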
Example #8
im16 = mh.gaussian_filter(image, 16)
thresh = mh.thresholding.otsu(im16.astype(np.uint8))
threshed = (im16 > thresh)
plt.figure()
plt.imshow(threshed)
plt.title('thresholded image (after blurring)')
print('Otsu threshold after blurring is {}.'.format(thresh))
mh.imsave('thresholded16.png', threshed.astype(np.uint8) * 255)
plt.show()

image = mh.imread('SimpleImageDataset/building05.jpg')
image = mh.colors.rgb2grey(image, dtype=np.uint8)

th = mh.thresholding.otsu(image)
print('Otsu threshold is {0}'.format(th))

otsubin = (image > th)
print('Saving thresholded image (with Otsu threshold) to otsu-threshold.jpeg')
mh.imsave('otsu-threshold.jpeg', otsubin.astype(np.uint8) * 255)

otsubin = mh.open(otsubin, np.ones((15, 15)))
mh.imsave('otsu-closed.jpeg', otsubin.astype(np.uint8) * 255)

th = mh.thresholding.rc(image)
print('Ridley-Calvard threshold is {0}'.format(th))
print(
    'Saving thresholded image (with Ridley-Calvard threshold) to rc-threshold.jpeg'
)
mh.imsave('rc-threshold.jpeg', (image > th).astype(np.uint8) * 255)
Example #9
    def save_open_image(self, threshold, name):
        image = mh.open(self.get_binary_image(threshold), np.ones((15, 15)))
        mh.imsave(name, image.astype(np.uint8) * 255)
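
The method depends on a get_binary_image() helper from its enclosing class, which is not shown. A minimal sketch of a plausible host class, assuming the helper applies a simple global threshold to a stored greyscale image (the class itself is hypothetical, not from the source):

import numpy as np
import mahotas as mh

class ThresholdedImage(object):  # hypothetical host class
    def __init__(self, image):
        self.image = image

    def get_binary_image(self, threshold):
        # assumed behaviour: global threshold on the stored image
        return self.image > threshold

    def save_open_image(self, threshold, name):
        image = mh.open(self.get_binary_image(threshold), np.ones((15, 15)))
        mh.imsave(name, image.astype(np.uint8) * 255)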