Exemple #1
0
def wsi_to_jpg(sample_path, output_path):
    """Save a downscaled RGB snapshot of a whole-slide image.

    Reads the pyramid level best matching the module-level SCALE_FACTOR,
    converts it to RGB, resizes it to exactly (level-0 dims / SCALE_FACTOR),
    and writes it to `output_path` (format inferred from the extension).

    Args:
        sample_path: path to the whole-slide image readable by OpenSlide.
        output_path: destination image path.
    """
    slide = OpenSlide(sample_path)
    try:
        w, h = slide.dimensions
        # Target size derived from level-0 dimensions, not the chosen level,
        # so the output size is exact regardless of pyramid level spacing.
        new_w = math.floor(w / SCALE_FACTOR)
        new_h = math.floor(h / SCALE_FACTOR)
        level = slide.get_best_level_for_downsample(SCALE_FACTOR)
        pil_img = slide.read_region(
            (0, 0), level, slide.level_dimensions[level]).convert("RGB").resize(
                (new_w, new_h), PIL.Image.BILINEAR)
        pil_img.save(output_path)
    finally:
        # Release the slide's file handle even if reading or saving fails.
        slide.close()
Exemple #2
0
def save_slide_cutting(
    file_path,
    multiple,
    # save_path
):
    """Load a whole-slide image at the pyramid level closest to the
    requested downsample `multiple`, then return it as a BGR array
    resized to (level-0 size / multiple)."""
    slide = OpenSlide(file_path)
    best_level = slide.get_best_level_for_downsample(multiple)
    level_downsample = slide.level_downsamples[best_level]
    level_w, level_h = slide.level_dimensions[best_level]

    # read_region yields RGBA; drop alpha and switch to OpenCV's BGR order.
    region = slide.read_region((0, 0), best_level, (level_w, level_h))
    bgr = cv2.cvtColor(np.array(region), cv2.COLOR_RGBA2BGR)

    # Remaining shrink factor after the pyramid level's own downsample.
    remaining = multiple / level_downsample
    target_w = int(level_w / remaining)
    target_h = int(level_h / remaining)
    resized = cv2.resize(bgr, (target_w, target_h),
                         interpolation=cv2.INTER_LINEAR)
    # cv2.imwrite(img, save_path)
    return resized
Exemple #3
0
def read_openslide_tile(slide: openslide.OpenSlide, downsample, tile_rect):
    """Read one tile from `slide` at the level best matching `downsample`.

    `tile_rect` is (x, y, width, height): x/y are the region origin and
    width/height the region size passed straight to read_region.
    """
    x, y = tile_rect[0], tile_rect[1]
    width, height = tile_rect[2], tile_rect[3]
    best_level = slide.get_best_level_for_downsample(downsample)
    return slide.read_region((x, y), best_level, (width, height))
    def __downscale_slide(self, slide: OpenSlide, slide_path: Path):
        """Downscale a whole-slide image and save the result.

        If the chosen pyramid level fits in memory (< 1e9 pixels), the whole
        level is read, resized to the computed target size, and saved as one
        image. Otherwise the level is split into a grid of tiles, each tile
        is read and saved separately (without resizing).

        Args:
            slide: open OpenSlide handle for the source image.
            slide_path: source path, used to derive the output path(s).
        """
        original_width, original_height = slide.dimensions

        # Either derive the factor from a minimum-dimension constraint or
        # use the fixed, configured factor.
        if self._min_dim_size:
            downscale_factor = self._calc_downscale_factor(slide)
        else:
            downscale_factor = self._downscale_factor

        level = slide.get_best_level_for_downsample(downscale_factor)
        level_width, level_height = slide.level_dimensions[level]

        target_width, target_height = self._calc_downscaled_sizes(
            slide, downscale_factor)

        if level_width * level_height < 1e9:
            # Level fits in memory: read it whole, convert, resize, save.
            whole_slide_image = slide.read_region(location=(0, 0),
                                                  level=level,
                                                  size=(level_width,
                                                        level_height))

            # Alpha-less formats (e.g. JPEG) need an RGB conversion first.
            if self._new_slide_ext.lower() in self._RGB_EXTENSIONS:
                whole_slide_image = whole_slide_image.convert("RGB")

            # PIL.Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
            # same filter under its canonical name (available since 2.7).
            whole_slide_image = whole_slide_image.resize(
                (target_width, target_height), PIL.Image.LANCZOS)

            output_path = self._calc_new_wsi_path(slide_path)
            output_path.parent.mkdir(parents=True, exist_ok=True)
            whole_slide_image.save(output_path)
        else:
            # Level too large: split it into a grid and save per-tile images.
            n_vertical_splits = self._find_best_n_splits(level_height)
            n_horizontal_splits = self._find_best_n_splits(level_width)

            # Tile sizes in level-0 coordinates (read_region takes its
            # `location` in level-0 space)...
            original_tile_height = math.ceil(original_height /
                                             n_vertical_splits)
            original_tile_width = math.ceil(original_width /
                                            n_horizontal_splits)

            # ...and in chosen-level coordinates (read_region takes its
            # `size` in the chosen level's space).
            level_tile_height = math.ceil(level_height / n_vertical_splits)
            level_tile_width = math.ceil(level_width / n_horizontal_splits)

            for i in range(n_vertical_splits):
                for j in range(n_horizontal_splits):
                    y_i = original_tile_height * i
                    x_j = original_tile_width * j

                    # Clamp the last row/column so the tiles never overrun
                    # the level's dimensions (ceil above over-allocates).
                    level_height_i = min(level_tile_height,
                                         level_height - level_tile_height * i)
                    level_width_j = min(level_tile_width,
                                        level_width - level_tile_width * j)

                    whole_slide_image_tile = slide.read_region(
                        location=(x_j, y_i),
                        level=level,
                        size=(level_width_j, level_height_i))

                    whole_slide_image_tile = whole_slide_image_tile.convert(
                        "RGB")

                    # One file per tile, suffixed with its grid position.
                    output_path = self._calc_new_wsi_path(slide_path,
                                                          part=f"{i}_{j}")
                    output_path.parent.mkdir(parents=True, exist_ok=True)
                    whole_slide_image_tile.save(output_path)
Exemple #5
0
def segment_region(
        wsi: openslide.OpenSlide,
        scale_factor: float = 1 / 64,
        classes: int = 3,
        smooth: bool = False,
        fill_holes: bool = False,
        plot: str = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Separate tissue from the white background using Otsu's method.

    Parameters:
        wsi         : the whole-slide image to segment.
        scale_factor: float between 0 and 1 quantifying the zoom-out level.
                      If equal to 1, the whole slide at the highest
                      resolution is loaded into memory!  If falsy (0/None),
                      falls back to 1 / the slide's coarsest downsample.
        classes     : number of classes for multi-Otsu thresholding.
                      See for more details:
                      https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_multiotsu.html
                      By default, 3 pixel classes are extracted from the slide.
        smooth      : smooth the segmentation mask (closing then opening).
                      Useful to extract sparse tissue.
        fill_holes  : fill the "holes" of the segmentation mask.
        plot        : display the segmentation steps, 'all channels' or
                      'lab only'.

    Returns:
        (img, lab, new_img, mask): raw image, thresholded channel, image
        with background forced to white, and the binary tissue mask.
        Returns None when plot == 'all channels' (figure display only).
    """
    scale_factor = 1 / wsi.level_downsamples[
        -1] if not scale_factor else scale_factor
    # choose the most appropriate resolution level for the zoom
    # requested by the user
    level = wsi.get_best_level_for_downsample(1 / scale_factor)
    # convert the slide into a numpy array
    img = wsi_to_numpy(wsi, level)

    # segmentation-mask computation
    def get_mask(channel: str = 'lab') -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Compute a segmentation mask.
        If channel = 'lab', first converts the image into the LAB space.
        Otherwise, uses the red, green or blue channel to compute the
        optimal threshold(s) separating the image's layers.

        NOTE(review): only the four channel names tested below are handled;
        any other value (including the default 'lab') leaves `lab` and `_t`
        unbound and raises NameError — callers only pass the four values.
        """
        # RGB-to-LAB conversion
        # more details here: https://en.wikipedia.org/wiki/CIELAB_color_space
        # and here for the computation method:
        # https://docs.opencv.org/3.4/de/d25/imgproc_color_conversions.html#color_convert_rgb_lab
        new_img = img.copy()
        mask = np.ones((img.shape[:2]))
        # apply Otsu's method to the image in the LAB color space,
        # or to the Red, Green or Blue channel of the RGB space.
        if channel == 'A (LAB)':
            lab = cv2.cvtColor(new_img, cv2.COLOR_BGR2LAB)[..., 1]
            _t = threshold_multiotsu(lab, classes)[0]
        elif channel == 'rouge (RGB)':
            lab = new_img[..., 0]
            _t = threshold_multiotsu(lab, classes)[1]
        elif channel == 'vert (RGB)':
            lab = new_img[..., 1]
            _t = threshold_multiotsu(lab, classes)[1]
        elif channel == 'bleu (RGB)':
            lab = new_img[..., 2]
            _t = threshold_multiotsu(lab, classes)[1]
        # build the segmentation mask
        if channel == 'A (LAB)':
            mask = 1 - (lab < _t) * 1
        else:
            mask = (lab < _t) * 1
            lab = 1 - lab
        # background pixels are encoded in RGB as (255, 255, 255)
        new_img[np.where(mask == 0)] = 255
        if smooth:
            # morphological closing then opening to smooth the mask
            mask = binary_closing(mask, iterations=15)
            mask = binary_opening(mask, iterations=10)
            if fill_holes:
                mask = binary_fill_holes(mask)
            new_img[np.where(mask == 0)] = 255
        return lab, mask, new_img

    # display segmentations based on LAB, then RED, GREEN, BLUE channels (4x4 images)
    if plot == 'all channels':
        _, axes = plt.subplots(nrows=4, ncols=4, figsize=(27, 24))
        # approximate magnification at the chosen level
        mag = int(wsi.properties['openslide.objective-power']
                  ) / wsi.level_downsamples[level]
        for i, channel in enumerate(
            ['A (LAB)', 'rouge (RGB)', 'vert (RGB)', 'bleu (RGB)']):
            # compute the segmentation mask from 4 different channels.
            lab, mask, new_img = get_mask(channel)
            axes[i, 0].imshow(img)
            axes[i, 0].set_axis_off()
            axes[i, 0].set_title("Niveau %d (grand. %.3f) de l'image brute" %
                                 (level, mag))
            axes[i, 1].imshow(lab)
            axes[i, 1].set_axis_off()
            axes[i, 1].set_title(f"Canal {channel}")
            axes[i, 2].imshow(mask, cmap='gray')
            axes[i, 2].set_axis_off()
            axes[i, 2].set_title('Masque de segmentation')
            axes[i, 3].imshow(img)
            axes[i, 3].set_axis_off()
            axes[i, 3].imshow(mask, alpha=0.3, cmap='gray')
            axes[i, 3].set_title('Sélection des tissus')
        plt.show()
        return None
    # display the segmentation based on LAB only (1x4 images)
    else:
        lab, mask, new_img = get_mask(channel='A (LAB)')
        if plot == 'lab only':
            _, axes = plt.subplots(nrows=1, ncols=4, figsize=(27, 6))
            mag = int(wsi.properties['openslide.objective-power']
                      ) / wsi.level_downsamples[level]
            # compute the segmentation mask from the A channel of LAB.
            lab, mask, new_img = get_mask(channel='A (LAB)')
            axes[0].imshow(img)
            axes[0].set_axis_off()
            axes[0].set_title("Niveau %d (grand. %.3f) de l'image brute" %
                              (level, mag))
            axes[1].imshow(lab)
            axes[1].set_axis_off()
            axes[1].set_title(f"Canal A (LAB)")
            axes[2].imshow(mask, cmap='gray')
            axes[2].set_axis_off()
            axes[2].set_title('Masque de segmentation')
            axes[3].imshow(img)
            axes[3].set_axis_off()
            axes[3].imshow(mask, alpha=0.3, cmap='gray')
            axes[3].set_title('Sélection des tissus')
            plt.show()
        return img, lab, new_img, mask
Exemple #6
0
class SlideImage(object):
    """Wrapper around an OpenSlide whole-slide image plus a DeepZoom tile
    generator, serving tiles and regions (e.g. to a viewer or inference)."""

    def __init__(self, filename, downsample=DEEPZOOM_DOWNSAMPLE_FACTOR):
        # Path to the slide file, and the requested downsample factor
        # used by get_image() when picking a pyramid level.
        self.filename = filename
        self.downsample = downsample

        try:
            self.osr = OpenSlide(self.filename)
        except OpenSlideUnsupportedFormatError:
            # Report the unsupported file, then let the caller handle it.
            print('Format of file {0} is not supported by openslide'.format(
                self.filename))
            raise
        except OpenSlideError:
            print('Unknown Openslide error')
            raise

        # DeepZoom pyramid built over the full slide (no bound limiting).
        self.zoom = DeepZoomGenerator(self.osr,
                                      tile_size=DEEPZOOM_TILE_SIZE,
                                      limit_bounds=False)
        print("File {0} \nDimensions (Level 0) {1}\n".format(
            self.filename, self.osr.dimensions))
        print("DeepZoom properties:\nLevel count: {0}\nLevel Dimensions:{1}\n".
              format(self.zoom.level_count, self.zoom.level_dimensions))

    def set_downsample(self, ds):
        """Change the downsample factor used by subsequent get_image calls."""
        self.downsample = ds

    def get_image_size(self):
        """Return the slide's level-0 (width, height)."""
        return self.osr.dimensions

    def get_tile(self, z, coord):
        """Return the DeepZoom tile at level z and tile coordinate `coord`."""
        image = self.zoom.get_tile(z, coord)
        return image

    def get_image(self, coord, z, dim):
        """Read a region of size `dim` at location `coord`, downscaled.

        The pyramid level is chosen from self.downsample; `dim` is shrunk
        by the same factor before the read. `z` is only printed.
        """
        # level = (self.osr.level_count -1) - round(z/self.osr.level_count)
        # level = max(0, min((self.osr.level_count-1), level))
        # downsample = self.osr.level_downsamples[level]

        level = self.osr.get_best_level_for_downsample(self.downsample)

        print("Level: {0} Z: {1} z/levelcount: {2}".format(
            level, z, z / self.osr.level_count))
        print(
            "W, H: {0} X, Y: {1} Z: {2} level: {3} downsample: {4} level_count: {5}"
            .format(dim, coord, z, level, self.downsample,
                    self.osr.level_count))

        # NOTE(review): dim is scaled by the *requested* downsample, not by
        # the actual downsample of the chosen level
        # (self.osr.level_downsamples[level]); when they differ the region
        # covers a different area than requested — verify intent.
        dim = (int(dim[0] / self.downsample), int(dim[1] / self.downsample))
        image = self.osr.read_region(coord, level, dim)
        return image

    def infer(self, coord, z, dim):
        """Fetch a region via get_image and return it as an RGB PIL image.

        The model-inference step itself is stubbed out (commented code).
        """
        img = self.get_image(coord, z, dim)

        ## Inference code here
        ## Should be something like model.predict(img)
        ## img should be a python PIL image, and it expects the same as output.
        img = img.convert(mode="RGB")
        # Tag the image with its source path (ad-hoc attribute on the PIL
        # object, read by downstream consumers).
        img.name = self.filename

        #infer = InfererURL(img, 'http://hovernet.northeurope.azurecontainer.io:8501/v1/models/hover_pannuke:predict', 'hv_seg_class_pannuke')
        #overlay = infer.run()
        #overlay = overlay[:, :, 0]

        # overlay = PIL.Image.fromarray(overlay, mode="P")
        # img.putalpha(img.convert(mode="L"))

        #plt.imshow(img)
        #plt.imshow(overlay, alpha=0.5)

        #plt.show()

        return img

    def benchmark(self):
        """Run infer() once with fixed coordinates/size at downsample 8."""
        coord = (50371, 50357)
        z = 20
        dim = (5500, 4000)

        self.set_downsample(8)
        self.infer(coord, z, dim)