def testNewAndWriteCrop():
    imagePath = datastore.fetch('sample_image.ptif')
    source = large_image.open(imagePath)
    out = large_image_source_vips.new()
    assert out.crop is None
    out.crop = 10, 10, 2000, 2000
    assert out.crop is not None
    out.crop = None
    assert out.crop is None
    with pytest.raises(TileSourceError):
        out.crop = -1, -1, -1, -1
    out.crop = 10, 10, 2000, 2000
    for tile in source.tileIterator(
        format=large_image.constants.TILE_FORMAT_NUMPY,
        region=dict(right=4000, bottom=2000),
    ):
        out.addTile(tile['tile'], x=tile['x'], y=tile['y'])
    tmpdir = tempfile.mkdtemp()
    outputPath = os.path.join(tmpdir, 'temp.tiff')
    try:
        out.write(outputPath)
        result = large_image.open(outputPath)
        resultMetadata = result.getMetadata()
        assert resultMetadata['sizeX'] == 2000
        assert resultMetadata['sizeY'] == 1990
    finally:
        shutil.rmtree(tmpdir)

def testNewAndWrite():
    imagePath = datastore.fetch('sample_image.ptif')
    source = large_image.open(imagePath)
    out = large_image_source_vips.new()
    for tile in source.tileIterator(
        format=large_image.constants.TILE_FORMAT_NUMPY,
        region=dict(right=4000, bottom=2000),
    ):
        out.addTile(tile['tile'], x=tile['x'], y=tile['y'])
    assert out.bandFormat == pyvips.enums.BandFormat.UCHAR
    assert out.bandRanges['min'][0] > 10
    assert out.mm_x is None
    assert out.mm_y is None
    out.mm_x = source.getNativeMagnification()['mm_x']
    out.mm_y = source.getNativeMagnification()['mm_y']
    assert out.mm_x == 0.00025
    assert out.mm_y == 0.00025
    tmpdir = tempfile.mkdtemp()
    outputPath = os.path.join(tmpdir, 'temp.tiff')
    try:
        out.write(outputPath)
        assert os.path.getsize(outputPath) > 50000
        result = large_image.open(outputPath)
        resultMetadata = result.getMetadata()
        assert resultMetadata['sizeX'] == 4000
    finally:
        shutil.rmtree(tmpdir)

def testTileOverlap():
    testDir = os.path.dirname(os.path.realpath(__file__))
    imagePath = os.path.join(testDir, 'test_files', 'test_orient1.tif')
    ts = large_image.open(imagePath)
    assert [
        (tiles['x'], tiles['x'] + tiles['width'], tiles['width'],
         tiles['tile_overlap']['left'], tiles['tile_overlap']['right'])
        for tiles in ts.tileIterator(
            tile_size=dict(width=75, height=180),
            tile_overlap=dict(x=60))
    ] == [
        (0, 75, 75, 0, 30),
        (15, 90, 75, 30, 30),
        (30, 105, 75, 30, 30),
        (45, 120, 75, 30, 0),
    ]
    assert [
        (tiles['x'], tiles['x'] + tiles['width'], tiles['width'],
         tiles['tile_overlap']['left'], tiles['tile_overlap']['right'])
        for tiles in ts.tileIterator(
            tile_size=dict(width=75, height=180),
            tile_overlap=dict(x=60, edges=True))
    ] == [
        (0, 45, 45, 0, 30),
        (0, 60, 60, 15, 30),
        (0, 75, 75, 30, 30),
        (15, 90, 75, 30, 30),
        (30, 105, 75, 30, 30),
        (45, 120, 75, 30, 30),
        (60, 120, 60, 30, 15),
        (75, 120, 45, 30, 0),
    ]

def testConvertToAperio(tmpdir):
    imagePath = datastore.fetch('huron.image2_jpeg2k.tif')
    outputPath = os.path.join(tmpdir, 'out.svs')
    large_image_converter.convert(imagePath, outputPath, format='aperio')
    source = large_image.open(outputPath)
    assert 'openslide' in source.name
    assert 'label' in source.getAssociatedImagesList()

def testConvertMultiframeToAperio(tmpdir):
    imagePath = datastore.fetch('sample.ome.tif')
    outputPath = os.path.join(tmpdir, 'out.tiff')
    large_image_converter.convert(imagePath, outputPath, format='aperio',
                                  compression='jp2k')
    source = large_image.open(outputPath)
    assert 'label' in source.getAssociatedImagesList()

def testNewAndWriteLossless():
    imagePath = datastore.fetch('sample_image.ptif')
    source = large_image.open(imagePath)
    out = large_image_source_vips.new()
    for tile in source.tileIterator(
        format=large_image.constants.TILE_FORMAT_NUMPY,
        region=dict(right=4000, bottom=2000),
    ):
        out.addTile(tile['tile'], x=tile['x'], y=tile['y'])
    tmpdir = tempfile.mkdtemp()
    outputPath = os.path.join(tmpdir, 'temp.tiff')
    try:
        out.write(outputPath, lossy=False)
        assert os.path.getsize(outputPath) > 50000
        result = large_image.open(outputPath)
        resultMetadata = result.getMetadata()
        assert resultMetadata['sizeX'] == 4000
    finally:
        shutil.rmtree(tmpdir)

def testTileOverlapWithRegionOffset():
    imagePath = datastore.fetch('sample_image.ptif')
    ts = large_image.open(imagePath)
    tileIter = ts.tileIterator(
        region=dict(left=10000, top=10000, width=6000, height=6000),
        tile_size=dict(width=1936, height=1936),
        tile_overlap=dict(x=400, y=400))
    firstTile = next(tileIter)
    assert firstTile['tile_overlap']['right'] == 200

def testNewAndWriteJPEG():
    imagePath = datastore.fetch('sample_image.ptif')
    source = large_image.open(imagePath)
    # Update this if it doesn't direct to vips source
    out = large_image.new()
    for tile in source.tileIterator(
        format=large_image.constants.TILE_FORMAT_NUMPY,
        region=dict(right=4000, bottom=2000),
    ):
        out.addTile(tile['tile'], x=tile['x'], y=tile['y'])
    tmpdir = tempfile.mkdtemp()
    outputPath = os.path.join(tmpdir, 'temp.jpeg')
    try:
        out.write(outputPath, vips_kwargs=dict(Q=80))
        assert os.path.getsize(outputPath) > 50000
        image = open(outputPath, 'rb').read()
        assert image[:len(utilities.JPEGHeader)] == utilities.JPEGHeader
    finally:
        shutil.rmtree(tmpdir)

def testLazyTileRelease():
    imagePath = datastore.fetch('sample_image.ptif')
    ts = large_image.open(imagePath)
    tiles = list(ts.tileIterator(
        scale={'magnification': 2.5},
        format=large_image.constants.TILE_FORMAT_IMAGE,
        encoding='PNG'))
    assert isinstance(tiles[5], large_image.tilesource.tiledict.LazyTileDict)
    assert super(large_image.tilesource.tiledict.LazyTileDict,
                 tiles[5]).__getitem__('tile') is None
    data = tiles[5]['tile']
    assert len(tiles[5]['tile']) > 0
    assert super(large_image.tilesource.tiledict.LazyTileDict,
                 tiles[5]).__getitem__('tile') is not None
    tiles[5].release()
    assert super(large_image.tilesource.tiledict.LazyTileDict,
                 tiles[5]).__getitem__('tile') is None
    assert tiles[5]['tile'] == data

def _py_read_chunk_pixels(
    self,
    chunk_top,
    chunk_left,
    chunk_bottom,
    chunk_right,
    filename,
    returned_magnification,
    factor,
):
    """
    Read from disk all the pixel data for a specific chunk of the whole slide.
    """
    filename = filename.numpy().decode("utf-8")
    chunk_top = math.floor(chunk_top.numpy() / factor.numpy() + 0.01)
    chunk_left = math.floor(chunk_left.numpy() / factor.numpy() + 0.01)
    chunk_bottom = math.floor(chunk_bottom.numpy() / factor.numpy() + 0.01)
    chunk_right = math.floor(chunk_right.numpy() / factor.numpy() + 0.01)
    returned_magnification = returned_magnification.numpy()

    import large_image

    ts = large_image.open(filename)
    chunk = ts.getRegion(
        scale=dict(magnification=returned_magnification),
        format=large_image.constants.TILE_FORMAT_NUMPY,
        region=dict(
            left=chunk_left,
            top=chunk_top,
            width=chunk_right - chunk_left,
            height=chunk_bottom - chunk_top,
            units="mag_pixels",
        ),
    )[0]
    # Do we want to support other than RGB and/or other than uint8?!!!
    return tf.convert_to_tensor(chunk[..., :3], dtype=tf.uint8)

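# Illustrative sketch only (not part of this module): the arguments above
# arrive as TensorFlow tensors (hence the .numpy() calls), so this reader is
# meant to be invoked eagerly, for example through tf.py_function inside a
# tf.data pipeline.  The variable names in the snippet are assumptions made
# for the sketch.
#
#     pixels = tf.py_function(
#         func=self._py_read_chunk_pixels,
#         inp=[chunk_top, chunk_left, chunk_bottom, chunk_right,
#              filename, returned_magnification, factor],
#         Tout=tf.uint8,
#     )
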
def testGetDummyTileSource():
    source = large_image.open('large_image://dummy')
    assert isinstance(source, large_image_source_dummy.DummyTileSource)

def testClassRepr():
    imagePath = datastore.fetch('sample_image.ptif')
    ts = large_image.open(imagePath)
    assert 'sample_image.ptif' in repr(ts)

def testBaseFileNotFound():
    with pytest.raises(large_image.exceptions.TileSourceFileNotFoundError):
        large_image.open('nosuchfile')
    with pytest.raises(large_image.exceptions.TileSourceFileNotFoundError):
        large_image.open('nosuchfile.ext')

def __call__(self, slide):
    """
    Add level, target_magnification, scan_magnification, read_magnification,
    returned_magnification, number_pixel_rows_for_slide, and
    number_pixel_columns_for_slide fields to a slide dictionary.
    """
    # Check values.
    if "filename" not in slide:
        raise ValueError('slide["filename"] must already be set.')
    filename = slide["filename"]

    # Do the work.
    if not re.compile(r"\.zarr$").search(filename):
        import large_image

        # read whole-slide image file and create large_image object
        ts = large_image.open(filename)

        # scan_magnification = highest available magnification from source
        scan_magnification = float(
            ts.getNativeMagnification()["magnification"])

        if self.magnification_source == "exact":
            # Use the tile-source level that large_image is willing to
            # interpolate for us.
            preferred_levels = [
                ts.getLevelForMagnification(
                    self.target_magnification, rounding=False)
            ]
        else:
            # self.magnification_source in ["scan", "native"]
            # Use one of the tile-source levels that is stored in the image file.
            preferred_levels = list(
                set(ts.getPreferredLevel(level) for level in range(ts.levels)))
            preferred_levels.sort(reverse=True)
            if self.magnification_source == "scan":
                # Keep only the maximum tile-source level
                preferred_levels = preferred_levels[0:1]

        estimated_magnifications = np.array([
            float(ts.getMagnificationForLevel(level)["magnification"])
            for level in preferred_levels
        ])

        # Find best tile-source level to use
        (level, returned_magnification) = self._get_level_and_magnifications(
            self.target_magnification, estimated_magnifications)
        # Rather than as the index into preferred_levels, change level to be
        # the value that large_image uses
        level = preferred_levels[level]

        # If large_image is resampling a native level for us, it is starting
        # with the preferred level that is the least one that is not smaller
        # than the resampled level.
        read_magnification = float(
            ts.getMagnificationForLevel(
                min([
                    ts.getPreferredLevel(i)
                    for i in range(ts.levels) if i >= level
                ]))["magnification"])

        slide["target_magnification"] = self.target_magnification
        slide["scan_magnification"] = scan_magnification
        slide["read_magnification"] = read_magnification
        slide["returned_magnification"] = returned_magnification

        # We don't want to walk off the right or bottom of the slide so we are
        # conservative as to how many pixels large_image will return for us.
        # 1) large_image starts with an image that is of read_magnification;
        #    we compute the dimensions for read_magnification with math.floor
        #    from the dimensions of scan_magnification (i.e., ts.sizeX and
        #    ts.sizeY) to be conservative.
        # 2) large_image or external software may resample from the
        #    read_magnification to the target_magnification; we compute
        #    dimensions for the target_magnification with math.floor from the
        #    read_magnification to be conservative.
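        # For example (an illustrative calculation, not taken from the source):
        # for a 40x scan that is 100000 columns wide, read at 20x and targeted
        # at 10x, step 1 gives floor(100000 * 20 / 40) = 50000 columns and
        # step 2 gives floor(50000 * 10 / 20) = 25000 columns.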
        number_pixel_rows_for_slide = ts.sizeY
        number_pixel_columns_for_slide = ts.sizeX
        if scan_magnification != read_magnification:
            number_pixel_rows_for_slide = math.floor(
                number_pixel_rows_for_slide *
                read_magnification / scan_magnification)
            number_pixel_columns_for_slide = math.floor(
                number_pixel_columns_for_slide *
                read_magnification / scan_magnification)
        if read_magnification != self.target_magnification:
            number_pixel_rows_for_slide = math.floor(
                number_pixel_rows_for_slide *
                self.target_magnification / read_magnification)
            number_pixel_columns_for_slide = math.floor(
                number_pixel_columns_for_slide *
                self.target_magnification / read_magnification)

    else:
        import zarr
        import openslide

        # read whole-slide image and create zarr objects
        store = zarr.DirectoryStore(filename)
        source_group = zarr.open(store, mode="r")

        # scan_magnification = highest available magnification from source
        scan_magnification = float(
            source_group.attrs[openslide.PROPERTY_NAME_OBJECTIVE_POWER])

        preferred_levels = list(
            range(0, len(source_group.attrs["level_downsamples"])))
        if self.magnification_source == "scan":
            preferred_levels = [
                np.argmin(source_group.attrs["level_downsamples"])
            ]

        # calculate magnifications of levels
        estimated_magnifications = np.array([
            scan_magnification /
            source_group.attrs["level_downsamples"][level]
            for level in preferred_levels
        ])

        # Find best native level to use
        (level, returned_magnification) = self._get_level_and_magnifications(
            self.target_magnification, estimated_magnifications)
        # Rather than as the index into preferred_levels, change level to be
        # the value that zarr uses
        level = preferred_levels[level]

        slide["target_magnification"] = self.target_magnification
        slide["scan_magnification"] = scan_magnification
        slide["read_magnification"] = returned_magnification
        slide["returned_magnification"] = returned_magnification

        # get slide number_pixel_columns_for_slide, number_pixel_rows_for_slide
        # at desired magnification.  (Note that number_pixel_rows_for_slide is
        # before number_pixel_columns_for_slide)
        number_pixel_rows_for_slide, number_pixel_columns_for_slide = (
            source_group[format(level)].shape[0:2])

        if (self.magnification_source == "exact" and
                self.target_magnification != returned_magnification):
            raise ValueError(
                f"Couldn't find magnification {self.target_magnification}X "
                "in Zarr storage.")

    slide["level"] = level
    # Note that slide size is defined by the requested magnification, which may
    # not be the same as the magnification for the selected level.  To get the
    # slide size for the magnification that we are using, these values must
    # later be multiplied by returned_magnification / target_magnification.
    slide["number_pixel_rows_for_slide"] = number_pixel_rows_for_slide
    slide["number_pixel_columns_for_slide"] = number_pixel_columns_for_slide
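
# A minimal usage sketch (illustrative, not from this module).  `finder` stands
# for an instance of whatever class defines the __call__ above, configured with
# a target_magnification and a magnification_source; the only requirement taken
# from the code itself is that slide["filename"] is set beforehand.
#
#     slide = {"filename": "example.svs"}
#     finder(slide)
#     # slide now also carries "level", "scan_magnification",
#     # "read_magnification", "returned_magnification",
#     # "number_pixel_rows_for_slide", and "number_pixel_columns_for_slide".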